Diffstat (limited to 'sys/dev')
71 files changed, 4635 insertions, 3677 deletions
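The largest addition below is amdsmu(4), a driver for the AMD System Management Unit that exports SMU firmware details and S0ix sleep metrics as read-only sysctls. As a usage sketch, a userland reader might look like the following; the dev.amdsmu.0.metrics.* OID name is inferred from the standard dev.<driver>.<unit> prefix plus the node names added in amdsmu_attach() below, not stated anywhere in this commit:

	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		uint64_t us;
		size_t len = sizeof(us);

		/* OID name assumed from the SYSCTL_ADD_U64() calls in this patch. */
		if (sysctlbyname("dev.amdsmu.0.metrics.time_last_in_s0i3",
		    &us, &len, NULL, 0) == -1) {
			perror("sysctlbyname");
			return (1);
		}
		printf("last S0i3 residency: %ju us\n", (uintmax_t)us);
		return (0);
	}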
diff --git a/sys/dev/amdsmu/amdsmu.c b/sys/dev/amdsmu/amdsmu.c new file mode 100644 index 000000000000..416f875c6176 --- /dev/null +++ b/sys/dev/amdsmu/amdsmu.c @@ -0,0 +1,466 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2025 The FreeBSD Foundation + * + * This software was developed by Aymeric Wibo <obiwac@freebsd.org> + * under sponsorship from the FreeBSD Foundation. + */ + +#include <sys/param.h> +#include <sys/bus.h> +#include <sys/kernel.h> +#include <sys/module.h> +#include <sys/rman.h> +#include <sys/sysctl.h> + +#include <dev/pci/pcivar.h> +#include <dev/amdsmu/amdsmu.h> + +static bool +amdsmu_match(device_t dev, const struct amdsmu_product **product_out) +{ + const uint16_t vendorid = pci_get_vendor(dev); + const uint16_t deviceid = pci_get_device(dev); + + for (size_t i = 0; i < nitems(amdsmu_products); i++) { + const struct amdsmu_product *prod = &amdsmu_products[i]; + + if (vendorid == prod->amdsmu_vendorid && + deviceid == prod->amdsmu_deviceid) { + if (product_out != NULL) + *product_out = prod; + return (true); + } + } + return (false); +} + +static void +amdsmu_identify(driver_t *driver, device_t parent) +{ + if (device_find_child(parent, "amdsmu", -1) != NULL) + return; + + if (amdsmu_match(parent, NULL)) { + if (device_add_child(parent, "amdsmu", -1) == NULL) + device_printf(parent, "add amdsmu child failed\n"); + } +} + +static int +amdsmu_probe(device_t dev) +{ + if (resource_disabled("amdsmu", 0)) + return (ENXIO); + if (!amdsmu_match(device_get_parent(dev), NULL)) + return (ENXIO); + device_set_descf(dev, "AMD System Management Unit"); + + return (BUS_PROBE_GENERIC); +} + +static enum amdsmu_res +amdsmu_wait_res(device_t dev) +{ + struct amdsmu_softc *sc = device_get_softc(dev); + enum amdsmu_res res; + + /* + * The SMU has a response ready for us when the response register is + * set. Otherwise, we must wait. + */ + for (size_t i = 0; i < SMU_RES_READ_MAX; i++) { + res = amdsmu_read4(sc, SMU_REG_RESPONSE); + if (res != SMU_RES_WAIT) + return (res); + pause_sbt("amdsmu", ustosbt(SMU_RES_READ_PERIOD_US), 0, + C_HARDCLOCK); + } + device_printf(dev, "timed out waiting for response from SMU\n"); + return (SMU_RES_WAIT); +} + +static int +amdsmu_cmd(device_t dev, enum amdsmu_msg msg, uint32_t arg, uint32_t *ret) +{ + struct amdsmu_softc *sc = device_get_softc(dev); + enum amdsmu_res res; + + /* Wait for SMU to be ready. */ + if (amdsmu_wait_res(dev) == SMU_RES_WAIT) + return (ETIMEDOUT); + + /* Clear previous response. */ + amdsmu_write4(sc, SMU_REG_RESPONSE, SMU_RES_WAIT); + + /* Write out command to registers. */ + amdsmu_write4(sc, SMU_REG_MESSAGE, msg); + amdsmu_write4(sc, SMU_REG_ARGUMENT, arg); + + /* Wait for SMU response and handle it. 
*/ + res = amdsmu_wait_res(dev); + + switch (res) { + case SMU_RES_WAIT: + return (ETIMEDOUT); + case SMU_RES_OK: + if (ret != NULL) + *ret = amdsmu_read4(sc, SMU_REG_ARGUMENT); + return (0); + case SMU_RES_REJECT_BUSY: + device_printf(dev, "SMU is busy\n"); + return (EBUSY); + case SMU_RES_REJECT_PREREQ: + case SMU_RES_UNKNOWN: + case SMU_RES_FAILED: + device_printf(dev, "SMU error: %02x\n", res); + return (EIO); + } + + return (EINVAL); +} + +static int +amdsmu_get_vers(device_t dev) +{ + int err; + uint32_t smu_vers; + struct amdsmu_softc *sc = device_get_softc(dev); + + err = amdsmu_cmd(dev, SMU_MSG_GETSMUVERSION, 0, &smu_vers); + if (err != 0) { + device_printf(dev, "failed to get SMU version\n"); + return (err); + } + sc->smu_program = (smu_vers >> 24) & 0xFF; + sc->smu_maj = (smu_vers >> 16) & 0xFF; + sc->smu_min = (smu_vers >> 8) & 0xFF; + sc->smu_rev = smu_vers & 0xFF; + device_printf(dev, "SMU version: %d.%d.%d (program %d)\n", + sc->smu_maj, sc->smu_min, sc->smu_rev, sc->smu_program); + + return (0); +} + +static int +amdsmu_get_ip_blocks(device_t dev) +{ + struct amdsmu_softc *sc = device_get_softc(dev); + const uint16_t deviceid = pci_get_device(dev); + int err; + struct amdsmu_metrics *m = &sc->metrics; + bool active; + char sysctl_descr[32]; + + /* Get IP block count. */ + switch (deviceid) { + case PCI_DEVICEID_AMD_REMBRANDT_ROOT: + sc->ip_block_count = 12; + break; + case PCI_DEVICEID_AMD_PHOENIX_ROOT: + sc->ip_block_count = 21; + break; + /* TODO How many IP blocks does Strix Point (and the others) have? */ + case PCI_DEVICEID_AMD_STRIX_POINT_ROOT: + default: + sc->ip_block_count = nitems(amdsmu_ip_blocks_names); + } + KASSERT(sc->ip_block_count <= nitems(amdsmu_ip_blocks_names), + ("too many IP blocks for array")); + + /* Get and print out IP blocks. */ + err = amdsmu_cmd(dev, SMU_MSG_GET_SUP_CONSTRAINTS, 0, + &sc->active_ip_blocks); + if (err != 0) { + device_printf(dev, "failed to get IP blocks\n"); + return (err); + } + device_printf(dev, "Active IP blocks: "); + for (size_t i = 0; i < sc->ip_block_count; i++) { + active = (sc->active_ip_blocks & (1 << i)) != 0; + sc->ip_blocks_active[i] = active; + if (!active) + continue; + printf("%s%s", amdsmu_ip_blocks_names[i], + i + 1 < sc->ip_block_count ? " " : "\n"); + } + + /* Create a sysctl node for IP blocks. */ + sc->ip_blocks_sysctlnode = SYSCTL_ADD_NODE(sc->sysctlctx, + SYSCTL_CHILDREN(sc->sysctlnode), OID_AUTO, "ip_blocks", + CTLFLAG_RD, NULL, "SMU metrics"); + if (sc->ip_blocks_sysctlnode == NULL) { + device_printf(dev, "could not add sysctl node for IP blocks\n"); + return (ENOMEM); + } + + /* Create a sysctl node for each IP block. */ + for (size_t i = 0; i < sc->ip_block_count; i++) { + /* Create the sysctl node itself for the IP block. */ + snprintf(sysctl_descr, sizeof sysctl_descr, + "Metrics about the %s AMD IP block", + amdsmu_ip_blocks_names[i]); + sc->ip_block_sysctlnodes[i] = SYSCTL_ADD_NODE(sc->sysctlctx, + SYSCTL_CHILDREN(sc->ip_blocks_sysctlnode), OID_AUTO, + amdsmu_ip_blocks_names[i], CTLFLAG_RD, NULL, sysctl_descr); + if (sc->ip_block_sysctlnodes[i] == NULL) { + device_printf(dev, + "could not add sysctl node for \"%s\"\n", sysctl_descr); + continue; + } + /* + * Create sysctls for whether the IP block is currently active, + * its last active time, and its total active time.
+ */ + SYSCTL_ADD_BOOL(sc->sysctlctx, + SYSCTL_CHILDREN(sc->ip_block_sysctlnodes[i]), OID_AUTO, + "active", CTLFLAG_RD, &sc->ip_blocks_active[i], 0, + "IP block is currently active"); + SYSCTL_ADD_U64(sc->sysctlctx, + SYSCTL_CHILDREN(sc->ip_block_sysctlnodes[i]), OID_AUTO, + "last_time", CTLFLAG_RD, &m->ip_block_last_active_time[i], + 0, "How long the IP block was active for during the last" + " sleep (us)"); +#ifdef IP_BLOCK_TOTAL_ACTIVE_TIME + SYSCTL_ADD_U64(sc->sysctlctx, + SYSCTL_CHILDREN(sc->ip_block_sysctlnodes[i]), OID_AUTO, + "total_time", CTLFLAG_RD, &m->ip_block_total_active_time[i], + 0, "How long the IP block was active for during sleep in" + " total (us)"); +#endif + } + return (0); +} + +static int +amdsmu_init_metrics(device_t dev) +{ + struct amdsmu_softc *sc = device_get_softc(dev); + int err; + uint32_t metrics_addr_lo, metrics_addr_hi; + uint64_t metrics_addr; + + /* Get physical address of logging buffer. */ + err = amdsmu_cmd(dev, SMU_MSG_LOG_GETDRAM_ADDR_LO, 0, &metrics_addr_lo); + if (err != 0) + return (err); + err = amdsmu_cmd(dev, SMU_MSG_LOG_GETDRAM_ADDR_HI, 0, &metrics_addr_hi); + if (err != 0) + return (err); + metrics_addr = ((uint64_t) metrics_addr_hi << 32) | metrics_addr_lo; + + /* Map memory of logging buffer. */ + err = bus_space_map(sc->bus_tag, metrics_addr, + sizeof(struct amdsmu_metrics), 0, &sc->metrics_space); + if (err != 0) { + device_printf(dev, "could not map bus space for SMU metrics\n"); + return (err); + } + + /* Start logging for metrics. */ + amdsmu_cmd(dev, SMU_MSG_LOG_RESET, 0, NULL); + amdsmu_cmd(dev, SMU_MSG_LOG_START, 0, NULL); + return (0); +} + +static int +amdsmu_dump_metrics(device_t dev) +{ + struct amdsmu_softc *sc = device_get_softc(dev); + int err; + + err = amdsmu_cmd(dev, SMU_MSG_LOG_DUMP_DATA, 0, NULL); + if (err != 0) { + device_printf(dev, "failed to dump metrics\n"); + return (err); + } + bus_space_read_region_4(sc->bus_tag, sc->metrics_space, 0, + (uint32_t *)&sc->metrics, sizeof(sc->metrics) / sizeof(uint32_t)); + + return (0); +} + +static void +amdsmu_fetch_idlemask(device_t dev) +{ + struct amdsmu_softc *sc = device_get_softc(dev); + + sc->idlemask = amdsmu_read4(sc, SMU_REG_IDLEMASK); +} + +static int +amdsmu_attach(device_t dev) +{ + struct amdsmu_softc *sc = device_get_softc(dev); + int err; + uint32_t physbase_addr_lo, physbase_addr_hi; + uint64_t physbase_addr; + int rid = 0; + struct sysctl_oid *node; + + /* + * Find physical base address for SMU. + * XXX I am a little confused about the masks here. I'm just copying + * what Linux does in the amd-pmc driver to get the base address. + */ + pci_write_config(dev, SMU_INDEX_ADDRESS, SMU_PHYSBASE_ADDR_LO, 4); + physbase_addr_lo = pci_read_config(dev, SMU_INDEX_DATA, 4) & 0xFFF00000; + + pci_write_config(dev, SMU_INDEX_ADDRESS, SMU_PHYSBASE_ADDR_HI, 4); + physbase_addr_hi = pci_read_config(dev, SMU_INDEX_DATA, 4) & 0x0000FFFF; + + physbase_addr = (uint64_t)physbase_addr_hi << 32 | physbase_addr_lo; + + /* Map memory for SMU and its registers. 
*/ + sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); + if (sc->res == NULL) { + device_printf(dev, "could not allocate resource\n"); + return (ENXIO); + } + + sc->bus_tag = rman_get_bustag(sc->res); + + if (bus_space_map(sc->bus_tag, physbase_addr, + SMU_MEM_SIZE, 0, &sc->smu_space) != 0) { + device_printf(dev, "could not map bus space for SMU\n"); + err = ENXIO; + goto err_smu_space; + } + if (bus_space_map(sc->bus_tag, physbase_addr + SMU_REG_SPACE_OFF, + SMU_MEM_SIZE, 0, &sc->reg_space) != 0) { + device_printf(dev, "could not map bus space for SMU regs\n"); + err = ENXIO; + goto err_reg_space; + } + + /* sysctl stuff. */ + sc->sysctlctx = device_get_sysctl_ctx(dev); + sc->sysctlnode = device_get_sysctl_tree(dev); + + /* Get version & add sysctls. */ + if ((err = amdsmu_get_vers(dev)) != 0) + goto err_dump; + + SYSCTL_ADD_U8(sc->sysctlctx, SYSCTL_CHILDREN(sc->sysctlnode), OID_AUTO, + "program", CTLFLAG_RD, &sc->smu_program, 0, "SMU program number"); + SYSCTL_ADD_U8(sc->sysctlctx, SYSCTL_CHILDREN(sc->sysctlnode), OID_AUTO, + "version_major", CTLFLAG_RD, &sc->smu_maj, 0, + "SMU firmware major version number"); + SYSCTL_ADD_U8(sc->sysctlctx, SYSCTL_CHILDREN(sc->sysctlnode), OID_AUTO, + "version_minor", CTLFLAG_RD, &sc->smu_min, 0, + "SMU firmware minor version number"); + SYSCTL_ADD_U8(sc->sysctlctx, SYSCTL_CHILDREN(sc->sysctlnode), OID_AUTO, + "version_revision", CTLFLAG_RD, &sc->smu_rev, 0, + "SMU firmware revision number"); + + /* Set up for getting metrics & add sysctls. */ + if ((err = amdsmu_init_metrics(dev)) != 0) + goto err_dump; + if ((err = amdsmu_dump_metrics(dev)) != 0) + goto err_dump; + + node = SYSCTL_ADD_NODE(sc->sysctlctx, SYSCTL_CHILDREN(sc->sysctlnode), + OID_AUTO, "metrics", CTLFLAG_RD, NULL, "SMU metrics"); + if (node == NULL) { + device_printf(dev, "could not add sysctl node for metrics\n"); + err = ENOMEM; + goto err_dump; + } + + SYSCTL_ADD_U32(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO, + "table_version", CTLFLAG_RD, &sc->metrics.table_version, 0, + "SMU metrics table version"); + SYSCTL_ADD_U32(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO, + "hint_count", CTLFLAG_RD, &sc->metrics.hint_count, 0, + "How many times the sleep hint was set"); + SYSCTL_ADD_U32(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO, + "s0i3_last_entry_status", CTLFLAG_RD, + &sc->metrics.s0i3_last_entry_status, 0, + "1 if last S0i3 entry was successful"); + SYSCTL_ADD_U32(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO, + "time_last_in_s0i2", CTLFLAG_RD, &sc->metrics.time_last_in_s0i2, 0, + "Time spent in S0i2 during last sleep (us)"); + SYSCTL_ADD_U64(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO, + "time_last_entering_s0i3", CTLFLAG_RD, + &sc->metrics.time_last_entering_s0i3, 0, + "Time spent entering S0i3 during last sleep (us)"); + SYSCTL_ADD_U64(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO, + "total_time_entering_s0i3", CTLFLAG_RD, + &sc->metrics.total_time_entering_s0i3, 0, + "Total time spent entering S0i3 (us)"); + SYSCTL_ADD_U64(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO, + "time_last_resuming", CTLFLAG_RD, &sc->metrics.time_last_resuming, + 0, "Time spent resuming from last sleep (us)"); + SYSCTL_ADD_U64(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO, + "total_time_resuming", CTLFLAG_RD, &sc->metrics.total_time_resuming, + 0, "Total time spent resuming from sleep (us)"); + SYSCTL_ADD_U64(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO, + "time_last_in_s0i3", CTLFLAG_RD, &sc->metrics.time_last_in_s0i3, 0, + "Time spent in S0i3 during last sleep (us)"); 
+ SYSCTL_ADD_U64(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO, + "total_time_in_s0i3", CTLFLAG_RD, &sc->metrics.total_time_in_s0i3, + 0, "Total time spent in S0i3 (us)"); + SYSCTL_ADD_U64(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO, + "time_last_in_sw_drips", CTLFLAG_RD, + &sc->metrics.time_last_in_sw_drips, 0, + "Time spent awake during last sleep (us)"); + SYSCTL_ADD_U64(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO, + "total_time_in_sw_drips", CTLFLAG_RD, + &sc->metrics.total_time_in_sw_drips, 0, + "Total time spent awake (us)"); + + /* Get IP blocks & add sysctls. */ + err = amdsmu_get_ip_blocks(dev); + if (err != 0) + goto err_dump; + + /* Get idlemask & add sysctl. */ + amdsmu_fetch_idlemask(dev); + SYSCTL_ADD_U32(sc->sysctlctx, SYSCTL_CHILDREN(sc->sysctlnode), OID_AUTO, + "idlemask", CTLFLAG_RD, &sc->idlemask, 0, "SMU idlemask. This " + "value is not documented - only used to help AMD internally debug " + "issues"); + + return (0); +err_dump: + bus_space_unmap(sc->bus_tag, sc->reg_space, SMU_MEM_SIZE); +err_reg_space: + bus_space_unmap(sc->bus_tag, sc->smu_space, SMU_MEM_SIZE); +err_smu_space: + bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res); + return (err); +} + +static int +amdsmu_detach(device_t dev) +{ + struct amdsmu_softc *sc = device_get_softc(dev); + int rid = 0; + + bus_space_unmap(sc->bus_tag, sc->smu_space, SMU_MEM_SIZE); + bus_space_unmap(sc->bus_tag, sc->reg_space, SMU_MEM_SIZE); + + bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res); + return (0); +} + +static device_method_t amdsmu_methods[] = { + DEVMETHOD(device_identify, amdsmu_identify), + DEVMETHOD(device_probe, amdsmu_probe), + DEVMETHOD(device_attach, amdsmu_attach), + DEVMETHOD(device_detach, amdsmu_detach), + DEVMETHOD_END +}; + +static driver_t amdsmu_driver = { + "amdsmu", + amdsmu_methods, + sizeof(struct amdsmu_softc), +}; + +DRIVER_MODULE(amdsmu, hostb, amdsmu_driver, NULL, NULL); +MODULE_VERSION(amdsmu, 1); +MODULE_DEPEND(amdsmu, amdsmn, 1, 1, 1); +MODULE_PNP_INFO("U16:vendor;U16:device", pci, amdsmu, amdsmu_products, + nitems(amdsmu_products));
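The three registers used above (SMU_REG_MESSAGE, SMU_REG_ARGUMENT, SMU_REG_RESPONSE, defined in amdsmu_reg.h below) form a simple mailbox. A condensed sketch of the handshake that amdsmu_cmd() implements, with the bounded polling and error handling of the real driver elided:

	/* Sketch only: amdsmu_cmd() above adds timeouts and SMU_RES_REJECT_* handling. */
	while (amdsmu_read4(sc, SMU_REG_RESPONSE) == SMU_RES_WAIT)
		;					/* wait until the SMU is idle */
	amdsmu_write4(sc, SMU_REG_RESPONSE, SMU_RES_WAIT);	/* clear stale response */
	amdsmu_write4(sc, SMU_REG_MESSAGE, msg);	/* command ID */
	amdsmu_write4(sc, SMU_REG_ARGUMENT, arg);	/* command argument */
	while (amdsmu_read4(sc, SMU_REG_RESPONSE) == SMU_RES_WAIT)
		;					/* wait for completion */
	if (amdsmu_read4(sc, SMU_REG_RESPONSE) == SMU_RES_OK)
		ret = amdsmu_read4(sc, SMU_REG_ARGUMENT);	/* result, if any */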
diff --git a/sys/dev/amdsmu/amdsmu.h b/sys/dev/amdsmu/amdsmu.h new file mode 100644 index 000000000000..025887f7fe5a --- /dev/null +++ b/sys/dev/amdsmu/amdsmu.h @@ -0,0 +1,95 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2025 The FreeBSD Foundation + * + * This software was developed by Aymeric Wibo <obiwac@freebsd.org> + * under sponsorship from the FreeBSD Foundation. */ +#ifndef _AMDSMU_H_ +#define _AMDSMU_H_ + +#include <sys/param.h> +#include <sys/bus.h> +#include <sys/kernel.h> +#include <machine/bus.h> +#include <x86/cputypes.h> + +#include <dev/amdsmu/amdsmu_reg.h> + +#define SMU_RES_READ_PERIOD_US 50 +#define SMU_RES_READ_MAX 20000 + +static const struct amdsmu_product { + uint16_t amdsmu_vendorid; + uint16_t amdsmu_deviceid; +} amdsmu_products[] = { + { CPU_VENDOR_AMD, PCI_DEVICEID_AMD_REMBRANDT_ROOT }, + { CPU_VENDOR_AMD, PCI_DEVICEID_AMD_PHOENIX_ROOT }, + { CPU_VENDOR_AMD, PCI_DEVICEID_AMD_STRIX_POINT_ROOT }, +}; + +static const char *const amdsmu_ip_blocks_names[] = { + "DISPLAY", + "CPU", + "GFX", + "VDD", + "ACP", + "VCN", + "ISP", + "NBIO", + "DF", + "USB3_0", + "USB3_1", + "LAPIC", + "USB3_2", + "USB3_3", + "USB3_4", + "USB4_0", + "USB4_1", + "MPM", + "JPEG", + "IPU", + "UMSCH", + "VPE", +}; + +CTASSERT(nitems(amdsmu_ip_blocks_names) <= 32); + +struct amdsmu_softc { + struct sysctl_ctx_list *sysctlctx; + struct sysctl_oid *sysctlnode; + + struct resource *res; + bus_space_tag_t bus_tag; + + bus_space_handle_t smu_space; + bus_space_handle_t reg_space; + + uint8_t smu_program; + uint8_t smu_maj, smu_min, smu_rev; + + uint32_t active_ip_blocks; + struct sysctl_oid *ip_blocks_sysctlnode; + size_t ip_block_count; + struct sysctl_oid *ip_block_sysctlnodes[nitems(amdsmu_ip_blocks_names)]; + bool ip_blocks_active[nitems(amdsmu_ip_blocks_names)]; + + bus_space_handle_t metrics_space; + struct amdsmu_metrics metrics; + uint32_t idlemask; +}; + +static inline uint32_t +amdsmu_read4(const struct amdsmu_softc *sc, bus_size_t reg) +{ + return (bus_space_read_4(sc->bus_tag, sc->reg_space, reg)); +} + +static inline void +amdsmu_write4(const struct amdsmu_softc *sc, bus_size_t reg, uint32_t val) +{ + bus_space_write_4(sc->bus_tag, sc->reg_space, reg, val); +} + +#endif /* _AMDSMU_H_ */ diff --git a/sys/dev/amdsmu/amdsmu_reg.h b/sys/dev/amdsmu/amdsmu_reg.h new file mode 100644 index 000000000000..e685b34e6883 --- /dev/null +++ b/sys/dev/amdsmu/amdsmu_reg.h @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2025 The FreeBSD Foundation + * + * This software was developed by Aymeric Wibo <obiwac@freebsd.org> + * under sponsorship from the FreeBSD Foundation. + */ +#ifndef _AMDSMU_REG_H_ +#define _AMDSMU_REG_H_ + +#include <sys/types.h> + +/* + * TODO These are in common with amdtemp; should we find a way to factor these + * out? Also, there are way more of these. I couldn't find a centralized place + * which lists them though. + */ +#define PCI_DEVICEID_AMD_REMBRANDT_ROOT 0x14B5 +#define PCI_DEVICEID_AMD_PHOENIX_ROOT 0x14E8 +#define PCI_DEVICEID_AMD_STRIX_POINT_ROOT 0x14A4 + +#define SMU_INDEX_ADDRESS 0xB8 +#define SMU_INDEX_DATA 0xBC + +#define SMU_PHYSBASE_ADDR_LO 0x13B102E8 +#define SMU_PHYSBASE_ADDR_HI 0x13B102EC + +#define SMU_MEM_SIZE 0x1000 +#define SMU_REG_SPACE_OFF 0x10000 + +#define SMU_REG_MESSAGE 0x538 +#define SMU_REG_RESPONSE 0x980 +#define SMU_REG_ARGUMENT 0x9BC +#define SMU_REG_IDLEMASK 0xD14 + +enum amdsmu_res { + SMU_RES_WAIT = 0x00, + SMU_RES_OK = 0x01, + SMU_RES_REJECT_BUSY = 0xFC, + SMU_RES_REJECT_PREREQ = 0xFD, + SMU_RES_UNKNOWN = 0xFE, + SMU_RES_FAILED = 0xFF, +}; + +enum amdsmu_msg { + SMU_MSG_GETSMUVERSION = 0x02, + SMU_MSG_LOG_GETDRAM_ADDR_HI = 0x04, + SMU_MSG_LOG_GETDRAM_ADDR_LO = 0x05, + SMU_MSG_LOG_START = 0x06, + SMU_MSG_LOG_RESET = 0x07, + SMU_MSG_LOG_DUMP_DATA = 0x08, + SMU_MSG_GET_SUP_CONSTRAINTS = 0x09, +}; + +/* XXX Copied from Linux struct smu_metrics.
*/ +struct amdsmu_metrics { + uint32_t table_version; + uint32_t hint_count; + uint32_t s0i3_last_entry_status; + uint32_t time_last_in_s0i2; + uint64_t time_last_entering_s0i3; + uint64_t total_time_entering_s0i3; + uint64_t time_last_resuming; + uint64_t total_time_resuming; + uint64_t time_last_in_s0i3; + uint64_t total_time_in_s0i3; + uint64_t time_last_in_sw_drips; + uint64_t total_time_in_sw_drips; + /* + * This is how long each IP block was active for (us), i.e., blocking + * entry to S0i3. In Linux, these are called "timecondition_notmet_*". + * + * XXX Total active time for IP blocks seems to be buggy and reporting + * garbage (at least on Phoenix), so it's disabled for now. The last + * active time for the USB4_0 IP block also seems to be buggy. + */ + uint64_t ip_block_last_active_time[32]; +#ifdef IP_BLOCK_TOTAL_ACTIVE_TIME + uint64_t ip_block_total_active_time[32]; +#endif +} __attribute__((packed)); + +#endif /* _AMDSMU_REG_H_ */ diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c index 8547f21586e1..7a6b1cbdd736 100644 --- a/sys/dev/cxgbe/tom/t4_cpl_io.c +++ b/sys/dev/cxgbe/tom/t4_cpl_io.c @@ -703,7 +703,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop) for (m = sndptr; m != NULL; m = m->m_next) { int n; - if ((m->m_flags & M_NOTAVAIL) != 0) + if ((m->m_flags & M_NOTREADY) != 0) break; if (m->m_flags & M_EXTPG) { #ifdef KERN_TLS @@ -787,7 +787,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop) /* nothing to send */ if (plen == 0) { - KASSERT(m == NULL || (m->m_flags & M_NOTAVAIL) != 0, + KASSERT(m == NULL || (m->m_flags & M_NOTREADY) != 0, ("%s: nothing to send, but m != NULL is ready", __func__)); break; @@ -880,7 +880,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop) toep->txsd_avail--; t4_l2t_send(sc, wr, toep->l2te); - } while (m != NULL && (m->m_flags & M_NOTAVAIL) == 0); + } while (m != NULL && (m->m_flags & M_NOTREADY) == 0); /* Send a FIN if requested, but only if there's no more data to send */ if (m == NULL && toep->flags & TPF_SEND_FIN) diff --git a/sys/dev/cxgbe/tom/t4_tls.c b/sys/dev/cxgbe/tom/t4_tls.c index c6377980fca9..27c16b9988ae 100644 --- a/sys/dev/cxgbe/tom/t4_tls.c +++ b/sys/dev/cxgbe/tom/t4_tls.c @@ -563,7 +563,7 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop) * If there is no ready data to send, wait until more * data arrives. */ - if (m == NULL || (m->m_flags & M_NOTAVAIL) != 0) { + if (m == NULL || (m->m_flags & M_NOTREADY) != 0) { if (sowwakeup) sowwakeup_locked(so); else @@ -614,7 +614,7 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop) /* Shove if there is no additional data pending. */ shove = ((m->m_next == NULL || - (m->m_next->m_flags & M_NOTAVAIL) != 0)) && + (m->m_next->m_flags & M_NOTREADY) != 0)) && (tp->t_flags & TF_MORETOCOME) == 0; if (sb->sb_flags & SB_AUTOSIZE && diff --git a/sys/dev/drm2/drm_fb_helper.c b/sys/dev/drm2/drm_fb_helper.c index f67cc9f60d02..1f4abd255690 100644 --- a/sys/dev/drm2/drm_fb_helper.c +++ b/sys/dev/drm2/drm_fb_helper.c @@ -51,7 +51,7 @@ struct vt_kms_softc { struct task fb_mode_task; }; -/* Call restore out of vt(9) locks. */ +/* Call restore out of vt(4) locks. 
*/ static void vt_restore_fbdev_mode(void *arg, int pending) { diff --git a/sys/dev/efidev/efirt.c b/sys/dev/efidev/efirt.c index b0fa33daeca7..b55c1c191077 100644 --- a/sys/dev/efidev/efirt.c +++ b/sys/dev/efidev/efirt.c @@ -107,7 +107,8 @@ static int efi_status2err[25] = { enum efi_table_type { TYPE_ESRT = 0, - TYPE_PROP + TYPE_PROP, + TYPE_MEMORY_ATTR }; static int efi_enter(void); @@ -445,6 +446,42 @@ get_table_length(enum efi_table_type type, size_t *table_len, void **taddr) free(buf, M_TEMP); return (0); } + case TYPE_MEMORY_ATTR: + { + efi_guid_t guid = EFI_MEMORY_ATTRIBUTES_TABLE; + struct efi_memory_attribute_table *tbl_addr, *mem_addr; + int error; + void *buf; + size_t len = sizeof(struct efi_memory_attribute_table); + + error = efi_get_table(&guid, (void **)&tbl_addr); + if (error) + return (error); + + buf = malloc(len, M_TEMP, M_WAITOK); + error = physcopyout((vm_paddr_t)tbl_addr, buf, len); + if (error) { + free(buf, M_TEMP); + return (error); + } + + mem_addr = (struct efi_memory_attribute_table *)buf; + if (mem_addr->version != 2) { + free(buf, M_TEMP); + return (EINVAL); + } + len += mem_addr->descriptor_size * mem_addr->num_ents; + if (len > EFI_TABLE_ALLOC_MAX) { + free(buf, M_TEMP); + return (ENOMEM); + } + + *table_len = len; + if (taddr != NULL) + *taddr = tbl_addr; + free(buf, M_TEMP); + return (0); + } } return (ENOENT); } @@ -457,7 +494,8 @@ copy_table(efi_guid_t *guid, void **buf, size_t buf_len, size_t *table_len) enum efi_table_type type; } tables[] = { { EFI_TABLE_ESRT, TYPE_ESRT }, - { EFI_PROPERTIES_TABLE, TYPE_PROP } + { EFI_PROPERTIES_TABLE, TYPE_PROP }, + { EFI_MEMORY_ATTRIBUTES_TABLE, TYPE_MEMORY_ATTR } }; size_t table_idx; void *taddr; diff --git a/sys/dev/gpio/acpi_gpiobus.c b/sys/dev/gpio/acpi_gpiobus.c index 2987af634866..94f4e5771266 100644 --- a/sys/dev/gpio/acpi_gpiobus.c +++ b/sys/dev/gpio/acpi_gpiobus.c @@ -36,6 +36,7 @@ #include <dev/gpio/gpiobusvar.h> #include <dev/gpio/acpi_gpiobusvar.h> +#include <dev/gpio/gpiobus_internal.h> #include "gpiobus_if.h" @@ -356,7 +357,7 @@ acpi_gpiobus_attach(device_t dev) status = AcpiWalkResources(handle, "_AEI", acpi_gpiobus_enumerate_aei, &ctx); - if (ACPI_FAILURE(status)) + if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) device_printf(dev, "Failed to enumerate AEI resources\n"); return (0); diff --git a/sys/dev/gpio/gpiobus.c b/sys/dev/gpio/gpiobus.c index 2e2618805e7b..764bcb7e6ee8 100644 --- a/sys/dev/gpio/gpiobus.c +++ b/sys/dev/gpio/gpiobus.c @@ -39,6 +39,7 @@ #include <sys/sbuf.h> #include <dev/gpio/gpiobusvar.h> +#include <dev/gpio/gpiobus_internal.h> #include "gpiobus_if.h" @@ -109,10 +110,9 @@ gpio_alloc_intr_resource(device_t consumer_dev, int *rid, u_int alloc_flags, res = bus_alloc_resource(consumer_dev, SYS_RES_IRQ, rid, irq, irq, 1, alloc_flags); if (res == NULL) { - intr_free_intr_map_data((struct intr_map_data *)gpio_data); + intr_unmap_irq(irq); return (NULL); } - rman_set_virtual(res, gpio_data); return (res); } #else @@ -213,20 +213,40 @@ gpio_pin_is_active(gpio_pin_t pin, bool *active) return (0); } +/* + * Note that this function should only + * be used in cases where a pre-existing + * gpiobus_pin structure exists. In most + * cases, the gpio_pin_get_by_* functions + * suffice. 
+ */ +int +gpio_pin_acquire(gpio_pin_t gpio) +{ + device_t busdev; + + KASSERT(gpio != NULL, ("GPIO pin is NULL.")); + KASSERT(gpio->dev != NULL, ("GPIO pin device is NULL.")); + + busdev = GPIO_GET_BUS(gpio->dev); + if (busdev == NULL) + return (ENXIO); + + return (gpiobus_acquire_pin(busdev, gpio->pin)); +} + void gpio_pin_release(gpio_pin_t gpio) { device_t busdev; - if (gpio == NULL) - return; - + KASSERT(gpio != NULL, ("GPIO pin is NULL.")); KASSERT(gpio->dev != NULL, ("GPIO pin device is NULL.")); busdev = GPIO_GET_BUS(gpio->dev); - if (busdev != NULL) - gpiobus_release_pin(busdev, gpio->pin); + KASSERT(busdev != NULL, ("gpiobus dev is NULL.")); + gpiobus_release_pin(busdev, gpio->pin); free(gpio, M_DEVBUF); } @@ -293,7 +313,7 @@ gpiobus_print_pins(struct gpiobus_ivar *devi, struct sbuf *sb) } device_t -gpiobus_attach_bus(device_t dev) +gpiobus_add_bus(device_t dev) { device_t busdev; @@ -307,8 +327,24 @@ gpiobus_attach_bus(device_t dev) #ifdef FDT ofw_gpiobus_register_provider(dev); #endif - bus_attach_children(dev); + return (busdev); +} + +/* + * Attach a gpiobus child. + * Note that the controller is expected + * to be fully initialized at this point. + */ +device_t +gpiobus_attach_bus(device_t dev) +{ + device_t busdev; + + busdev = gpiobus_add_bus(dev); + if (busdev == NULL) + return (NULL); + bus_attach_children(dev); return (busdev); } @@ -385,14 +421,13 @@ gpiobus_acquire_pin(device_t bus, uint32_t pin) sc = device_get_softc(bus); /* Consistency check. */ if (pin >= sc->sc_npins) { - device_printf(bus, - "invalid pin %d, max: %d\n", pin, sc->sc_npins - 1); - return (-1); + panic("%s: invalid pin %d, max: %d", + device_get_nameunit(bus), pin, sc->sc_npins - 1); } /* Mark pin as mapped and give warning if it's already mapped. */ if (sc->sc_pins[pin].mapped) { device_printf(bus, "warning: pin %d is already mapped\n", pin); - return (-1); + return (EBUSY); } sc->sc_pins[pin].mapped = 1; @@ -400,7 +435,7 @@ gpiobus_acquire_pin(device_t bus, uint32_t pin) } /* Release mapped pin */ -int +void gpiobus_release_pin(device_t bus, uint32_t pin) { struct gpiobus_softc *sc; @@ -408,19 +443,15 @@ gpiobus_release_pin(device_t bus, uint32_t pin) sc = device_get_softc(bus); /* Consistency check. 
*/ if (pin >= sc->sc_npins) { - device_printf(bus, - "invalid pin %d, max=%d\n", - pin, sc->sc_npins - 1); - return (-1); + panic("%s: invalid pin %d, max: %d", + device_get_nameunit(bus), pin, sc->sc_npins - 1); } - if (!sc->sc_pins[pin].mapped) { - device_printf(bus, "pin %d is not mapped\n", pin); - return (-1); - } - sc->sc_pins[pin].mapped = 0; + if (!sc->sc_pins[pin].mapped) + panic("%s: pin %d is not mapped", device_get_nameunit(bus), + pin); - return (0); + sc->sc_pins[pin].mapped = 0; } static int @@ -435,8 +466,7 @@ gpiobus_acquire_child_pins(device_t dev, device_t child) device_printf(child, "cannot acquire pin %d\n", devi->pins[i]); while (--i >= 0) { - (void)gpiobus_release_pin(dev, - devi->pins[i]); + gpiobus_release_pin(dev, devi->pins[i]); } gpiobus_free_ivars(devi); return (EBUSY); @@ -835,6 +865,25 @@ gpiobus_alloc_resource(device_t bus, device_t child, int type, int *rid, end, count, flags)); } +static int +gpiobus_release_resource(device_t dev, device_t child, struct resource *r) +{ + int err; +#ifdef INTRNG + u_int irq; + + irq = rman_get_start(r); + MPASS(irq == rman_get_end(r)); +#endif + err = bus_generic_rman_release_resource(dev, child, r); + if (err != 0) + return (err); +#ifdef INTRNG + intr_unmap_irq(irq); +#endif + return (0); +} + static struct resource_list * gpiobus_get_resource_list(device_t bus __unused, device_t child) { @@ -1029,7 +1078,7 @@ static device_method_t gpiobus_methods[] = { DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_alloc_resource, gpiobus_alloc_resource), - DEVMETHOD(bus_release_resource, bus_generic_rman_release_resource), + DEVMETHOD(bus_release_resource, gpiobus_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_rman_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_rman_deactivate_resource), DEVMETHOD(bus_get_resource_list, gpiobus_get_resource_list), diff --git a/sys/dev/sound/midi/sequencer.h b/sys/dev/gpio/gpiobus_internal.h index 22ea0ae6c1b6..de3f57663132 100644 --- a/sys/dev/sound/midi/sequencer.h +++ b/sys/dev/gpio/gpiobus_internal.h @@ -1,8 +1,7 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * - * Copyright (c) 2003 Mathew Kanner - * Copyright (c) 1999 Seigo Tanimura + * Copyright (c) 2009 Oleksandr Tymoshenko <gonzo@freebsd.org> * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -25,65 +24,24 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * */ -/* - * Include file for the midi sequence driver. - */ - -#ifndef _SEQUENCER_H_ -#define _SEQUENCER_H_ - -#define NSEQ_MAX 16 +#ifndef __GPIOBUS_INTERNAL_H__ +#define __GPIOBUS_INTERNAL_H__ /* - * many variables should be reduced to a range. Here define a macro + * Functions shared between gpiobus and other bus classes that derive from it; + * these should not be called directly by other drivers. 
*/ - -#define RANGE(var, low, high) (var) = \ -((var)<(low)?(low) : (var)>(high)?(high) : (var)) - -#ifdef _KERNEL - -void seq_timer(void *arg); - -SYSCTL_DECL(_hw_midi_seq); - -extern int seq_debug; - -#define SEQ_DEBUG(y, x) \ - do { \ - if (seq_debug >= y) { \ - (x); \ - } \ - } while (0) - -SYSCTL_DECL(_hw_midi); - -#endif /* _KERNEL */ - -#define SYNTHPROP_MIDI 1 -#define SYNTHPROP_SYNTH 2 -#define SYNTHPROP_RX 4 -#define SYNTHPROP_TX 8 - -struct _midi_cmdtab { - int cmd; - char *name; -}; -typedef struct _midi_cmdtab midi_cmdtab; -extern midi_cmdtab cmdtab_seqevent[]; -extern midi_cmdtab cmdtab_seqioctl[]; -extern midi_cmdtab cmdtab_timer[]; -extern midi_cmdtab cmdtab_seqcv[]; -extern midi_cmdtab cmdtab_seqccmn[]; - -char *midi_cmdname(int cmd, midi_cmdtab * tab); - -enum { - MORE, - TIMERARMED, - QUEUEFULL -}; - +int gpiobus_attach(device_t); +int gpiobus_detach(device_t); +int gpiobus_init_softc(device_t); +int gpiobus_alloc_ivars(struct gpiobus_ivar *); +void gpiobus_free_ivars(struct gpiobus_ivar *); +int gpiobus_read_ivar(device_t, device_t, int, uintptr_t *); +int gpiobus_acquire_pin(device_t, uint32_t); +void gpiobus_release_pin(device_t, uint32_t); + +extern driver_t gpiobus_driver; #endif diff --git a/sys/dev/gpio/gpiobusvar.h b/sys/dev/gpio/gpiobusvar.h index 74783e112f89..7f504236a774 100644 --- a/sys/dev/gpio/gpiobusvar.h +++ b/sys/dev/gpio/gpiobusvar.h @@ -156,6 +156,8 @@ int gpio_pin_get_by_bus_pinnum(device_t _bus, uint32_t _pinnum, gpio_pin_t *_gp) /* Acquire a pin by child and index (used by direct children of gpiobus). */ int gpio_pin_get_by_child_index(device_t _child, uint32_t _idx, gpio_pin_t *_gp); +/* Acquire a pin from an existing gpio_pin_t. */ +int gpio_pin_acquire(gpio_pin_t gpio); /* Release a pin acquired via any gpio_pin_get_xxx() function. */ void gpio_pin_release(gpio_pin_t gpio); @@ -167,22 +169,9 @@ int gpio_pin_setflags(gpio_pin_t pin, uint32_t flags); struct resource *gpio_alloc_intr_resource(device_t consumer_dev, int *rid, u_int alloc_flags, gpio_pin_t pin, uint32_t intr_mode); -/* - * Functions shared between gpiobus and other bus classes that derive from it; - * these should not be called directly by other drivers. 
diff --git a/sys/dev/gpio/gpiobusvar.h b/sys/dev/gpio/gpiobusvar.h index 74783e112f89..7f504236a774 100644 --- a/sys/dev/gpio/gpiobusvar.h +++ b/sys/dev/gpio/gpiobusvar.h @@ -156,6 +156,8 @@ int gpio_pin_get_by_bus_pinnum(device_t _bus, uint32_t _pinnum, gpio_pin_t *_gp) /* Acquire a pin by child and index (used by direct children of gpiobus). */ int gpio_pin_get_by_child_index(device_t _child, uint32_t _idx, gpio_pin_t *_gp); +/* Acquire a pin from an existing gpio_pin_t. */ +int gpio_pin_acquire(gpio_pin_t gpio); /* Release a pin acquired via any gpio_pin_get_xxx() function. */ void gpio_pin_release(gpio_pin_t gpio); @@ -167,22 +169,9 @@ int gpio_pin_setflags(gpio_pin_t pin, uint32_t flags); struct resource *gpio_alloc_intr_resource(device_t consumer_dev, int *rid, u_int alloc_flags, gpio_pin_t pin, uint32_t intr_mode); -/* - * Functions shared between gpiobus and other bus classes that derive from it; - * these should not be called directly by other drivers. - */ int gpio_check_flags(uint32_t, uint32_t); +device_t gpiobus_add_bus(device_t); device_t gpiobus_attach_bus(device_t); int gpiobus_detach_bus(device_t); -int gpiobus_attach(device_t); -int gpiobus_detach(device_t); -int gpiobus_init_softc(device_t); -int gpiobus_alloc_ivars(struct gpiobus_ivar *); -void gpiobus_free_ivars(struct gpiobus_ivar *); -int gpiobus_read_ivar(device_t, device_t, int, uintptr_t *); -int gpiobus_acquire_pin(device_t, uint32_t); -int gpiobus_release_pin(device_t, uint32_t); - -extern driver_t gpiobus_driver; #endif /* __GPIOBUS_H__ */ diff --git a/sys/dev/gpio/gpiopps.c b/sys/dev/gpio/gpiopps.c index bb8afa5e062c..82620a50a798 100644 --- a/sys/dev/gpio/gpiopps.c +++ b/sys/dev/gpio/gpiopps.c @@ -160,7 +160,7 @@ gpiopps_detach(device_t dev) if (sc->ires != NULL) bus_release_resource(dev, SYS_RES_IRQ, sc->irid, sc->ires); if (sc->gpin != NULL) - gpiobus_release_pin(GPIO_GET_BUS(sc->gpin->dev), sc->gpin->pin); + gpio_pin_release(sc->gpin); return (0); } diff --git a/sys/dev/gpio/ofw_gpiobus.c b/sys/dev/gpio/ofw_gpiobus.c index 32dc5b55e698..fc5fb03d6824 100644 --- a/sys/dev/gpio/ofw_gpiobus.c +++ b/sys/dev/gpio/ofw_gpiobus.c @@ -36,6 +36,7 @@ #include <sys/module.h> #include <dev/gpio/gpiobusvar.h> +#include <dev/gpio/gpiobus_internal.h> #include <dev/ofw/ofw_bus.h> #include "gpiobus_if.h" diff --git a/sys/dev/gpio/pl061.c b/sys/dev/gpio/pl061.c index cc39790322b6..87d4310a6396 100644 --- a/sys/dev/gpio/pl061.c +++ b/sys/dev/gpio/pl061.c @@ -487,14 +487,21 @@ pl061_attach(device_t dev) } } + mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "pl061", MTX_SPIN); + + if (sc->sc_xref != 0 && !intr_pic_register(dev, sc->sc_xref)) { + device_printf(dev, "couldn't register PIC\n"); + PL061_LOCK_DESTROY(sc); + goto free_isrc; + } + sc->sc_busdev = gpiobus_attach_bus(dev); if (sc->sc_busdev == NULL) { device_printf(dev, "couldn't attach gpio bus\n"); + PL061_LOCK_DESTROY(sc); goto free_isrc; } - mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "pl061", MTX_SPIN); - return (0); free_isrc: @@ -503,6 +510,7 @@ free_isrc: * for (irq = 0; irq < PL061_NUM_GPIO; irq++) * intr_isrc_deregister(PIC_INTR_ISRC(sc, irq)); */ + bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_hdlr); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, sc->sc_irq_res); free_pic: diff --git a/sys/dev/gpio/pl061.h b/sys/dev/gpio/pl061.h index 809a1168493d..d9fe23e502b1 100644 --- a/sys/dev/gpio/pl061.h +++ b/sys/dev/gpio/pl061.h @@ -46,6 +46,7 @@ struct pl061_softc { struct resource *sc_mem_res; struct resource *sc_irq_res; void *sc_irq_hdlr; + intptr_t sc_xref; int sc_mem_rid; int sc_irq_rid; struct pl061_pin_irqsrc sc_isrcs[PL061_NUM_GPIO]; diff --git a/sys/dev/gpio/pl061_acpi.c b/sys/dev/gpio/pl061_acpi.c index f5885025083e..8e9921261e4e 100644 --- a/sys/dev/gpio/pl061_acpi.c +++ b/sys/dev/gpio/pl061_acpi.c @@ -67,19 +67,12 @@ pl061_acpi_probe(device_t dev) static int pl061_acpi_attach(device_t dev) { - int error; + struct pl061_softc *sc; - error = pl061_attach(dev); - if (error != 0) - return (error); + sc = device_get_softc(dev); + sc->sc_xref = ACPI_GPIO_XREF; - if (!intr_pic_register(dev, ACPI_GPIO_XREF)) { - device_printf(dev, "couldn't register PIC\n"); - pl061_detach(dev); - error = ENXIO; - } - - return (error); + return (pl061_attach(dev)); } static device_method_t pl061_acpi_methods[] = { diff --git a/sys/dev/gpio/pl061_fdt.c b/sys/dev/gpio/pl061_fdt.c index aa22298b43c6..681b3ccdfdeb 100644 --- a/sys/dev/gpio/pl061_fdt.c +++ b/sys/dev/gpio/pl061_fdt.c @@ -61,19 +61,12 @@
pl061_fdt_probe(device_t dev) static int pl061_fdt_attach(device_t dev) { - int error; + struct pl061_softc *sc; - error = pl061_attach(dev); - if (error != 0) - return (error); + sc = device_get_softc(dev); + sc->sc_xref = OF_xref_from_node(ofw_bus_get_node(dev)); - if (!intr_pic_register(dev, OF_xref_from_node(ofw_bus_get_node(dev)))) { - device_printf(dev, "couldn't register PIC\n"); - pl061_detach(dev); - error = ENXIO; - } - - return (error); + return (pl061_attach(dev)); } static device_method_t pl061_fdt_methods[] = { diff --git a/sys/dev/gpio/qoriq_gpio.c b/sys/dev/gpio/qoriq_gpio.c index 25dfccede29f..8b44cd256c79 100644 --- a/sys/dev/gpio/qoriq_gpio.c +++ b/sys/dev/gpio/qoriq_gpio.c @@ -369,11 +369,6 @@ qoriq_gpio_attach(device_t dev) for (i = 0; i <= MAXPIN; i++) sc->sc_pins[i].gp_caps = DEFAULT_CAPS; - sc->busdev = gpiobus_attach_bus(dev); - if (sc->busdev == NULL) { - qoriq_gpio_detach(dev); - return (ENOMEM); - } /* * Enable the GPIO Input Buffer for all GPIOs. * This is safe on devices without a GPIBE register, because those @@ -384,6 +379,12 @@ qoriq_gpio_attach(device_t dev) OF_device_register_xref(OF_xref_from_node(ofw_bus_get_node(dev)), dev); + sc->busdev = gpiobus_attach_bus(dev); + if (sc->busdev == NULL) { + qoriq_gpio_detach(dev); + return (ENOMEM); + } + return (0); } diff --git a/sys/dev/hyperv/vmbus/vmbus_chan.c b/sys/dev/hyperv/vmbus/vmbus_chan.c index 189a3e66a039..7ea60a499c72 100644 --- a/sys/dev/hyperv/vmbus/vmbus_chan.c +++ b/sys/dev/hyperv/vmbus/vmbus_chan.c @@ -1555,7 +1555,7 @@ vmbus_event_flags_proc(struct vmbus_softc *sc, volatile u_long *event_flags, continue; flags = atomic_swap_long(&event_flags[f], 0); - chid_base = f << VMBUS_EVTFLAG_SHIFT; + chid_base = f * VMBUS_EVTFLAG_LEN; while ((chid_ofs = ffsl(flags)) != 0) { struct vmbus_channel *chan; @@ -1599,7 +1599,7 @@ vmbus_event_proc_compat(struct vmbus_softc *sc, int cpu) eventf = VMBUS_PCPU_GET(sc, event_flags, cpu) + VMBUS_SINT_MESSAGE; if (atomic_testandclear_long(&eventf->evt_flags[0], 0)) { vmbus_event_flags_proc(sc, sc->vmbus_rx_evtflags, - VMBUS_CHAN_MAX_COMPAT >> VMBUS_EVTFLAG_SHIFT); + VMBUS_CHAN_MAX_COMPAT / VMBUS_EVTFLAG_LEN); } } @@ -1903,7 +1903,7 @@ vmbus_chan_msgproc_choffer(struct vmbus_softc *sc, * Setup event flag. 
*/ chan->ch_evtflag = - &sc->vmbus_tx_evtflags[chan->ch_id >> VMBUS_EVTFLAG_SHIFT]; + &sc->vmbus_tx_evtflags[chan->ch_id / VMBUS_EVTFLAG_LEN]; chan->ch_evtflag_mask = 1UL << (chan->ch_id & VMBUS_EVTFLAG_MASK); /* diff --git a/sys/dev/hyperv/vmbus/vmbus_reg.h b/sys/dev/hyperv/vmbus/vmbus_reg.h index 4aa729475b5d..76cdca0ebeb2 100644 --- a/sys/dev/hyperv/vmbus/vmbus_reg.h +++ b/sys/dev/hyperv/vmbus/vmbus_reg.h @@ -60,16 +60,10 @@ CTASSERT(sizeof(struct vmbus_message) == VMBUS_MSG_SIZE); * Hyper-V SynIC event flags */ -#ifdef __LP64__ -#define VMBUS_EVTFLAGS_MAX 32 -#define VMBUS_EVTFLAG_SHIFT 6 -#else -#define VMBUS_EVTFLAGS_MAX 64 -#define VMBUS_EVTFLAG_SHIFT 5 -#endif -#define VMBUS_EVTFLAG_LEN (1 << VMBUS_EVTFLAG_SHIFT) +#define VMBUS_EVTFLAG_LEN (sizeof(u_long) * 8) #define VMBUS_EVTFLAG_MASK (VMBUS_EVTFLAG_LEN - 1) #define VMBUS_EVTFLAGS_SIZE 256 +#define VMBUS_EVTFLAGS_MAX (VMBUS_EVTFLAGS_SIZE / sizeof(u_long)) struct vmbus_evtflags { u_long evt_flags[VMBUS_EVTFLAGS_MAX]; diff --git a/sys/dev/ice/ice_features.h b/sys/dev/ice/ice_features.h index 821abe4806ca..5b23757b1c98 100644 --- a/sys/dev/ice/ice_features.h +++ b/sys/dev/ice/ice_features.h @@ -91,7 +91,9 @@ enum feat_list { static inline void ice_disable_unsupported_features(ice_bitmap_t __unused *bitmap) { +#ifndef PCI_IOV ice_clear_bit(ICE_FEATURE_SRIOV, bitmap); +#endif #ifndef DEV_NETMAP ice_clear_bit(ICE_FEATURE_NETMAP, bitmap); #endif diff --git a/sys/dev/ice/ice_iflib.h b/sys/dev/ice/ice_iflib.h index 3a5dc201189a..e1d5307a9516 100644 --- a/sys/dev/ice/ice_iflib.h +++ b/sys/dev/ice/ice_iflib.h @@ -139,6 +139,9 @@ struct ice_irq_vector { * @tc: traffic class queue belongs to * @q_handle: qidx in tc; used in TXQ enable functions * + * ice_iov.c requires the following parameters (when PCI_IOV is defined): + * @itr_idx: ITR index to use for this queue + * * Other parameters may be iflib driver specific */ struct ice_tx_queue { @@ -153,6 +156,9 @@ struct ice_tx_queue { u32 me; u16 q_handle; u8 tc; +#ifdef PCI_IOV + u8 itr_idx; +#endif /* descriptor writeback status */ qidx_t *tx_rsq; @@ -175,6 +181,9 @@ struct ice_tx_queue { * @stats: queue statistics * @tc: traffic class queue belongs to * + * ice_iov.c requires the following parameters (when PCI_IOV is defined): + * @itr_idx: ITR index to use for this queue + * * Other parameters may be iflib driver specific */ struct ice_rx_queue { @@ -187,6 +196,9 @@ struct ice_rx_queue { struct ice_irq_vector *irqv; u32 me; u8 tc; +#ifdef PCI_IOV + u8 itr_idx; +#endif struct if_irq que_irq; }; @@ -332,6 +344,10 @@ struct ice_softc { ice_declare_bitmap(feat_cap, ICE_FEATURE_COUNT); ice_declare_bitmap(feat_en, ICE_FEATURE_COUNT); +#ifdef PCI_IOV + struct ice_vf *vfs; + u16 num_vfs; +#endif struct ice_resmgr os_imgr; /* For mirror interface */ struct ice_mirr_if *mirr_if; diff --git a/sys/dev/ice/ice_iov.c b/sys/dev/ice/ice_iov.c new file mode 100644 index 000000000000..c5a3e1060e44 --- /dev/null +++ b/sys/dev/ice/ice_iov.c @@ -0,0 +1,1856 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright (c) 2025, Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file ice_iov.c + * @brief Virtualization support functions + * + * Contains functions for enabling and managing PCIe virtual function devices, + * including enabling new VFs, and managing VFs over the virtchnl interface. + */ + +#include "ice_iov.h" + +static struct ice_vf *ice_iov_get_vf(struct ice_softc *sc, int vf_num); +static void ice_iov_ready_vf(struct ice_softc *sc, struct ice_vf *vf); +static void ice_reset_vf(struct ice_softc *sc, struct ice_vf *vf, + bool trigger_vflr); +static void ice_iov_setup_intr_mapping(struct ice_softc *sc, struct ice_vf *vf); + +static void ice_vc_version_msg(struct ice_softc *sc, struct ice_vf *vf, + u8 *msg_buf); +static void ice_vc_get_vf_res_msg(struct ice_softc *sc, struct ice_vf *vf, + u8 *msg_buf); +static void ice_vc_add_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf, + u8 *msg_buf); +static void ice_vc_del_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf, + u8 *msg_buf); +static bool ice_vc_isvalid_ring_len(u16 ring_len); +static void ice_vc_cfg_vsi_qs_msg(struct ice_softc *sc, struct ice_vf *vf, + u8 *msg_buf); +static void ice_vc_cfg_rss_key_msg(struct ice_softc *sc, struct ice_vf *vf, + u8 *msg_buf); +static void ice_vc_set_rss_hena_msg(struct ice_softc *sc, struct ice_vf *vf, + u8 *msg_buf); +static void ice_vc_enable_queues_msg(struct ice_softc *sc, struct ice_vf *vf, + u8 *msg_buf); +static void ice_vc_notify_vf_link_state(struct ice_softc *sc, struct ice_vf *vf); +static void ice_vc_disable_queues_msg(struct ice_softc *sc, struct ice_vf *vf, + u8 *msg_buf); +static void ice_vc_cfg_irq_map_msg(struct ice_softc *sc, struct ice_vf *vf, + u8 *msg_buf); +static void ice_vc_get_stats_msg(struct ice_softc *sc, struct ice_vf *vf, + u8 *msg_buf); +static void ice_eth_stats_to_virtchnl_eth_stats(struct ice_eth_stats *istats, + struct virtchnl_eth_stats *vstats); +static void ice_vc_cfg_rss_lut_msg(struct ice_softc *sc, struct ice_vf *vf, + u8 *msg_buf); +static void ice_vc_cfg_promisc_mode_msg(struct ice_softc *sc, struct ice_vf *vf, + u8 *msg_buf); +static void ice_vc_add_vlan_msg(struct ice_softc *sc, struct ice_vf *vf, + u8 *msg_buf); +static void ice_vc_del_vlan_msg(struct ice_softc *sc, struct ice_vf *vf, + u8 *msg_buf); +static enum virtchnl_status_code ice_iov_err_to_virt_err(int ice_err); +static int 
ice_vf_validate_mac(struct ice_vf *vf, const uint8_t *addr); + +/** + * ice_iov_attach - Initialize SR-IOV PF host support + * @sc: device softc structure + * + * Initialize SR-IOV PF host support at the end of the driver attach process. + * + * @pre Must be called from sleepable context (calls malloc() w/ M_WAITOK) + * + * @returns 0 if successful, or + * - ENOMEM if there is no memory for the PF/VF schemas or iov device + * - ENXIO if the device isn't PCI-E or doesn't support the same SR-IOV + * version as the kernel + * - ENOENT if the device doesn't have the SR-IOV capability + */ +int +ice_iov_attach(struct ice_softc *sc) +{ + device_t dev = sc->dev; + nvlist_t *pf_schema, *vf_schema; + int error; + + pf_schema = pci_iov_schema_alloc_node(); + vf_schema = pci_iov_schema_alloc_node(); + + pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); + pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof", + IOV_SCHEMA_HASDEFAULT, TRUE); + pci_iov_schema_add_bool(vf_schema, "allow-set-mac", + IOV_SCHEMA_HASDEFAULT, FALSE); + pci_iov_schema_add_bool(vf_schema, "allow-promisc", + IOV_SCHEMA_HASDEFAULT, FALSE); + pci_iov_schema_add_uint16(vf_schema, "num-queues", + IOV_SCHEMA_HASDEFAULT, ICE_DEFAULT_VF_QUEUES); + pci_iov_schema_add_uint16(vf_schema, "mirror-src-vsi", + IOV_SCHEMA_HASDEFAULT, ICE_INVALID_MIRROR_VSI); + pci_iov_schema_add_uint16(vf_schema, "max-vlan-allowed", + IOV_SCHEMA_HASDEFAULT, ICE_DEFAULT_VF_VLAN_LIMIT); + pci_iov_schema_add_uint16(vf_schema, "max-mac-filters", + IOV_SCHEMA_HASDEFAULT, ICE_DEFAULT_VF_FILTER_LIMIT); + + error = pci_iov_attach(dev, pf_schema, vf_schema); + if (error != 0) { + device_printf(dev, + "pci_iov_attach failed (error=%s)\n", + ice_err_str(error)); + ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en); + } else + ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_en); + + return (error); +} + +/** + * ice_iov_detach - Teardown SR-IOV PF host support + * @sc: device softc structure + * + * Teardown SR-IOV PF host support at the start of the driver detach process. + * + * @returns 0 if successful or IOV support hasn't been setup, or + * - EBUSY if VFs still exist + */ +int +ice_iov_detach(struct ice_softc *sc) +{ + device_t dev = sc->dev; + int error; + + error = pci_iov_detach(dev); + if (error != 0) { + device_printf(dev, + "pci_iov_detach failed (error=%s)\n", + ice_err_str(error)); + } + + return (error); +} + +/** + * ice_iov_init - Called by the OS before the first VF is created. 
+ * @sc: device softc structure + * @num_vfs: number of VFs to setup resources for + * @params: configuration parameters for the PF + * + * @returns 0 if successful or an error code on failure + */ +int +ice_iov_init(struct ice_softc *sc, uint16_t num_vfs, const nvlist_t *params __unused) +{ + /* Allocate array of VFs, for tracking */ + sc->vfs = (struct ice_vf *)malloc(sizeof(struct ice_vf) * num_vfs, M_ICE, M_NOWAIT | + M_ZERO); + if (sc->vfs == NULL) + return (ENOMEM); + + /* Initialize each VF with basic information */ + for (int i = 0; i < num_vfs; i++) + sc->vfs[i].vf_num = i; + + /* Save off number of configured VFs */ + sc->num_vfs = num_vfs; + + return (0); +} + +/** + * ice_iov_get_vf - Get pointer to VF at given index + * @sc: device softc structure + * @vf_num: Index of VF to retrieve + * + * @remark will throw an assertion if vf_num is not in the + * range of allocated VFs + * + * @returns a pointer to the VF structure at the given index + */ +static struct ice_vf * +ice_iov_get_vf(struct ice_softc *sc, int vf_num) +{ + MPASS(vf_num < sc->num_vfs); + + return &sc->vfs[vf_num]; +} + +/** + * ice_iov_add_vf - Called by the OS for each VF to create + * @sc: device softc structure + * @vfnum: index of VF to configure + * @params: configuration parameters for the VF + * + * @returns 0 if successful or an error code on failure + */ +int +ice_iov_add_vf(struct ice_softc *sc, uint16_t vfnum, const nvlist_t *params) +{ + struct ice_tx_queue *txq; + struct ice_rx_queue *rxq; + device_t dev = sc->dev; + struct ice_vsi *vsi; + struct ice_vf *vf; + int vf_num_queues; + const void *mac; + size_t size; + int error; + int i; + + vf = ice_iov_get_vf(sc, vfnum); + vf->vf_flags = VF_FLAG_ENABLED; + + /* This VF needs at least one VSI */ + vsi = ice_alloc_vsi(sc, ICE_VSI_VF); + if (vsi == NULL) + return (ENOMEM); + vf->vsi = vsi; + vsi->vf_num = vfnum; + + vf_num_queues = nvlist_get_number(params, "num-queues"); + /* Validate and clamp value if invalid */ + if (vf_num_queues < 1 || vf_num_queues > ICE_MAX_SCATTERED_QUEUES) + device_printf(dev, "Invalid num-queues (%d) for VF %d\n", + vf_num_queues, vf->vf_num); + if (vf_num_queues < 1) { + device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num); + vf_num_queues = 1; + } else if (vf_num_queues > ICE_MAX_SCATTERED_QUEUES) { + device_printf(dev, "Setting VF %d num-queues to %d\n", + vf->vf_num, ICE_MAX_SCATTERED_QUEUES); + vf_num_queues = ICE_MAX_SCATTERED_QUEUES; + } + vsi->qmap_type = ICE_RESMGR_ALLOC_SCATTERED; + + /* Reserve VF queue allocation from PF queues */ + ice_alloc_vsi_qmap(vsi, vf_num_queues, vf_num_queues); + vsi->num_tx_queues = vsi->num_rx_queues = vf_num_queues; + + /* Assign Tx queues from PF space */ + error = ice_resmgr_assign_scattered(&sc->tx_qmgr, vsi->tx_qmap, + vsi->num_tx_queues); + if (error) { + device_printf(sc->dev, "Unable to assign VF Tx queues: %s\n", + ice_err_str(error)); + goto release_vsi; + } + + /* Assign Rx queues from PF space */ + error = ice_resmgr_assign_scattered(&sc->rx_qmgr, vsi->rx_qmap, + vsi->num_rx_queues); + if (error) { + device_printf(sc->dev, "Unable to assign VF Rx queues: %s\n", + ice_err_str(error)); + goto release_vsi; + } + + vsi->max_frame_size = ICE_MAX_FRAME_SIZE; + + /* Allocate queue structure memory */ + vsi->tx_queues = (struct ice_tx_queue *) + malloc(sizeof(struct ice_tx_queue) * vsi->num_tx_queues, M_ICE, + M_NOWAIT | M_ZERO); + if (!vsi->tx_queues) { + device_printf(sc->dev, "VF-%d: Unable to allocate Tx queue memory\n", + vfnum); + error = ENOMEM; + goto release_vsi; 
+ } + for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) { + txq->me = i; + txq->vsi = vsi; + } + + /* Allocate queue structure memory */ + vsi->rx_queues = (struct ice_rx_queue *) + malloc(sizeof(struct ice_rx_queue) * vsi->num_rx_queues, M_ICE, + M_NOWAIT | M_ZERO); + if (!vsi->rx_queues) { + device_printf(sc->dev, "VF-%d: Unable to allocate Rx queue memory\n", + vfnum); + error = ENOMEM; + goto free_txqs; + } + for (i = 0, rxq = vsi->rx_queues; i < vsi->num_rx_queues; i++, rxq++) { + rxq->me = i; + rxq->vsi = vsi; + } + + /* Allocate space to store the IRQ vector data */ + vf->num_irq_vectors = vf_num_queues + 1; + vf->tx_irqvs = (struct ice_irq_vector *) + malloc(sizeof(struct ice_irq_vector) * (vf->num_irq_vectors), + M_ICE, M_NOWAIT); + if (!vf->tx_irqvs) { + device_printf(sc->dev, + "Unable to allocate TX irqv memory for VF-%d's %d vectors\n", + vfnum, vf->num_irq_vectors); + error = ENOMEM; + goto free_rxqs; + } + vf->rx_irqvs = (struct ice_irq_vector *) + malloc(sizeof(struct ice_irq_vector) * (vf->num_irq_vectors), + M_ICE, M_NOWAIT); + if (!vf->rx_irqvs) { + device_printf(sc->dev, + "Unable to allocate RX irqv memory for VF-%d's %d vectors\n", + vfnum, vf->num_irq_vectors); + error = ENOMEM; + goto free_txirqvs; + } + + /* Assign VF interrupts from PF space */ + if (!(vf->vf_imap = + (u16 *)malloc(sizeof(u16) * vf->num_irq_vectors, + M_ICE, M_NOWAIT))) { + device_printf(dev, "Unable to allocate VF-%d imap memory\n", vfnum); + error = ENOMEM; + goto free_rxirqvs; + } + error = ice_resmgr_assign_contiguous(&sc->dev_imgr, vf->vf_imap, vf->num_irq_vectors); + if (error) { + device_printf(dev, "Unable to assign VF-%d interrupt mapping: %s\n", + vfnum, ice_err_str(error)); + goto free_imap; + } + + if (nvlist_exists_binary(params, "mac-addr")) { + mac = nvlist_get_binary(params, "mac-addr", &size); + memcpy(vf->mac, mac, ETHER_ADDR_LEN); + + if (nvlist_get_bool(params, "allow-set-mac")) + vf->vf_flags |= VF_FLAG_SET_MAC_CAP; + } else + /* + * If the administrator has not specified a MAC address then + * we must allow the VF to choose one. 
+ */ + vf->vf_flags |= VF_FLAG_SET_MAC_CAP; + + if (nvlist_get_bool(params, "mac-anti-spoof")) + vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF; + + if (nvlist_get_bool(params, "allow-promisc")) + vf->vf_flags |= VF_FLAG_PROMISC_CAP; + + vsi->mirror_src_vsi = nvlist_get_number(params, "mirror-src-vsi"); + + vf->vlan_limit = nvlist_get_number(params, "max-vlan-allowed"); + vf->mac_filter_limit = nvlist_get_number(params, "max-mac-filters"); + + vf->vf_flags |= VF_FLAG_VLAN_CAP; + + /* Create and set up VSI in HW */ + error = ice_initialize_vsi(vsi); + if (error) { + device_printf(sc->dev, "Unable to initialize VF %d VSI: %s\n", + vfnum, ice_err_str(error)); + goto release_imap; + } + + /* Add the broadcast address */ + error = ice_add_vsi_mac_filter(vsi, broadcastaddr); + if (error) { + device_printf(sc->dev, "Unable to add broadcast filter VF %d VSI: %s\n", + vfnum, ice_err_str(error)); + goto release_imap; + } + + ice_iov_ready_vf(sc, vf); + + return (0); + +release_imap: + ice_resmgr_release_map(&sc->dev_imgr, vf->vf_imap, + vf->num_irq_vectors); +free_imap: + free(vf->vf_imap, M_ICE); + vf->vf_imap = NULL; +free_rxirqvs: + free(vf->rx_irqvs, M_ICE); + vf->rx_irqvs = NULL; +free_txirqvs: + free(vf->tx_irqvs, M_ICE); + vf->tx_irqvs = NULL; +free_rxqs: + free(vsi->rx_queues, M_ICE); + vsi->rx_queues = NULL; +free_txqs: + free(vsi->tx_queues, M_ICE); + vsi->tx_queues = NULL; +release_vsi: + ice_release_vsi(vsi); + vf->vsi = NULL; + return (error); +} + +/** + * ice_iov_uninit - Called by the OS when VFs are destroyed + * @sc: device softc structure + */ +void +ice_iov_uninit(struct ice_softc *sc) +{ + struct ice_vf *vf; + struct ice_vsi *vsi; + + /* Release per-VF resources */ + for (int i = 0; i < sc->num_vfs; i++) { + vf = &sc->vfs[i]; + vsi = vf->vsi; + + /* Free VF interrupt reservation */ + if (vf->vf_imap) { + free(vf->vf_imap, M_ICE); + vf->vf_imap = NULL; + } + + /* Free queue interrupt mapping trackers */ + if (vf->tx_irqvs) { + free(vf->tx_irqvs, M_ICE); + vf->tx_irqvs = NULL; + } + if (vf->rx_irqvs) { + free(vf->rx_irqvs, M_ICE); + vf->rx_irqvs = NULL; + } + + if (!vsi) + continue; + + /* Free VSI queues */ + if (vsi->tx_queues) { + free(vsi->tx_queues, M_ICE); + vsi->tx_queues = NULL; + } + if (vsi->rx_queues) { + free(vsi->rx_queues, M_ICE); + vsi->rx_queues = NULL; + } + + ice_release_vsi(vsi); + vf->vsi = NULL; + } + + /* Release memory used for VF tracking */ + if (sc->vfs) { + free(sc->vfs, M_ICE); + sc->vfs = NULL; + } + sc->num_vfs = 0; +} + +/** + * ice_iov_handle_vflr - Process VFLR event + * @sc: device softc structure + * + * Identifies which VFs have been reset and re-configures + * them. + */ +void +ice_iov_handle_vflr(struct ice_softc *sc) +{ + struct ice_hw *hw = &sc->hw; + struct ice_vf *vf; + u32 reg, reg_idx, bit_idx; + + for (int i = 0; i < sc->num_vfs; i++) { + vf = &sc->vfs[i]; + + reg_idx = (hw->func_caps.vf_base_id + vf->vf_num) / 32; + bit_idx = (hw->func_caps.vf_base_id + vf->vf_num) % 32; + reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx)); + if (reg & BIT(bit_idx)) + ice_reset_vf(sc, vf, false); + } +}
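The nvlist parameters read in ice_iov_add_vf() above ("num-queues", "mac-addr", "allow-set-mac", and so on) are exactly the keys registered with pci_iov_schema_add_*() in ice_iov_attach(), so a host administrator drives them through iovctl(8). A hypothetical iovctl.conf(5) fragment for two VFs; the device name and values are illustrative only:

	PF {
		device : "ice0";
		num_vfs : 2;
	}

	VF-0 {
		mac-addr : "02:00:00:00:00:01";
		allow-set-mac : true;
		num-queues : 4;
	}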
+ */ +static void +ice_iov_ready_vf(struct ice_softc *sc, struct ice_vf *vf) +{ + struct ice_hw *hw = &sc->hw; + u32 reg; + + /* Clear the triggering bit */ + reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_num)); + reg &= ~VPGEN_VFRTRIG_VFSWR_M; + wr32(hw, VPGEN_VFRTRIG(vf->vf_num), reg); + + /* Setup VF interrupt allocation and mapping */ + ice_iov_setup_intr_mapping(sc, vf); + + /* Indicate to the VF that reset is done */ + wr32(hw, VFGEN_RSTAT(vf->vf_num), VIRTCHNL_VFR_VFACTIVE); + + ice_flush(hw); +} + +/** + * ice_reset_vf - Perform a hardware reset (VFR) on a VF + * @sc: device softc structure + * @vf: driver's VF structure for VF to be reset + * @trigger_vflr: trigger a reset or only handle already executed reset + * + * Performs a VFR for the given VF. This function busy waits until the + * reset completes in the HW, notifies the VF that the reset is done + * by setting a bit in a HW register, then returns. + * + * @remark This also sets up the PF<->VF interrupt mapping and allocations in + * the hardware after the hardware reset is finished, via + * ice_iov_setup_intr_mapping() + */ +static void +ice_reset_vf(struct ice_softc *sc, struct ice_vf *vf, bool trigger_vflr) +{ + u16 global_vf_num, reg_idx, bit_idx; + struct ice_hw *hw = &sc->hw; + int status; + u32 reg; + int i; + + global_vf_num = vf->vf_num + hw->func_caps.vf_base_id; + + if (trigger_vflr) { + reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_num)); + reg |= VPGEN_VFRTRIG_VFSWR_M; + wr32(hw, VPGEN_VFRTRIG(vf->vf_num), reg); + } + + /* clear the VFLR bit for the VF in a GLGEN_VFLRSTAT register */ + reg_idx = (global_vf_num) / 32; + bit_idx = (global_vf_num) % 32; + wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); + ice_flush(hw); + + /* Wait until there are no pending PCI transactions */ + wr32(hw, PF_PCI_CIAA, + ICE_PCIE_DEV_STATUS | (global_vf_num << PF_PCI_CIAA_VF_NUM_S)); + + for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) { + reg = rd32(hw, PF_PCI_CIAD); + if (!(reg & PCIEM_STA_TRANSACTION_PND)) + break; + + DELAY(ICE_PCI_CIAD_WAIT_DELAY_US); + } + if (i == ICE_PCI_CIAD_WAIT_COUNT) + device_printf(sc->dev, + "VF-%d PCI transactions stuck\n", vf->vf_num); + + /* Disable TX queues, which is required during VF reset */ + status = ice_dis_vsi_txq(hw->port_info, vf->vsi->idx, 0, 0, NULL, NULL, + NULL, ICE_VF_RESET, vf->vf_num, NULL); + if (status) + device_printf(sc->dev, + "%s: Failed to disable LAN Tx queues: err %s aq_err %s\n", + __func__, ice_status_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + + /* Then check for the VF reset to finish in HW */ + for (i = 0; i < ICE_VPGEN_VFRSTAT_WAIT_COUNT; i++) { + reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_num)); + if ((reg & VPGEN_VFRSTAT_VFRD_M)) + break; + + DELAY(ICE_VPGEN_VFRSTAT_WAIT_DELAY_US); + } + if (i == ICE_VPGEN_VFRSTAT_WAIT_COUNT) + device_printf(sc->dev, + "VF-%d Reset is stuck\n", vf->vf_num); + + ice_iov_ready_vf(sc, vf); +} + +/** + * ice_vc_get_vf_res_msg - Handle VIRTCHNL_OP_GET_VF_RESOURCES msg from VF + * @sc: device private structure + * @vf: VF tracking structure + * @msg_buf: raw message buffer from the VF + * + * Receives a message from the VF listing its supported capabilities, and + * replies to the VF with information about what resources the PF has + * allocated for the VF. + * + * @remark This always replies to the VF with a success status; it does not + * fail. It's up to the VF driver to reject or complain about the PF's response. 
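+ *
+ * As an illustrative summary of the code below, the reply describes a
+ * single VSI:
+ *
+ *	vf_res = { num_vsis = 1, num_queue_pairs, max_vectors = pairs + 1,
+ *		   rss_key_size, rss_lut_size, vf_cap_flags };
+ *	vf_res->vsi_res[0] = { vsi_id, num_queue_pairs,
+ *			       VIRTCHNL_VSI_SRIOV, default_mac_addr };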
+ */ +static void +ice_vc_get_vf_res_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) +{ + struct ice_hw *hw = &sc->hw; + struct virtchnl_vf_resource *vf_res; + struct virtchnl_vsi_resource *vsi_res; + u16 vf_res_len; + u32 vf_caps; + + /* XXX: Only support one VSI per VF, so this size doesn't need adjusting */ + vf_res_len = sizeof(struct virtchnl_vf_resource); + vf_res = (struct virtchnl_vf_resource *)malloc(vf_res_len, M_ICE, + M_WAITOK | M_ZERO); + + vf_res->num_vsis = 1; + vf_res->num_queue_pairs = vf->vsi->num_tx_queues; + vf_res->max_vectors = vf_res->num_queue_pairs + 1; + + vf_res->rss_key_size = ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE; + vf_res->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE; + vf_res->max_mtu = 0; + + vf_res->vf_cap_flags = VF_BASE_MODE_OFFLOADS; + if (msg_buf != NULL) { + vf_caps = *((u32 *)(msg_buf)); + + if (vf_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) + vf_res->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED; + + if (vf_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) + vf_res->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; + } + + vsi_res = &vf_res->vsi_res[0]; + vsi_res->vsi_id = vf->vsi->idx; + vsi_res->num_queue_pairs = vf->vsi->num_tx_queues; + vsi_res->vsi_type = VIRTCHNL_VSI_SRIOV; + vsi_res->qset_handle = 0; + if (!ETHER_IS_ZERO(vf->mac)) + memcpy(vsi_res->default_mac_addr, vf->mac, ETHER_ADDR_LEN); + + ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_GET_VF_RESOURCES, + VIRTCHNL_STATUS_SUCCESS, (u8 *)vf_res, vf_res_len, NULL); + + free(vf_res, M_ICE); +} + +/** + * ice_vc_version_msg - Handle VIRTCHNL_OP_VERSION msg from VF + * @sc: device private structure + * @vf: VF tracking structure + * @msg_buf: raw message buffer from the VF + * + * Receives a version message from the VF, and responds to the VF with + * the version number that the PF will use. + * + * @remark This always replies to the VF with a success status; it does not + * fail. + */ +static void +ice_vc_version_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) +{ + struct virtchnl_version_info *recv_vf_version; + struct ice_hw *hw = &sc->hw; + device_t dev = sc->dev; + + recv_vf_version = (struct virtchnl_version_info *)msg_buf; + + /* VFs running the 1.0 API expect to get 1.0 back */ + if (VF_IS_V10(recv_vf_version)) { + vf->version.major = 1; + vf->version.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; + } else { + vf->version.major = VIRTCHNL_VERSION_MAJOR; + vf->version.minor = VIRTCHNL_VERSION_MINOR; + + if ((recv_vf_version->major != VIRTCHNL_VERSION_MAJOR) || + (recv_vf_version->minor != VIRTCHNL_VERSION_MINOR)) + device_printf(dev, + "%s: VF-%d requested version (%d.%d) differs from PF version (%d.%d)\n", + __func__, vf->vf_num, + recv_vf_version->major, recv_vf_version->minor, + VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR); + } + + ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_VERSION, + VIRTCHNL_STATUS_SUCCESS, (u8 *)&vf->version, sizeof(vf->version), + NULL); +} + +/** + * ice_vf_validate_mac - Validate MAC address before adding it + * @vf: VF tracking structure + * @addr: MAC address to validate + * + * Validate a MAC address before adding it to a VF during the handling + * of a VIRTCHNL_OP_ADD_ETH_ADDR operation. Notably, this also checks if + * the VF is allowed to set its own arbitrary MAC addresses. 
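+ *
+ * The policy reduces to the following sketch:
+ *
+ *	valid = !zero(addr) && !broadcast(addr) &&
+ *	    (SET_MAC_CAP || multicast(addr) || addr == vf->mac);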
+ * + * Returns 0 if MAC address is valid for the given vf + */ +static int +ice_vf_validate_mac(struct ice_vf *vf, const uint8_t *addr) +{ + + if (ETHER_IS_ZERO(addr) || ETHER_IS_BROADCAST(addr)) + return (EINVAL); + + /* + * If the VF is not allowed to change its MAC address, don't let it + * set a MAC filter for an address that is not a multicast address and + * is not its assigned MAC. + */ + if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) && + !(ETHER_IS_MULTICAST(addr) || !bcmp(addr, vf->mac, ETHER_ADDR_LEN))) + return (EPERM); + + return (0); +} + +/** + * ice_vc_add_eth_addr_msg - Handle VIRTCHNL_OP_ADD_ETH_ADDR msg from VF + * @sc: device private structure + * @vf: VF tracking structure + * @msg_buf: raw message buffer from the VF + * + * Receives a list of MAC addresses from the VF and adds those addresses + * to the VSI's filter list. + */ +static void +ice_vc_add_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) +{ + enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_ether_addr_list *addr_list; + struct ice_hw *hw = &sc->hw; + u16 added_addr_cnt = 0; + int error = 0; + + addr_list = (struct virtchnl_ether_addr_list *)msg_buf; + + if (addr_list->num_elements > + (vf->mac_filter_limit - vf->mac_filter_cnt)) { + v_status = VIRTCHNL_STATUS_ERR_NO_MEMORY; + goto done; + } + + for (int i = 0; i < addr_list->num_elements; i++) { + u8 *addr = addr_list->list[i].addr; + + /* The type flag is currently ignored; every MAC address is + * treated as the LEGACY type + */ + + error = ice_vf_validate_mac(vf, addr); + if (error == EPERM) { + device_printf(sc->dev, + "%s: VF-%d: Not permitted to add MAC addr for VSI %d\n", + __func__, vf->vf_num, vf->vsi->idx); + v_status = VIRTCHNL_STATUS_ERR_PARAM; + continue; + } else if (error) { + device_printf(sc->dev, + "%s: VF-%d: Did not add invalid MAC addr for VSI %d\n", + __func__, vf->vf_num, vf->vsi->idx); + v_status = VIRTCHNL_STATUS_ERR_PARAM; + continue; + } + + error = ice_add_vsi_mac_filter(vf->vsi, addr); + if (error) { + device_printf(sc->dev, + "%s: VF-%d: Error adding MAC addr for VSI %d\n", + __func__, vf->vf_num, vf->vsi->idx); + v_status = VIRTCHNL_STATUS_ERR_PARAM; + continue; + } + /* Don't count VF's MAC against its MAC filter limit */ + if (memcmp(addr, vf->mac, ETHER_ADDR_LEN)) + added_addr_cnt++; + } + + vf->mac_filter_cnt += added_addr_cnt; + +done: + ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_ADD_ETH_ADDR, + v_status, NULL, 0, NULL); +} + +/** + * ice_vc_del_eth_addr_msg - Handle VIRTCHNL_OP_DEL_ETH_ADDR msg from VF + * @sc: device private structure + * @vf: VF tracking structure + * @msg_buf: raw message buffer from the VF + * + * Receives a list of MAC addresses from the VF and removes those addresses + * from the VSI's filter list. 
+ */ +static void +ice_vc_del_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) +{ + enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_ether_addr_list *addr_list; + struct ice_hw *hw = &sc->hw; + u16 deleted_addr_cnt = 0; + int error = 0; + + addr_list = (struct virtchnl_ether_addr_list *)msg_buf; + + for (int i = 0; i < addr_list->num_elements; i++) { + error = ice_remove_vsi_mac_filter(vf->vsi, addr_list->list[i].addr); + if (error) { + device_printf(sc->dev, + "%s: VF-%d: Error removing MAC addr for VSI %d\n", + __func__, vf->vf_num, vf->vsi->idx); + v_status = VIRTCHNL_STATUS_ERR_PARAM; + continue; + } + /* Don't count VF's MAC against its MAC filter limit */ + if (memcmp(addr_list->list[i].addr, vf->mac, ETHER_ADDR_LEN)) + deleted_addr_cnt++; + } + + if (deleted_addr_cnt >= vf->mac_filter_cnt) + vf->mac_filter_cnt = 0; + else + vf->mac_filter_cnt -= deleted_addr_cnt; + + ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_DEL_ETH_ADDR, + v_status, NULL, 0, NULL); +} + +/** + * ice_vc_add_vlan_msg - Handle VIRTCHNL_OP_ADD_VLAN msg from VF + * @sc: PF's softc structure + * @vf: VF tracking structure + * @msg_buf: message buffer from VF + * + * Adds the VLANs in msg_buf to the VF's VLAN filter list. + */ +static void +ice_vc_add_vlan_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) +{ + struct ice_hw *hw = &sc->hw; + struct virtchnl_vlan_filter_list *vlan_list; + int status = 0; + enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS; + struct ice_vsi *vsi = vf->vsi; + + vlan_list = (struct virtchnl_vlan_filter_list *)msg_buf; + + if (vlan_list->vsi_id != vsi->idx) { + device_printf(sc->dev, + "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n", + vf->vf_num, vsi->idx, vlan_list->vsi_id); + v_status = VIRTCHNL_STATUS_ERR_PARAM; + goto done; + } + + if (vlan_list->num_elements > (vf->vlan_limit - vf->vlan_cnt)) { + v_status = VIRTCHNL_STATUS_ERR_NO_MEMORY; + goto done; + } + + status = ice_add_vlan_hw_filters(vsi, vlan_list->vlan_id, + vlan_list->num_elements); + if (status) { + device_printf(sc->dev, + "VF-%d: Failure adding VLANs to VSI %d, err %s aq_err %s\n", + vf->vf_num, vsi->idx, ice_status_str(status), + ice_aq_str(sc->hw.adminq.sq_last_status)); + v_status = ice_iov_err_to_virt_err(status); + goto done; + } + + vf->vlan_cnt += vlan_list->num_elements; + +done: + ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_ADD_VLAN, + v_status, NULL, 0, NULL); +} + +/** + * ice_vc_del_vlan_msg - Handle VIRTCHNL_OP_DEL_VLAN msg from VF + * @sc: PF's softc structure + * @vf: VF tracking structure + * @msg_buf: message buffer from VF + * + * Removes the VLANs in msg_buf from the VF's VLAN filter list. 
+ */
+static void
+ice_vc_del_vlan_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+	struct ice_hw *hw = &sc->hw;
+	struct virtchnl_vlan_filter_list *vlan_list;
+	int status = 0;
+	enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+	struct ice_vsi *vsi = vf->vsi;
+
+	vlan_list = (struct virtchnl_vlan_filter_list *)msg_buf;
+
+	if (vlan_list->vsi_id != vsi->idx) {
+		device_printf(sc->dev,
+		    "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n",
+		    vf->vf_num, vsi->idx, vlan_list->vsi_id);
+		v_status = VIRTCHNL_STATUS_ERR_PARAM;
+		goto done;
+	}
+
+	status = ice_remove_vlan_hw_filters(vsi, vlan_list->vlan_id,
+	    vlan_list->num_elements);
+	if (status) {
+		device_printf(sc->dev,
+		    "VF-%d: Failure deleting VLANs from VSI %d, err %s aq_err %s\n",
+		    vf->vf_num, vsi->idx, ice_status_str(status),
+		    ice_aq_str(sc->hw.adminq.sq_last_status));
+		v_status = ice_iov_err_to_virt_err(status);
+		goto done;
+	}
+
+	if (vlan_list->num_elements >= vf->vlan_cnt)
+		vf->vlan_cnt = 0;
+	else
+		vf->vlan_cnt -= vlan_list->num_elements;
+
+done:
+	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_DEL_VLAN,
+	    v_status, NULL, 0, NULL);
+}
+
+/**
+ * ice_vc_isvalid_ring_len - Check whether a descriptor ring length is valid
+ * @ring_len: length of ring
+ *
+ * Check whether a ring size value is valid.
+ *
+ * @returns true if the given ring size is valid
+ */
+static bool
+ice_vc_isvalid_ring_len(u16 ring_len)
+{
+	return (ring_len >= ICE_MIN_DESC_COUNT &&
+	    ring_len <= ICE_MAX_DESC_COUNT &&
+	    !(ring_len % ICE_DESC_COUNT_INCR));
+}
+
+/**
+ * ice_vc_cfg_vsi_qs_msg - Handle VIRTCHNL_OP_CONFIG_VSI_QUEUES msg from VF
+ * @sc: PF's softc structure
+ * @vf: VF tracking structure
+ * @msg_buf: message buffer from VF
+ */
+static void
+ice_vc_cfg_vsi_qs_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+	device_t dev = sc->dev;
+	struct ice_hw *hw = &sc->hw;
+	struct virtchnl_vsi_queue_config_info *vqci;
+	struct virtchnl_queue_pair_info *vqpi;
+	enum virtchnl_status_code status = VIRTCHNL_STATUS_SUCCESS;
+	struct ice_vsi *vsi = vf->vsi;
+	struct ice_tx_queue *txq;
+	struct ice_rx_queue *rxq;
+	int i, error = 0;
+
+	vqci = (struct virtchnl_vsi_queue_config_info *)msg_buf;
+
+	if (vqci->num_queue_pairs > vf->vsi->num_tx_queues &&
+	    vqci->num_queue_pairs > vf->vsi->num_rx_queues) {
+		status = VIRTCHNL_STATUS_ERR_PARAM;
+		goto done;
+	}
+
+	ice_vsi_disable_tx(vf->vsi);
+	ice_control_all_rx_queues(vf->vsi, false);
+
+	/*
+	 * Clear the Tx and Rx queue configuration in case the VF
+	 * requests a different number of queues.
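+	 * Queues left with a zero desc_count are treated as unconfigured;
+	 * ice_cfg_vsi_for_tx() and ice_cfg_vsi_for_rx() stop programming
+	 * the VSI at the first such queue.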
+ */ + for (i = 0; i < vsi->num_tx_queues; i++) { + txq = &vsi->tx_queues[i]; + + txq->desc_count = 0; + txq->tx_paddr = 0; + txq->tc = 0; + } + + for (i = 0; i < vsi->num_rx_queues; i++) { + rxq = &vsi->rx_queues[i]; + + rxq->desc_count = 0; + rxq->rx_paddr = 0; + } + + vqpi = vqci->qpair; + for (i = 0; i < vqci->num_queue_pairs; i++, vqpi++) { + /* Initial parameter validation */ + if (vqpi->txq.vsi_id != vf->vsi->idx || + vqpi->rxq.vsi_id != vf->vsi->idx || + vqpi->txq.queue_id != vqpi->rxq.queue_id || + vqpi->txq.headwb_enabled || + vqpi->rxq.splithdr_enabled || + vqpi->rxq.crc_disable || + !(ice_vc_isvalid_ring_len(vqpi->txq.ring_len)) || + !(ice_vc_isvalid_ring_len(vqpi->rxq.ring_len))) { + status = VIRTCHNL_STATUS_ERR_PARAM; + goto done; + } + + /* Copy parameters into VF's queue/VSI structs */ + txq = &vsi->tx_queues[vqpi->txq.queue_id]; + + txq->desc_count = vqpi->txq.ring_len; + txq->tx_paddr = vqpi->txq.dma_ring_addr; + txq->q_handle = vqpi->txq.queue_id; + txq->tc = 0; + + rxq = &vsi->rx_queues[vqpi->rxq.queue_id]; + + rxq->desc_count = vqpi->rxq.ring_len; + rxq->rx_paddr = vqpi->rxq.dma_ring_addr; + vsi->mbuf_sz = vqpi->rxq.databuffer_size; + } + + /* Configure TX queues in HW */ + error = ice_cfg_vsi_for_tx(vsi); + if (error) { + device_printf(dev, + "VF-%d: Unable to configure VSI for Tx: %s\n", + vf->vf_num, ice_err_str(error)); + status = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; + goto done; + } + + /* Configure RX queues in HW */ + error = ice_cfg_vsi_for_rx(vsi); + if (error) { + device_printf(dev, + "VF-%d: Unable to configure VSI for Rx: %s\n", + vf->vf_num, ice_err_str(error)); + status = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; + ice_vsi_disable_tx(vsi); + goto done; + } + +done: + ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_VSI_QUEUES, + status, NULL, 0, NULL); +} + +/** + * ice_vc_cfg_rss_key_msg - Handle VIRTCHNL_OP_CONFIG_RSS_KEY msg from VF + * @sc: PF's softc structure + * @vf: VF tracking structure + * @msg_buf: message buffer from VF + * + * Sets the RSS key for the given VF, using the contents of msg_buf. 
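+ *
+ * The VF's key bytes are copied over the start of the AQ key structure,
+ * i.e. the standard RSS key followed by the extended hash key, so any
+ * key_len up to the combined size of the two fields is accepted.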
+ */ +static void +ice_vc_cfg_rss_key_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) +{ + struct ice_aqc_get_set_rss_keys keydata = + { .standard_rss_key = {0}, .extended_hash_key = {0} }; + struct ice_hw *hw = &sc->hw; + struct virtchnl_rss_key *vrk; + int status = 0; + enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS; + struct ice_vsi *vsi = vf->vsi; + + vrk = (struct virtchnl_rss_key *)msg_buf; + + if (vrk->vsi_id != vsi->idx) { + device_printf(sc->dev, + "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n", + vf->vf_num, vsi->idx, vrk->vsi_id); + v_status = VIRTCHNL_STATUS_ERR_PARAM; + goto done; + } + + if ((vrk->key_len > + (ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE + + ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE)) || + vrk->key_len == 0) { + v_status = VIRTCHNL_STATUS_ERR_PARAM; + goto done; + } + + memcpy(&keydata, vrk->key, vrk->key_len); + + status = ice_aq_set_rss_key(hw, vsi->idx, &keydata); + if (status) { + device_printf(sc->dev, + "ice_aq_set_rss_key status %s, error %s\n", + ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); + v_status = ice_iov_err_to_virt_err(status); + goto done; + } + +done: + ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_RSS_KEY, + v_status, NULL, 0, NULL); +} + +/** + * ice_vc_cfg_rss_lut_msg - Handle VIRTCHNL_OP_CONFIG_RSS_LUT msg from VF + * @sc: PF's softc structure + * @vf: VF tracking structure + * @msg_buf: message buffer from VF + * + * Adds the LUT from the VF in msg_buf to the PF via an admin queue call. + */ +static void +ice_vc_cfg_rss_lut_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) +{ + struct ice_hw *hw = &sc->hw; + struct virtchnl_rss_lut *vrl; + int status = 0; + enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS; + struct ice_aq_get_set_rss_lut_params lut_params = {}; + struct ice_vsi *vsi = vf->vsi; + + vrl = (struct virtchnl_rss_lut *)msg_buf; + + if (vrl->vsi_id != vsi->idx) { + device_printf(sc->dev, + "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n", + vf->vf_num, vsi->idx, vrl->vsi_id); + v_status = VIRTCHNL_STATUS_ERR_PARAM; + goto done; + } + + if (vrl->lut_entries > ICE_VSIQF_HLUT_ARRAY_SIZE) { + v_status = VIRTCHNL_STATUS_ERR_PARAM; + goto done; + } + + lut_params.vsi_handle = vsi->idx; + lut_params.lut_size = vsi->rss_table_size; + lut_params.lut_type = vsi->rss_lut_type; + lut_params.lut = vrl->lut; + lut_params.global_lut_id = 0; + + status = ice_aq_set_rss_lut(hw, &lut_params); + if (status) { + device_printf(sc->dev, + "VF-%d: Cannot set RSS lut, err %s aq_err %s\n", + vf->vf_num, ice_status_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + v_status = ice_iov_err_to_virt_err(status); + } + +done: + ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_RSS_LUT, + v_status, NULL, 0, NULL); +} + +/** + * ice_vc_set_rss_hena_msg - Handle VIRTCHNL_OP_SET_RSS_HENA msg from VF + * @sc: PF's softc structure + * @vf: VF tracking structure + * @msg_buf: message buffer from VF + * + * Adds the VF's hena (hash enable) bits as flow types to the PF's RSS flow + * type list. 
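+ *
+ * Each bit set in the hena value requests hashing for one AVF flow
+ * type; a hena of zero leaves the VSI with RSS disabled entirely.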
+ */
+static void
+ice_vc_set_rss_hena_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+	struct ice_hw *hw = &sc->hw;
+	struct virtchnl_rss_hena *vrh;
+	int status = 0;
+	enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+	struct ice_vsi *vsi = vf->vsi;
+
+	MPASS(vsi != NULL);
+
+	vrh = (struct virtchnl_rss_hena *)msg_buf;
+
+	/*
+	 * Remove the existing configuration to make sure only the requested
+	 * config is applied and to allow VFs to disable RSS completely.
+	 */
+	status = ice_rem_vsi_rss_cfg(hw, vsi->idx);
+	if (vrh->hena) {
+		/*
+		 * A failure to remove the old configuration is not fatal
+		 * when a new one is requested. Warn about it, but try to
+		 * apply the new config anyway.
+		 */
+		if (status)
+			device_printf(sc->dev,
+			    "ice_rem_vsi_rss_cfg status %s, error %s\n",
+			    ice_status_str(status),
+			    ice_aq_str(hw->adminq.sq_last_status));
+		status = ice_add_avf_rss_cfg(hw, vsi->idx, vrh->hena);
+		if (status)
+			device_printf(sc->dev,
+			    "ice_add_avf_rss_cfg status %s, error %s\n",
+			    ice_status_str(status),
+			    ice_aq_str(hw->adminq.sq_last_status));
+	}
+	v_status = ice_iov_err_to_virt_err(status);
+	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_SET_RSS_HENA,
+	    v_status, NULL, 0, NULL);
+}
+
+/**
+ * ice_vc_enable_queues_msg - Handle VIRTCHNL_OP_ENABLE_QUEUES msg from VF
+ * @sc: PF's softc structure
+ * @vf: VF tracking structure
+ * @msg_buf: message buffer from VF
+ *
+ * Enables VF queues selected in msg_buf for Tx/Rx traffic.
+ *
+ * @remark Only actually operates on Rx queues; Tx queues are enabled in
+ * the CONFIG_VSI_QUEUES message handler.
+ */
+static void
+ice_vc_enable_queues_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+	struct ice_hw *hw = &sc->hw;
+	struct virtchnl_queue_select *vqs;
+	enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+	struct ice_vsi *vsi = vf->vsi;
+	int bit, error = 0;
+
+	vqs = (struct virtchnl_queue_select *)msg_buf;
+
+	if (vqs->vsi_id != vsi->idx) {
+		device_printf(sc->dev,
+		    "%s: VF-%d: Message has invalid VSI ID (expected %d, got %d)\n",
+		    __func__, vf->vf_num, vsi->idx, vqs->vsi_id);
+		v_status = VIRTCHNL_STATUS_ERR_PARAM;
+		goto done;
+	}
+
+	if (!vqs->rx_queues && !vqs->tx_queues) {
+		device_printf(sc->dev,
+		    "%s: VF-%d: message queue masks are empty\n",
+		    __func__, vf->vf_num);
+		v_status = VIRTCHNL_STATUS_ERR_PARAM;
+		goto done;
+	}
+
+	/* Validate rx_queue mask */
+	bit = fls(vqs->rx_queues);
+	if (bit > vsi->num_rx_queues) {
+		device_printf(sc->dev,
+		    "%s: VF-%d: message's rx_queues map (0x%08x) has invalid bit set (%d)\n",
+		    __func__, vf->vf_num, vqs->rx_queues, bit);
+		v_status = VIRTCHNL_STATUS_ERR_PARAM;
+		goto done;
+	}
+
+	/* Tx ring enable is handled in an earlier message. */
+	for_each_set_bit(bit, &vqs->rx_queues, 32) {
+		error = ice_control_rx_queue(vsi, bit, true);
+		if (error) {
+			device_printf(sc->dev,
+			    "Unable to enable Rx ring %d for receive: %s\n",
+			    bit, ice_err_str(error));
+			v_status = VIRTCHNL_STATUS_ERR_PARAM;
+			goto done;
+		}
+	}
+
+done:
+	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_ENABLE_QUEUES,
+	    v_status, NULL, 0, NULL);
+}
+
+/**
+ * ice_vc_disable_queues_msg - Handle VIRTCHNL_OP_DISABLE_QUEUES msg
+ * @sc: PF's softc structure
+ * @vf: VF tracking structure
+ * @msg_buf: message buffer from VF
+ *
+ * Disables all VF queues for the VF's VSI.
+ *
+ * @remark Unlike the ENABLE_QUEUES handler, this operates on both
+ * Tx and Rx queues.
+ */
+static void
+ice_vc_disable_queues_msg(struct ice_softc *sc, struct ice_vf *vf,
+    u8 *msg_buf __unused)
+{
+	struct ice_hw *hw = &sc->hw;
+	enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+	struct ice_vsi *vsi = vf->vsi;
+	int error = 0;
+
+	error = ice_control_all_rx_queues(vsi, false);
+	if (error) {
+		device_printf(sc->dev,
+		    "Unable to disable Rx rings: %s\n",
+		    ice_err_str(error));
+		v_status = VIRTCHNL_STATUS_ERR_PARAM;
+		goto done;
+	}
+
+	error = ice_vsi_disable_tx(vsi);
+	if (error) {
+		/* Already prints an error message */
+		v_status = VIRTCHNL_STATUS_ERR_PARAM;
+	}
+
+done:
+	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_DISABLE_QUEUES,
+	    v_status, NULL, 0, NULL);
+}
+
+/**
+ * ice_vc_cfg_irq_map_msg - Handle VIRTCHNL_OP_CONFIG_IRQ_MAP msg from VF
+ * @sc: PF's softc structure
+ * @vf: VF tracking structure
+ * @msg_buf: message buffer from VF
+ *
+ * Configures the interrupt vectors described in the message in msg_buf. The
+ * VF needs to send this message during init, so that queues can be allowed
+ * to generate interrupts.
+ */
+static void
+ice_vc_cfg_irq_map_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+#define ICE_VIRTCHNL_QUEUE_MAP_SIZE 16
+	struct ice_hw *hw = &sc->hw;
+	struct virtchnl_irq_map_info *vimi;
+	struct virtchnl_vector_map *vvm;
+	enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+	struct ice_vsi *vsi = vf->vsi;
+	u16 vector;
+
+	vimi = (struct virtchnl_irq_map_info *)msg_buf;
+
+	if (vimi->num_vectors > vf->num_irq_vectors) {
+		device_printf(sc->dev,
+		    "%s: VF-%d: message has more vectors (%d) than configured for VF (%d)\n",
+		    __func__, vf->vf_num, vimi->num_vectors, vf->num_irq_vectors);
+		v_status = VIRTCHNL_STATUS_ERR_PARAM;
+		goto done;
+	}
+
+	vvm = vimi->vecmap;
+	/* Save off information from message */
+	for (int i = 0; i < vimi->num_vectors; i++, vvm++) {
+		struct ice_tx_queue *txq;
+		struct ice_rx_queue *rxq;
+		int bit;
+
+		if (vvm->vsi_id != vf->vsi->idx) {
+			device_printf(sc->dev,
+			    "%s: VF-%d: message's VSI ID (%d) does not match VF's (%d) for vector %d\n",
+			    __func__, vf->vf_num, vvm->vsi_id, vf->vsi->idx, i);
+			v_status = VIRTCHNL_STATUS_ERR_PARAM;
+			goto done;
+		}
+
+		/* vvm->vector_id is relative to VF space */
+		vector = vvm->vector_id;
+
+		if (vector >= vf->num_irq_vectors) {
+			device_printf(sc->dev,
+			    "%s: VF-%d: message's vector ID (%d) is greater than VF's max ID (%d)\n",
+			    __func__, vf->vf_num, vector, vf->num_irq_vectors - 1);
+			v_status = VIRTCHNL_STATUS_ERR_PARAM;
+			goto done;
+		}
+
+		/* The Misc/Admin Queue vector doesn't need mapping */
+		if (vector == 0)
+			continue;
+
+		/* coverity[address_of] */
+		for_each_set_bit(bit, &vvm->txq_map, ICE_VIRTCHNL_QUEUE_MAP_SIZE) {
+			if (bit >= vsi->num_tx_queues) {
+				device_printf(sc->dev,
+				    "%s: VF-%d: txq map has invalid bit set\n",
+				    __func__, vf->vf_num);
+				v_status = VIRTCHNL_STATUS_ERR_PARAM;
+				goto done;
+			}
+
+			vf->tx_irqvs[vector].me = vector;
+
+			txq = &vsi->tx_queues[bit];
+			txq->irqv = &vf->tx_irqvs[vector];
+			txq->itr_idx = vvm->txitr_idx;
+		}
+		/* coverity[address_of] */
+		for_each_set_bit(bit, &vvm->rxq_map, ICE_VIRTCHNL_QUEUE_MAP_SIZE) {
+			if (bit >= vsi->num_rx_queues) {
+				device_printf(sc->dev,
+				    "%s: VF-%d: rxq map has invalid bit set\n",
+				    __func__, vf->vf_num);
+				v_status = VIRTCHNL_STATUS_ERR_PARAM;
+				goto done;
+			}
+			vf->rx_irqvs[vector].me = vector;
+
+			rxq = &vsi->rx_queues[bit];
+			rxq->irqv = &vf->rx_irqvs[vector];
+			rxq->itr_idx = vvm->rxitr_idx;
+		}
+	}
+
+	/* Write to T/RQCTL registers to actually map vectors to queues */
+	for (int i = 0; i < vf->vsi->num_rx_queues; i++)
+		if (vsi->rx_queues[i].irqv != NULL)
+			ice_configure_rxq_interrupt(hw, vsi->rx_qmap[i],
+			    vsi->rx_queues[i].irqv->me, vsi->rx_queues[i].itr_idx);
+
+	for (int i = 0; i < vf->vsi->num_tx_queues; i++)
+		if (vsi->tx_queues[i].irqv != NULL)
+			ice_configure_txq_interrupt(hw, vsi->tx_qmap[i],
+			    vsi->tx_queues[i].irqv->me, vsi->tx_queues[i].itr_idx);
+
+	ice_flush(hw);
+
+done:
+	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_IRQ_MAP,
+	    v_status, NULL, 0, NULL);
+}
+
+/**
+ * ice_eth_stats_to_virtchnl_eth_stats - Convert stats for virtchnl
+ * @istats: VSI stats from HW to convert
+ * @vstats: stats struct to copy to
+ *
+ * This function copies all known stats in struct virtchnl_eth_stats from the
+ * input struct ice_eth_stats to an output struct virtchnl_eth_stats.
+ *
+ * @remark These two structure types currently have the same definition up to
+ * the size of struct virtchnl_eth_stats (on FreeBSD), but that could change
+ * in the future.
+ */
+static void
+ice_eth_stats_to_virtchnl_eth_stats(struct ice_eth_stats *istats,
+    struct virtchnl_eth_stats *vstats)
+{
+	vstats->rx_bytes = istats->rx_bytes;
+	vstats->rx_unicast = istats->rx_unicast;
+	vstats->rx_multicast = istats->rx_multicast;
+	vstats->rx_broadcast = istats->rx_broadcast;
+	vstats->rx_discards = istats->rx_discards;
+	vstats->rx_unknown_protocol = istats->rx_unknown_protocol;
+	vstats->tx_bytes = istats->tx_bytes;
+	vstats->tx_unicast = istats->tx_unicast;
+	vstats->tx_multicast = istats->tx_multicast;
+	vstats->tx_broadcast = istats->tx_broadcast;
+	vstats->tx_discards = istats->tx_discards;
+	vstats->tx_errors = istats->tx_errors;
+}
+
+/**
+ * ice_vc_get_stats_msg - Handle VIRTCHNL_OP_GET_STATS msg
+ * @sc: device private structure
+ * @vf: VF tracking structure
+ * @msg_buf: raw message buffer from the VF
+ *
+ * Updates the VF's VSI stats and sends those stats back to the VF.
+ */
+static void
+ice_vc_get_stats_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+	struct virtchnl_queue_select *vqs;
+	struct virtchnl_eth_stats stats;
+	struct ice_vsi *vsi = vf->vsi;
+	struct ice_hw *hw = &sc->hw;
+
+	vqs = (struct virtchnl_queue_select *)msg_buf;
+
+	if (vqs->vsi_id != vsi->idx) {
+		device_printf(sc->dev,
+		    "%s: VF-%d: message has invalid VSI ID %d (VF has VSI ID %d)\n",
+		    __func__, vf->vf_num, vqs->vsi_id, vsi->idx);
+		ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_GET_STATS,
+		    VIRTCHNL_STATUS_ERR_PARAM, NULL, 0, NULL);
+		return;
+	}
+
+	ice_update_vsi_hw_stats(vf->vsi);
+	ice_eth_stats_to_virtchnl_eth_stats(&vsi->hw_stats.cur, &stats);
+
+	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_GET_STATS,
+	    VIRTCHNL_STATUS_SUCCESS, (u8 *)&stats,
+	    sizeof(struct virtchnl_eth_stats), NULL);
+}
+
+/**
+ * ice_vc_cfg_promisc_mode_msg - Handle VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * @sc: PF's softc structure
+ * @vf: VF tracking structure
+ * @msg_buf: message buffer from VF
+ *
+ * Configures the promiscuous modes for the given VSI in msg_buf.
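+ *
+ * The delta between the current and the requested mode is computed
+ * with the bitmap helpers below; as a sketch:
+ *
+ *	changed = old ^ requested
+ *	clear   = changed & old		(modes to leave)
+ *	set     = changed & requested	(modes to enter)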
+ */ +static void +ice_vc_cfg_promisc_mode_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) +{ + struct ice_hw *hw = &sc->hw; + struct virtchnl_promisc_info *vpi; + enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS; + int status = 0; + struct ice_vsi *vsi = vf->vsi; + ice_declare_bitmap(old_promisc_mask, ICE_PROMISC_MAX); + ice_declare_bitmap(req_promisc_mask, ICE_PROMISC_MAX); + ice_declare_bitmap(clear_promisc_mask, ICE_PROMISC_MAX); + ice_declare_bitmap(set_promisc_mask, ICE_PROMISC_MAX); + ice_declare_bitmap(old_req_xor_mask, ICE_PROMISC_MAX); + u16 vid; + + vpi = (struct virtchnl_promisc_info *)msg_buf; + + /* Check to see if VF has permission to configure promiscuous mode */ + if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) { + device_printf(sc->dev, + "VF-%d: attempted to configure promiscuous mode\n", + vf->vf_num); + /* Don't reply to VF with an error */ + goto done; + } + + if (vpi->vsi_id != vsi->idx) { + device_printf(sc->dev, + "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n", + vf->vf_num, vsi->idx, vpi->vsi_id); + v_status = VIRTCHNL_STATUS_ERR_PARAM; + goto done; + } + + if (vpi->flags & ~ICE_VIRTCHNL_VALID_PROMISC_FLAGS) { + device_printf(sc->dev, + "VF-%d: Message has invalid promiscuous flags set (valid 0x%02x, got 0x%02x)\n", + vf->vf_num, ICE_VIRTCHNL_VALID_PROMISC_FLAGS, + vpi->flags); + v_status = VIRTCHNL_STATUS_ERR_PARAM; + goto done; + + } + + ice_zero_bitmap(req_promisc_mask, ICE_PROMISC_MAX); + /* Convert virtchnl flags to ice AQ promiscuous mode flags */ + if (vpi->flags & FLAG_VF_UNICAST_PROMISC) { + ice_set_bit(ICE_PROMISC_UCAST_TX, req_promisc_mask); + ice_set_bit(ICE_PROMISC_UCAST_RX, req_promisc_mask); + } + if (vpi->flags & FLAG_VF_MULTICAST_PROMISC) { + ice_set_bit(ICE_PROMISC_MCAST_TX, req_promisc_mask); + ice_set_bit(ICE_PROMISC_MCAST_RX, req_promisc_mask); + } + + status = ice_get_vsi_promisc(hw, vsi->idx, old_promisc_mask, &vid); + if (status) { + device_printf(sc->dev, + "VF-%d: Failed to get promiscuous mode mask for VSI %d, err %s aq_err %s\n", + vf->vf_num, vsi->idx, + ice_status_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + v_status = ice_iov_err_to_virt_err(status); + goto done; + } + + /* Figure out what got added and what got removed */ + ice_zero_bitmap(old_req_xor_mask, ICE_PROMISC_MAX); + ice_xor_bitmap(old_req_xor_mask, old_promisc_mask, req_promisc_mask, ICE_PROMISC_MAX); + ice_and_bitmap(clear_promisc_mask, old_req_xor_mask, old_promisc_mask, ICE_PROMISC_MAX); + ice_and_bitmap(set_promisc_mask, old_req_xor_mask, req_promisc_mask, ICE_PROMISC_MAX); + + if (ice_is_any_bit_set(clear_promisc_mask, ICE_PROMISC_MAX)) { + status = ice_clear_vsi_promisc(hw, vsi->idx, + clear_promisc_mask, 0); + if (status) { + device_printf(sc->dev, + "VF-%d: Failed to clear promiscuous mode for VSI %d, err %s aq_err %s\n", + vf->vf_num, vsi->idx, + ice_status_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + v_status = ice_iov_err_to_virt_err(status); + goto done; + } + } + + if (ice_is_any_bit_set(set_promisc_mask, ICE_PROMISC_MAX)) { + status = ice_set_vsi_promisc(hw, vsi->idx, set_promisc_mask, 0); + if (status) { + device_printf(sc->dev, + "VF-%d: Failed to set promiscuous mode for VSI %d, err %s aq_err %s\n", + vf->vf_num, vsi->idx, + ice_status_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + v_status = ice_iov_err_to_virt_err(status); + goto done; + } + } + +done: + ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, + v_status, NULL, 0, NULL); +} + +/** + * 
ice_vc_notify_all_vfs_link_state - Notify all VFs of PF link state + * @sc: device private structure + * + * Sends a message to all VFs about the status of the PF's link + * state. For more details, @see ice_vc_notify_vf_link_state. + */ +void +ice_vc_notify_all_vfs_link_state(struct ice_softc *sc) +{ + for (int i = 0; i < sc->num_vfs; i++) + ice_vc_notify_vf_link_state(sc, &sc->vfs[i]); +} + +/** + * ice_vc_notify_vf_link_state - Notify VF of PF link state + * @sc: device private structure + * @vf: VF tracking structure + * + * Sends an event message to the specified VF with information about + * the current link state from the PF's port. This includes whether + * link is up or down, and the link speed in 100Mbps units. + */ +static void +ice_vc_notify_vf_link_state(struct ice_softc *sc, struct ice_vf *vf) +{ + struct virtchnl_pf_event event = {}; + struct ice_hw *hw = &sc->hw; + + event.event = VIRTCHNL_EVENT_LINK_CHANGE; + event.severity = PF_EVENT_SEVERITY_INFO; + event.event_data.link_event_adv.link_status = sc->link_up; + event.event_data.link_event_adv.link_speed = + (u32)ice_conv_link_speed_to_virtchnl(true, + hw->port_info->phy.link_info.link_speed); + + ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_EVENT, + VIRTCHNL_STATUS_SUCCESS, (u8 *)&event, sizeof(event), NULL); +} + +/** + * ice_vc_handle_vf_msg - Handle a message from a VF + * @sc: device private structure + * @event: event received from the HW MBX queue + * + * Called whenever an event is received from a VF on the HW mailbox queue. + * Responsible for handling these messages as well as responding to the + * VF afterwards, depending on the received message type. + */ +void +ice_vc_handle_vf_msg(struct ice_softc *sc, struct ice_rq_event_info *event) +{ + struct ice_hw *hw = &sc->hw; + device_t dev = sc->dev; + struct ice_vf *vf; + int err = 0; + + u32 v_opcode = event->desc.cookie_high; + u16 v_id = event->desc.retval; + u8 *msg = event->msg_buf; + u16 msglen = event->msg_len; + + if (v_id >= sc->num_vfs) { + device_printf(dev, "%s: Received msg from invalid VF-%d: opcode %d, len %d\n", + __func__, v_id, v_opcode, msglen); + return; + } + + vf = &sc->vfs[v_id]; + + /* Perform basic checks on the msg */ + err = virtchnl_vc_validate_vf_msg(&vf->version, v_opcode, msg, msglen); + if (err) { + device_printf(dev, "%s: Received invalid msg from VF-%d: opcode %d, len %d, error %d\n", + __func__, vf->vf_num, v_opcode, msglen, err); + ice_aq_send_msg_to_vf(hw, v_id, v_opcode, VIRTCHNL_STATUS_ERR_PARAM, NULL, 0, NULL); + return; + } + + switch (v_opcode) { + case VIRTCHNL_OP_VERSION: + ice_vc_version_msg(sc, vf, msg); + break; + case VIRTCHNL_OP_RESET_VF: + ice_reset_vf(sc, vf, true); + break; + case VIRTCHNL_OP_GET_VF_RESOURCES: + ice_vc_get_vf_res_msg(sc, vf, msg); + break; + case VIRTCHNL_OP_ADD_ETH_ADDR: + ice_vc_add_eth_addr_msg(sc, vf, msg); + break; + case VIRTCHNL_OP_DEL_ETH_ADDR: + ice_vc_del_eth_addr_msg(sc, vf, msg); + break; + case VIRTCHNL_OP_ADD_VLAN: + ice_vc_add_vlan_msg(sc, vf, msg); + break; + case VIRTCHNL_OP_DEL_VLAN: + ice_vc_del_vlan_msg(sc, vf, msg); + break; + case VIRTCHNL_OP_CONFIG_VSI_QUEUES: + ice_vc_cfg_vsi_qs_msg(sc, vf, msg); + break; + case VIRTCHNL_OP_CONFIG_RSS_KEY: + ice_vc_cfg_rss_key_msg(sc, vf, msg); + break; + case VIRTCHNL_OP_CONFIG_RSS_LUT: + ice_vc_cfg_rss_lut_msg(sc, vf, msg); + break; + case VIRTCHNL_OP_SET_RSS_HENA: + ice_vc_set_rss_hena_msg(sc, vf, msg); + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + ice_vc_enable_queues_msg(sc, vf, msg); + ice_vc_notify_vf_link_state(sc, vf); + break; 
+ case VIRTCHNL_OP_DISABLE_QUEUES: + ice_vc_disable_queues_msg(sc, vf, msg); + break; + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + ice_vc_cfg_irq_map_msg(sc, vf, msg); + break; + case VIRTCHNL_OP_GET_STATS: + ice_vc_get_stats_msg(sc, vf, msg); + break; + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + ice_vc_cfg_promisc_mode_msg(sc, vf, msg); + break; + default: + device_printf(dev, "%s: Received unknown msg from VF-%d: opcode %d, len %d\n", + __func__, vf->vf_num, v_opcode, msglen); + ice_aq_send_msg_to_vf(hw, v_id, v_opcode, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL, 0, NULL); + break; + } +} + +/** + * ice_iov_setup_intr_mapping - Setup interrupt config for a VF + * @sc: device softc structure + * @vf: driver's VF structure for VF to be configured + * + * Before a VF can be used, and after a VF reset, the PF must configure + * the VF's interrupt allocation registers. This includes allocating + * interrupts from the PF's interrupt pool to the VF using the + * VPINT_ALLOC(_PCI) registers, and setting up a mapping from PF vectors + * to VF vectors in GLINT_VECT2FUNC. + * + * As well, this sets up queue allocation registers and maps the mailbox + * interrupt for the VF. + */ +static void +ice_iov_setup_intr_mapping(struct ice_softc *sc, struct ice_vf *vf) +{ + struct ice_hw *hw = &sc->hw; + struct ice_vsi *vsi = vf->vsi; + u16 v; + + /* Calculate indices for register ops below */ + u16 vf_first_irq_idx = vf->vf_imap[0]; + u16 vf_last_irq_idx = (vf_first_irq_idx + vf->num_irq_vectors) - 1; + u16 abs_vf_first_irq_idx = hw->func_caps.common_cap.msix_vector_first_id + + vf_first_irq_idx; + u16 abs_vf_last_irq_idx = (abs_vf_first_irq_idx + vf->num_irq_vectors) - 1; + u16 abs_vf_num = vf->vf_num + hw->func_caps.vf_base_id; + + /* Map out VF interrupt allocation in global device space. Both + * VPINT_ALLOC and VPINT_ALLOC_PCI use the same values. + */ + wr32(hw, VPINT_ALLOC(vf->vf_num), + (((abs_vf_first_irq_idx << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) | + ((abs_vf_last_irq_idx << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) | + VPINT_ALLOC_VALID_M)); + wr32(hw, VPINT_ALLOC_PCI(vf->vf_num), + (((abs_vf_first_irq_idx << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) | + ((abs_vf_last_irq_idx << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) | + VPINT_ALLOC_PCI_VALID_M)); + + /* Create inverse mapping of vectors to PF/VF combinations */ + for (v = vf_first_irq_idx; v <= vf_last_irq_idx; v++) + { + wr32(hw, GLINT_VECT2FUNC(v), + (((abs_vf_num << GLINT_VECT2FUNC_VF_NUM_S) & GLINT_VECT2FUNC_VF_NUM_M) | + ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & GLINT_VECT2FUNC_PF_NUM_M))); + } + + /* Map mailbox interrupt to MSI-X index 0. Disable ITR for it, too. 
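+	 * (An ITR index of 0x3 selects the "no ITR" setting.)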
+	 */
+	wr32(hw, VPINT_MBX_CTL(abs_vf_num),
+	    ((0 << VPINT_MBX_CTL_MSIX_INDX_S) & VPINT_MBX_CTL_MSIX_INDX_M) |
+	    ((0x3 << VPINT_MBX_CTL_ITR_INDX_S) & VPINT_MBX_CTL_ITR_INDX_M) |
+	    VPINT_MBX_CTL_CAUSE_ENA_M);
+
+	/* Mark the TX queue mapping registers as valid */
+	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_num), VPLAN_TXQ_MAPENA_TX_ENA_M);
+
+	/* Indicate to HW that VF has scattered queue allocation */
+	wr32(hw, VPLAN_TX_QBASE(vf->vf_num), VPLAN_TX_QBASE_VFQTABLE_ENA_M);
+	for (int i = 0; i < vsi->num_tx_queues; i++) {
+		wr32(hw, VPLAN_TX_QTABLE(i, vf->vf_num),
+		    (vsi->tx_qmap[i] << VPLAN_TX_QTABLE_QINDEX_S) & VPLAN_TX_QTABLE_QINDEX_M);
+	}
+
+	/* Mark the RX queue mapping registers as valid */
+	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_num), VPLAN_RXQ_MAPENA_RX_ENA_M);
+	wr32(hw, VPLAN_RX_QBASE(vf->vf_num), VPLAN_RX_QBASE_VFQTABLE_ENA_M);
+	for (int i = 0; i < vsi->num_rx_queues; i++) {
+		wr32(hw, VPLAN_RX_QTABLE(i, vf->vf_num),
+		    (vsi->rx_qmap[i] << VPLAN_RX_QTABLE_QINDEX_S) & VPLAN_RX_QTABLE_QINDEX_M);
+	}
+}
+
+/**
+ * ice_iov_err_to_virt_err - translate ice errors into virtchnl errors
+ * @ice_err: status returned from ice function
+ */
+static enum virtchnl_status_code
+ice_iov_err_to_virt_err(int ice_err)
+{
+	switch (ice_err) {
+	case 0:
+		return VIRTCHNL_STATUS_SUCCESS;
+	case ICE_ERR_BAD_PTR:
+	case ICE_ERR_INVAL_SIZE:
+	case ICE_ERR_DEVICE_NOT_SUPPORTED:
+	case ICE_ERR_PARAM:
+	case ICE_ERR_CFG:
+		return VIRTCHNL_STATUS_ERR_PARAM;
+	case ICE_ERR_NO_MEMORY:
+		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
+	case ICE_ERR_NOT_READY:
+	case ICE_ERR_RESET_FAILED:
+	case ICE_ERR_FW_API_VER:
+	case ICE_ERR_AQ_ERROR:
+	case ICE_ERR_AQ_TIMEOUT:
+	case ICE_ERR_AQ_FULL:
+	case ICE_ERR_AQ_NO_WORK:
+	case ICE_ERR_AQ_EMPTY:
+		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+	default:
+		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+	}
+}
diff --git a/sys/dev/ice/ice_iov.h b/sys/dev/ice/ice_iov.h
new file mode 100644
index 000000000000..c4fb3e932e3f
--- /dev/null
+++ b/sys/dev/ice/ice_iov.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2025, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file ice_iov.h
+ * @brief header for IOV functionality
+ *
+ * This header includes definitions used to implement device Virtual Functions
+ * for the ice driver.
+ */
+
+#ifndef _ICE_IOV_H_
+#define _ICE_IOV_H_
+
+#include <sys/types.h>
+#include <sys/bus.h>
+#include <sys/nv.h>
+#include <sys/iov_schema.h>
+#include <sys/param.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+#include <dev/pci/pci_iov.h>
+
+#include "ice_iflib.h"
+#include "ice_vf_mbx.h"
+
+/**
+ * @enum ice_vf_flags
+ * @brief VF state flags
+ *
+ * Used to indicate the status of a PF's VF, as well as indicating what each VF
+ * is capable of. Intended to be modified only using atomic operations, so
+ * they can be read and modified in places that aren't locked.
+ *
+ * Used in struct ice_vf's vf_flags field.
+ */
+enum ice_vf_flags {
+	VF_FLAG_ENABLED		= BIT(0),
+	VF_FLAG_SET_MAC_CAP	= BIT(1),
+	VF_FLAG_VLAN_CAP	= BIT(2),
+	VF_FLAG_PROMISC_CAP	= BIT(3),
+	VF_FLAG_MAC_ANTI_SPOOF	= BIT(4),
+};
+
+/**
+ * @struct ice_vf
+ * @brief PF's VF software context
+ *
+ * Represents the state and options for a VF spawned from a PF.
+ */
+struct ice_vf {
+	struct ice_vsi *vsi;
+	u32 vf_flags;
+
+	u8 mac[ETHER_ADDR_LEN];
+	u16 vf_num;
+	struct virtchnl_version_info version;
+
+	u16 mac_filter_limit;
+	u16 mac_filter_cnt;
+	u16 vlan_limit;
+	u16 vlan_cnt;
+
+	u16 num_irq_vectors;
+	u16 *vf_imap;
+	struct ice_irq_vector *tx_irqvs;
+	struct ice_irq_vector *rx_irqvs;
+};
+
+#define ICE_PCIE_DEV_STATUS			0xAA
+
+#define ICE_PCI_CIAD_WAIT_COUNT			100
+#define ICE_PCI_CIAD_WAIT_DELAY_US		1
+#define ICE_VPGEN_VFRSTAT_WAIT_COUNT		100
+#define ICE_VPGEN_VFRSTAT_WAIT_DELAY_US		20
+
+#define ICE_VIRTCHNL_VALID_PROMISC_FLAGS	(FLAG_VF_UNICAST_PROMISC | \
+						 FLAG_VF_MULTICAST_PROMISC)
+
+#define ICE_DEFAULT_VF_VLAN_LIMIT		64
+#define ICE_DEFAULT_VF_FILTER_LIMIT		16
+
+int ice_iov_attach(struct ice_softc *sc);
+int ice_iov_detach(struct ice_softc *sc);
+
+int ice_iov_init(struct ice_softc *sc, uint16_t num_vfs, const nvlist_t *params);
+int ice_iov_add_vf(struct ice_softc *sc, uint16_t vfnum, const nvlist_t *params);
+void ice_iov_uninit(struct ice_softc *sc);
+
+void ice_iov_handle_vflr(struct ice_softc *sc);
+
+void ice_vc_handle_vf_msg(struct ice_softc *sc, struct ice_rq_event_info *event);
+void ice_vc_notify_all_vfs_link_state(struct ice_softc *sc);
+
+#endif /* _ICE_IOV_H_ */
+
diff --git a/sys/dev/ice/ice_lib.c b/sys/dev/ice/ice_lib.c
index d44ae5f37750..442111e5ffaf 100644
--- a/sys/dev/ice/ice_lib.c
+++ b/sys/dev/ice/ice_lib.c
@@ -42,6 +42,9 @@
 #include "ice_lib.h"
 #include "ice_iflib.h"
+#ifdef PCI_IOV
+#include "ice_iov.h"
+#endif
 #include <dev/pci/pcivar.h>
 #include <dev/pci/pcireg.h>
 #include <machine/resource.h>
@@ -741,6 +744,12 @@ ice_initialize_vsi(struct ice_vsi *vsi)
 	case ICE_VSI_VMDQ2:
 		ctx.flags = ICE_AQ_VSI_TYPE_VMDQ2;
 		break;
+#ifdef PCI_IOV
+	case ICE_VSI_VF:
+		ctx.flags = ICE_AQ_VSI_TYPE_VF;
+		ctx.vf_num = vsi->vf_num;
+		break;
+#endif
 	default:
 		return (ENODEV);
 	}
@@ -1607,6 +1616,12 @@ ice_setup_tx_ctx(struct ice_tx_queue *txq, struct ice_tlan_ctx *tlan_ctx, u16 pf
 	case ICE_VSI_VMDQ2:
 		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
 		break;
+#ifdef PCI_IOV
+	case ICE_VSI_VF:
+		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
+		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_num;
+		break;
+#endif
 	default:
 		return (ENODEV);
 	}
@@ -1660,6 +1675,10 @@ ice_cfg_vsi_for_tx(struct ice_vsi *vsi)
 		struct ice_tlan_ctx tlan_ctx = { 0 };
 		struct ice_tx_queue *txq = &vsi->tx_queues[i];
 
+		/* Stop at the first unconfigured queue */
+		if (txq->desc_count == 0)
+			break;
+
 		pf_q = vsi->tx_qmap[txq->me];
 		qg->txqs[0].txq_id = htole16(pf_q);
@@ -1788,6 +1807,10 @@ ice_cfg_vsi_for_rx(struct ice_vsi *vsi)
 	for (i = 0; i < vsi->num_rx_queues; i++) {
 		MPASS(vsi->mbuf_sz > 0);
+		/* Stop at the first unconfigured queue */
+		if (vsi->rx_queues[i].desc_count == 0)
+			break;
+
 		err = ice_setup_rx_ctx(&vsi->rx_queues[i]);
 		if (err)
 			return err;
@@ -2257,6 +2280,11 @@ ice_process_ctrlq_event(struct ice_softc *sc, const char *qname,
 	case ice_aqc_opc_get_link_status:
 		ice_process_link_event(sc, event);
 		break;
+#ifdef PCI_IOV
+	case ice_mbx_opc_send_msg_to_pf:
+		ice_vc_handle_vf_msg(sc, event);
+		break;
+#endif
 	case ice_aqc_opc_fw_logs_event:
 		ice_handle_fw_log_event(sc, &event->desc, event->msg_buf);
 		break;
diff --git a/sys/dev/ice/ice_lib.h b/sys/dev/ice/ice_lib.h
index b6b23ec82161..308b2bda2790 100644
--- a/sys/dev/ice/ice_lib.h
+++ b/sys/dev/ice/ice_lib.h
@@ -611,6 +611,10 @@ struct ice_vsi {
 	u16 mirror_src_vsi;
 	u16 rule_mir_ingress;
 	u16 rule_mir_egress;
+
+#ifdef PCI_IOV
+	u8 vf_num;	/* Index of owning VF, if applicable */
+#endif
 };
 
 /**
diff --git a/sys/dev/ice/ice_vf_mbx.c b/sys/dev/ice/ice_vf_mbx.c
new file mode 100644
index 000000000000..387a1c6739a6
--- /dev/null
+++ b/sys/dev/ice/ice_vf_mbx.c
@@ -0,0 +1,471 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2025, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ice_common.h"
+#include "ice_hw_autogen.h"
+#include "ice_vf_mbx.h"
+
+/**
+ * ice_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: VF ID to send the message to
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cd: pointer to command details
+ *
+ * Send a message to the VF driver (0x0802) using the mailbox queue; the
+ * message is sent asynchronously via ice_sq_send_cmd().
+ */
+int
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+    u8 *msg, u16 msglen, struct ice_sq_cd *cd)
+{
+	struct ice_aqc_pf_vf_msg *cmd;
+	struct ice_aq_desc desc;
+
+	ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
+
+	cmd = &desc.params.virt;
+	cmd->id = CPU_TO_LE32(vfid);
+
+	desc.cookie_high = CPU_TO_LE32(v_opcode);
+	desc.cookie_low = CPU_TO_LE32(v_retval);
+
+	if (msglen)
+		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+	return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+}
+
+/**
+ * ice_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cd: pointer to command details
+ *
+ * Send message to PF driver using mailbox queue. By default, this
+ * message is sent asynchronously, i.e. ice_sq_send_cmd()
+ * does not wait for completion before returning.
+ */
+int
+ice_aq_send_msg_to_pf(struct ice_hw *hw, enum virtchnl_ops v_opcode,
+    int v_retval, u8 *msg, u16 msglen,
+    struct ice_sq_cd *cd)
+{
+	struct ice_aq_desc desc;
+
+	ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_pf);
+	desc.cookie_high = CPU_TO_LE32(v_opcode);
+	desc.cookie_low = CPU_TO_LE32(v_retval);
+
+	if (msglen)
+		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+	return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+}
+
+static const u32 ice_legacy_aq_to_vc_speed[] = {
+	VIRTCHNL_LINK_SPEED_100MB,	/* BIT(0) */
+	VIRTCHNL_LINK_SPEED_100MB,
+	VIRTCHNL_LINK_SPEED_1GB,
+	VIRTCHNL_LINK_SPEED_1GB,
+	VIRTCHNL_LINK_SPEED_1GB,
+	VIRTCHNL_LINK_SPEED_10GB,
+	VIRTCHNL_LINK_SPEED_20GB,
+	VIRTCHNL_LINK_SPEED_25GB,
+	VIRTCHNL_LINK_SPEED_40GB,
+	VIRTCHNL_LINK_SPEED_40GB,
+	VIRTCHNL_LINK_SPEED_40GB,
+};
+
+/**
+ * ice_conv_link_speed_to_virtchnl
+ * @adv_link_support: determines the format of the returned link speed
+ * @link_speed: variable containing the link_speed to be converted
+ *
+ * Convert link speed supported by HW to link speed supported by virtchnl.
+ * If adv_link_support is true, then return link speed in Mbps. Else return
+ * link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller
+ * needs to cast back to an enum virtchnl_link_speed in the case where
+ * adv_link_support is false, but when adv_link_support is true the caller can
+ * expect the speed in Mbps.
+ */
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
+{
+	/* convert a BIT() value into an array index */
+	u16 index = (u16)(ice_fls(link_speed) - 1);
+
+	if (adv_link_support)
+		return ice_get_link_speed(index);
+	else if (index < ARRAY_SIZE(ice_legacy_aq_to_vc_speed))
+		/* Virtchnl speeds are not defined for every speed supported in
+		 * the hardware. To maintain compatibility with older AVF
+		 * drivers, the new speed values are resolved to the
+		 * closest known virtchnl speed when the speed is reported.
+		 */
+		return ice_legacy_aq_to_vc_speed[index];
+
+	return VIRTCHNL_LINK_SPEED_UNKNOWN;
+}
+
+/* The mailbox overflow detection algorithm helps to check if there
+ * is a possibility of a malicious VF transmitting too many MBX messages to the
+ * PF.
+ * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
+ * driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
+ * The struct ice_mbx_snapshot helps to track and traverse a static window of
+ * messages within the mailbox queue while looking for a malicious VF.
+ *
+ * 2. When the caller starts processing its mailbox queue in response to an
+ * interrupt, the structure ice_mbx_snapshot is expected to be cleared before
+ * the algorithm can be run for the first time for that interrupt. This
+ * requires calling ice_mbx_reset_snapshot() as well as calling
+ * ice_mbx_reset_vf_info() for each VF tracking structure.
+ *
+ * 3. For every message read by the caller from the MBX Queue, the caller must
+ * call the detection algorithm's entry function ice_mbx_vf_state_handler().
+ * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
+ * filled as it is required to be passed to the algorithm.
+ *
+ * 4. Every time a message is read from the MBX queue, a tracking structure
+ * for the VF must be passed to the state handler. The boolean output
+ * report_malvf from ice_mbx_vf_state_handler() serves as an indicator to the
+ * caller whether it must report this VF as malicious or not.
+ *
+ * 5. When a VF is identified to be malicious, the caller can send a message
+ * to the system administrator.
+ *
+ * 6. The PF is responsible for maintaining the struct ice_mbx_vf_info
+ * structure for each VF. The PF should clear the VF tracking structure if the
+ * VF is reset. When a VF is shut down and brought back up, we will then
+ * assume that the new VF is not malicious and may report it again if we
+ * detect it again.
+ *
+ * 7. The function ice_mbx_reset_snapshot() is called to reset the information
+ * in ice_mbx_snapshot for every new mailbox interrupt handled.
+ */
+#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
+/* Using the highest value for an unsigned 16-bit value 0xFFFF to indicate that
+ * the max messages check must be ignored in the algorithm
+ */
+#define ICE_IGNORE_MAX_MSG_CNT	0xFFFF
+
+/**
+ * ice_mbx_reset_snapshot - Initialize mailbox snapshot structure
+ * @snap: pointer to the mailbox snapshot
+ */
+static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
+{
+	struct ice_mbx_vf_info *vf_info;
+
+	/* Clear mbx_buf in the mailbox snapshot structure and set the
+	 * mailbox snapshot state to a new capture.
+	 */
+	ice_memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf), ICE_NONDMA_MEM);
+	snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+
+	/* Reset message counts for all VFs to zero */
+	LIST_FOR_EACH_ENTRY(vf_info, &snap->mbx_vf, ice_mbx_vf_info, list_entry)
+		vf_info->msg_count = 0;
+}
+
+/**
+ * ice_mbx_traverse - Pass through mailbox snapshot
+ * @hw: pointer to the HW struct
+ * @new_state: new algorithm state
+ *
+ * Traverse the mailbox static snapshot without checking
+ * for malicious VFs.
+ */
+static void
+ice_mbx_traverse(struct ice_hw *hw,
+    enum ice_mbx_snapshot_state *new_state)
+{
+	struct ice_mbx_snap_buffer_data *snap_buf;
+	u32 num_iterations;
+
+	snap_buf = &hw->mbx_snapshot.mbx_buf;
+
+	/* Since the mailbox buffer is circular, apply a mask to the
+	 * incremented iteration count.
+	 */
+	num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);
+
+	/* Check either of the below conditions to exit snapshot traversal:
+	 * Condition-1: If the number of iterations in the mailbox is equal to
+	 * the mailbox head, which would indicate that we have reached the end
+	 * of the static snapshot.
+	 * Condition-2: If the maximum messages serviced in the mailbox for a
+	 * given interrupt is the highest possible value, then there is no need
+	 * to check if the number of messages processed is equal to it. If not,
+	 * check if the number of messages processed is greater than or equal
+	 * to the maximum number of mailbox entries serviced in the current
+	 * work item.
+	 */
+	if (num_iterations == snap_buf->head ||
+	    (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
+	     ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
+		*new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+}
+
+/**
+ * ice_mbx_detect_malvf - Detect malicious VF in snapshot
+ * @hw: pointer to the HW struct
+ * @vf_info: mailbox tracking structure for a VF
+ * @new_state: new algorithm state
+ * @is_malvf: boolean output to indicate if VF is malicious
+ *
+ * This function tracks the number of asynchronous messages
+ * sent per VF and marks the VF as malicious if it exceeds
+ * the permissible number of messages to send.
+ */
+static int
+ice_mbx_detect_malvf(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info,
+    enum ice_mbx_snapshot_state *new_state,
+    bool *is_malvf)
+{
+	/* increment the message count for this VF */
+	vf_info->msg_count++;
+
+	if (vf_info->msg_count >= ICE_ASYNC_VF_MSG_THRESHOLD)
+		*is_malvf = true;
+
+	/* continue to iterate through the mailbox snapshot */
+	ice_mbx_traverse(hw, new_state);
+
+	return 0;
+}
+
+/**
+ * ice_e830_mbx_vf_dec_trig - Decrement the VF mailbox queue counter
+ * @hw: pointer to the HW struct
+ * @event: pointer to the control queue receive event
+ *
+ * This function triggers the hardware to decrement the counter
+ * MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT when the driver replenishes
+ * the buffers at the PF mailbox queue.
+ */
+void ice_e830_mbx_vf_dec_trig(struct ice_hw *hw,
+    struct ice_rq_event_info *event)
+{
+	u16 vfid = LE16_TO_CPU(event->desc.retval);
+
+	wr32(hw, E830_MBX_VF_DEC_TRIG(vfid), 1);
+}
+
+/**
+ * ice_mbx_vf_clear_cnt_e830 - Clear the VF mailbox queue count
+ * @hw: pointer to the HW struct
+ * @vf_id: VF ID in the PF space
+ *
+ * This function clears the counter MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT, and should
+ * be called when a VF is created and on VF reset.
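+ *
+ * It reads the current in-flight count and writes it back to the
+ * decrement trigger, draining the counter to zero in one step.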
+ */
+void ice_mbx_vf_clear_cnt_e830(struct ice_hw *hw, u16 vf_id)
+{
+	u32 reg = rd32(hw, E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(vf_id));
+
+	wr32(hw, E830_MBX_VF_DEC_TRIG(vf_id), reg);
+}
+
+/**
+ * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
+ * @hw: pointer to the HW struct
+ * @mbx_data: pointer to structure containing mailbox data
+ * @vf_info: mailbox tracking structure for the VF in question
+ * @report_malvf: boolean output to indicate whether VF should be reported
+ *
+ * The function serves as an entry point for the malicious VF
+ * detection algorithm by handling the different states and state
+ * transitions of the algorithm:
+ * New snapshot: This state is entered when creating a new static
+ * snapshot. The data from any previous mailbox snapshot is
+ * cleared and a new capture of the mailbox head and tail is
+ * logged. This will be the new static snapshot to detect
+ * asynchronous messages sent by VFs. On capturing the snapshot,
+ * and depending on whether the number of pending messages in that
+ * snapshot exceeds the watermark value, the state machine enters
+ * the traverse or detect state.
+ * Traverse: If the pending message count is below the watermark, iterate
+ * through the snapshot without any action on the VF.
+ * Detect: If the pending message count exceeds the watermark, traverse
+ * the static snapshot and look for a malicious VF.
+ */
+int
+ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
+			 struct ice_mbx_vf_info *vf_info, bool *report_malvf)
+{
+	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+	struct ice_mbx_snap_buffer_data *snap_buf;
+	struct ice_ctl_q_info *cq = &hw->mailboxq;
+	enum ice_mbx_snapshot_state new_state;
+	int status = 0;
+	bool is_malvf = false;
+
+	if (!report_malvf || !mbx_data || !vf_info)
+		return ICE_ERR_BAD_PTR;
+
+	*report_malvf = false;
+
+	/* When entering the mailbox state machine, assume that the VF
+	 * is not malicious until detected.
+	 *
+	 * The maximum number of messages allowed to be processed while
+	 * servicing the current interrupt must be strictly greater than
+	 * the defined AVF message threshold.
+	 */
+	if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
+		return ICE_ERR_INVAL_SIZE;
+
+	/* The watermark value must not be less than the threshold limit
+	 * set for the number of asynchronous messages a VF can send to the
+	 * mailbox, nor may it be greater than the maximum number of messages
+	 * in the mailbox serviced in the current interrupt.
+	 */
+	if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
+	    mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
+		return ICE_ERR_PARAM;
+
+	new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
+	snap_buf = &snap->mbx_buf;
+
+	switch (snap_buf->state) {
+	case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
+		/* Clear any previously held data in mailbox snapshot structure. */
+		ice_mbx_reset_snapshot(snap);
+
+		/* Collect the pending ARQ count, number of messages processed and
+		 * the maximum number of messages allowed to be processed from the
+		 * Mailbox for the current interrupt.
+		 */
+		snap_buf->num_pending_arq = mbx_data->num_pending_arq;
+		snap_buf->num_msg_proc = mbx_data->num_msg_proc;
+		snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;
+
+		/* Capture a new static snapshot of the mailbox by logging the
+		 * head and tail of the snapshot and setting num_iterations to
+		 * the tail value to mark the start of the iteration through
+		 * the snapshot.
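+		 *
+		 * As a worked example: with next_to_clean = 100 and
+		 * num_pending_arq = 5, head = 105 and tail = 99, so
+		 * num_iterations starts at 99 and ice_mbx_traverse()
+		 * declares the snapshot fully traversed once its masked
+		 * increment reaches 105.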
+		 */
+		snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
+						  mbx_data->num_pending_arq);
+		snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
+		snap_buf->num_iterations = snap_buf->tail;
+
+		/* The pending ARQ message count returned by ice_clean_rq_elem
+		 * is the difference between the head and tail of the
+		 * mailbox queue. Comparing this value against the watermark
+		 * helps to check if we potentially have malicious VFs.
+		 */
+		if (snap_buf->num_pending_arq >=
+		    mbx_data->async_watermark_val) {
+			new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
+			status = ice_mbx_detect_malvf(hw, vf_info, &new_state, &is_malvf);
+		} else {
+			new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
+			ice_mbx_traverse(hw, &new_state);
+		}
+		break;
+
+	case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
+		new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
+		ice_mbx_traverse(hw, &new_state);
+		break;
+
+	case ICE_MAL_VF_DETECT_STATE_DETECT:
+		new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
+		status = ice_mbx_detect_malvf(hw, vf_info, &new_state, &is_malvf);
+		break;
+
+	default:
+		new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
+		status = ICE_ERR_CFG;
+	}
+
+	snap_buf->state = new_state;
+
+	/* Only report a VF as malicious the first time we detect it */
+	if (is_malvf && !vf_info->malicious) {
+		vf_info->malicious = 1;
+		*report_malvf = true;
+	}
+
+	return status;
+}
+
+/**
+ * ice_mbx_clear_malvf - Clear VF mailbox info
+ * @vf_info: the mailbox tracking structure for a VF
+ *
+ * In case of a VF reset, this function shall be called to clear the VF's
+ * current mailbox tracking state.
+ */
+void ice_mbx_clear_malvf(struct ice_mbx_vf_info *vf_info)
+{
+	vf_info->malicious = 0;
+	vf_info->msg_count = 0;
+}
+
+/**
+ * ice_mbx_init_vf_info - Initialize a new VF mailbox tracking info
+ * @hw: pointer to the hardware structure
+ * @vf_info: the mailbox tracking info structure for a VF
+ *
+ * Initialize a VF mailbox tracking info structure and insert it into the
+ * snapshot list.
+ *
+ * If you remove the VF, you must also delete the associated VF info structure
+ * from the linked list.
+ */
+void ice_mbx_init_vf_info(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info)
+{
+	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+	ice_mbx_clear_malvf(vf_info);
+	LIST_ADD(&vf_info->list_entry, &snap->mbx_vf);
+}
+
+/**
+ * ice_mbx_init_snapshot - Initialize mailbox snapshot data
+ * @hw: pointer to the hardware structure
+ *
+ * Clear the mailbox snapshot structure and initialize the VF mailbox list.
+ */
+void ice_mbx_init_snapshot(struct ice_hw *hw)
+{
+	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+	INIT_LIST_HEAD(&snap->mbx_vf);
+	ice_mbx_reset_snapshot(snap);
+}
diff --git a/sys/dev/ice/ice_vf_mbx.h b/sys/dev/ice/ice_vf_mbx.h
new file mode 100644
index 000000000000..3b185ac89c11
--- /dev/null
+++ b/sys/dev/ice/ice_vf_mbx.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2025, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ICE_VF_MBX_H_
+#define _ICE_VF_MBX_H_
+
+#include "ice_type.h"
+#include "ice_controlq.h"
+
+/* Defining the mailbox message threshold as 63 asynchronous
+ * pending messages. Normal VF functionality does not require
+ * sending more than 63 asynchronous pending messages.
+ */
+
+/* This threshold value should also be used to initialize the
+ * MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT register.
+ */
+#define ICE_ASYNC_VF_MSG_THRESHOLD	63
+
+int
+ice_aq_send_msg_to_pf(struct ice_hw *hw, enum virtchnl_ops v_opcode,
+		      int v_retval, u8 *msg, u16 msglen,
+		      struct ice_sq_cd *cd);
+int
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+		      u8 *msg, u16 msglen, struct ice_sq_cd *cd);
+
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
+
+void ice_e830_mbx_vf_dec_trig(struct ice_hw *hw,
+			      struct ice_rq_event_info *event);
+void ice_mbx_vf_clear_cnt_e830(struct ice_hw *hw, u16 vf_id);
+int
+ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
+			 struct ice_mbx_vf_info *vf_info, bool *report_malvf);
+void ice_mbx_clear_malvf(struct ice_mbx_vf_info *vf_info);
+void ice_mbx_init_vf_info(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info);
+void ice_mbx_init_snapshot(struct ice_hw *hw);
+#endif /* _ICE_VF_MBX_H_ */
diff --git a/sys/dev/ice/if_ice_iflib.c b/sys/dev/ice/if_ice_iflib.c
index e60ee0f1c5c3..1469d2916465 100644
--- a/sys/dev/ice/if_ice_iflib.c
+++ b/sys/dev/ice/if_ice_iflib.c
@@ -42,6 +42,9 @@
 #include "ice_drv_info.h"
 #include "ice_switch.h"
 #include "ice_sched.h"
+#ifdef PCI_IOV
+#include "ice_iov.h"
+#endif
 
 #include <sys/module.h>
 #include <sys/sockio.h>
@@ -85,6 +88,12 @@ static int ice_if_suspend(if_ctx_t ctx);
 static int ice_if_resume(if_ctx_t ctx);
 static bool ice_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
 static void ice_init_link(struct ice_softc *sc);
+#ifdef PCI_IOV
+static int ice_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params);
+static void ice_if_iov_uninit(if_ctx_t ctx);
+static int ice_if_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params);
+static void ice_if_vflr_handle(if_ctx_t ctx);
+#endif
 static int ice_setup_mirror_vsi(struct ice_mirr_if *mif);
 static int ice_wire_mirror_intrs(struct ice_mirr_if *mif);
 static void ice_free_irqvs_subif(struct ice_mirr_if *mif);
@@ -158,6 +167,11 @@ static device_method_t ice_methods[] = {
 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
 	DEVMETHOD(device_suspend, iflib_device_suspend),
 	DEVMETHOD(device_resume, iflib_device_resume),
+#ifdef PCI_IOV
+	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
+	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
+	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
+#endif
 	DEVMETHOD_END
 };
@@ -198,6 +212,12 @@ static device_method_t ice_iflib_methods[] = {
 	DEVMETHOD(ifdi_suspend, ice_if_suspend),
 	DEVMETHOD(ifdi_resume, ice_if_resume),
 	DEVMETHOD(ifdi_needs_restart, ice_if_needs_restart),
+#ifdef PCI_IOV
+	DEVMETHOD(ifdi_iov_vf_add, ice_if_iov_vf_add),
+	DEVMETHOD(ifdi_iov_init, ice_if_iov_init),
+	DEVMETHOD(ifdi_iov_uninit, ice_if_iov_uninit),
+	DEVMETHOD(ifdi_vflr_handle, ice_if_vflr_handle),
+#endif
 	DEVMETHOD_END
 };
@@ -733,6 +753,9 @@ ice_update_link_status(struct ice_softc *sc, bool update_media)
 			iflib_link_state_change(sc->ctx, LINK_STATE_DOWN, 0);
 			ice_rdma_link_change(sc, LINK_STATE_DOWN, 0);
 		}
+#ifdef PCI_IOV
+		ice_vc_notify_all_vfs_link_state(sc);
+#endif
 		update_media = true;
 	}
@@ -831,6 +854,14 @@ ice_if_attach_post(if_ctx_t ctx)
 
 	ice_add_device_sysctls(sc);
 
+#ifdef PCI_IOV
+	if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_SRIOV)) {
+		err = ice_iov_attach(sc);
+		if (err == ENOMEM)
+			return (err);
+	}
+#endif /* PCI_IOV */
+
 	/* Get DCBX/LLDP state and start DCBX agent */
 	ice_init_dcb_setup(sc);
@@ -953,6 +984,11 @@ ice_if_detach(if_ctx_t ctx)
 	ice_destroy_mirror_interface(sc);
 	ice_rdma_pf_detach(sc);
 
+#ifdef PCI_IOV
+	if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_SRIOV))
+		ice_iov_detach(sc);
+#endif /* PCI_IOV */
+
 	/* Free allocated media types */
 	ifmedia_removeall(sc->media);
@@ -1676,6 +1712,11 @@ ice_if_msix_intr_assign(if_ctx_t ctx, int msix)
 	/* For future interrupt assignments */
 	sc->last_rid = rid + sc->irdma_vectors;
 
+#ifdef PCI_IOV
+	/* Create soft IRQ for handling VF resets */
+	iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, sc, 0, "iov");
+#endif
+
 	return (0);
 fail:
 	for (; i >= 0; i--, vector--)
@@ -2277,7 +2318,12 @@ ice_transition_recovery_mode(struct ice_softc *sc)
 	ice_rdma_pf_detach(sc);
 	ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
 
+#ifdef PCI_IOV
+	if (ice_test_and_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en))
+		ice_iov_detach(sc);
+#else
 	ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en);
+#endif /* PCI_IOV */
 	ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
 
 	ice_vsi_del_txqs_ctx(vsi);
@@ -2325,7 +2371,12 @@ ice_transition_safe_mode(struct ice_softc *sc)
 	ice_rdma_pf_detach(sc);
 	ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
 
+#ifdef PCI_IOV
+	if (ice_test_and_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en))
+		ice_iov_detach(sc);
+#else
 	ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en);
+#endif /* PCI_IOV */
 	ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
 	ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap);
@@ -2410,6 +2461,15 @@ ice_if_update_admin_status(if_ctx_t ctx)
 	/* Check and update link status */
 	ice_update_link_status(sc, false);
 
+#ifdef PCI_IOV
+	/*
+	 * Schedule VFs' reset handler after global resets
+	 * and other events have been processed.
+	 */
+	if (ice_testandclear_state(&sc->state, ICE_STATE_VFLR_PENDING))
+		iflib_iov_intr_deferred(ctx);
+#endif
+
 	/*
 	 * If there are still messages to process, we need to reschedule
 	 * ourselves. Otherwise, we can just re-enable the interrupt. We'll be
@@ -3349,6 +3409,78 @@ ice_init_link(struct ice_softc *sc)
 
 }
 
+#ifdef PCI_IOV
+/**
+ * ice_if_iov_init - iov init handler for iflib
+ * @ctx: iflib context pointer
+ * @num_vfs: number of VFs to create
+ * @params: configuration parameters for the PF
+ *
+ * Configure the driver for SR-IOV mode. Used to set up things like memory
+ * before any VFs are created.
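+ *
+ * For example, VFs are typically requested from userspace with
+ * iovctl(8), e.g. "iovctl -C -f /etc/iov/ice0.conf" (a hypothetical
+ * configuration path); that request reaches this handler through the
+ * pci_iov_init -> iflib_device_iov_init -> ifdi_iov_init method chain
+ * wired up above.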
+ *
+ * @remark This is a wrapper for ice_iov_init
+ */
+static int
+ice_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params)
+{
+	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+
+	return ice_iov_init(sc, num_vfs, params);
+}
+
+/**
+ * ice_if_iov_uninit - iov uninit handler for iflib
+ * @ctx: iflib context pointer
+ *
+ * Destroys VFs and frees their memory and resources.
+ *
+ * @remark This is a wrapper for ice_iov_uninit
+ */
+static void
+ice_if_iov_uninit(if_ctx_t ctx)
+{
+	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+
+	ice_iov_uninit(sc);
+}
+
+/**
+ * ice_if_iov_vf_add - iov add vf handler for iflib
+ * @ctx: iflib context pointer
+ * @vfnum: index of VF to configure
+ * @params: configuration parameters for the VF
+ *
+ * Sets up the VF given by the vfnum index. This is called by the OS
+ * for each VF created by the PF driver after it is spawned.
+ *
+ * @remark This is a wrapper for ice_iov_vf_add
+ */
+static int
+ice_if_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params)
+{
+	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+
+	return ice_iov_add_vf(sc, vfnum, params);
+}
+
+/**
+ * ice_if_vflr_handle - iov VFLR handler
+ * @ctx: iflib context pointer
+ *
+ * Performs the necessary teardown or setup required for a VF after
+ * a VFLR is initiated.
+ *
+ * @remark This is a wrapper for ice_iov_handle_vflr
+ */
+static void
+ice_if_vflr_handle(if_ctx_t ctx)
+{
+	struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+	ice_iov_handle_vflr(sc);
+}
+#endif /* PCI_IOV */
+
 extern struct if_txrx ice_subif_txrx;
 
 /**
diff --git a/sys/dev/ichiic/ig4_pci.c b/sys/dev/ichiic/ig4_pci.c
index 0195466150eb..3a49e220e335 100644
--- a/sys/dev/ichiic/ig4_pci.c
+++ b/sys/dev/ichiic/ig4_pci.c
@@ -186,6 +186,12 @@ static int ig4iic_pci_detach(device_t dev);
 #define PCI_CHIP_METEORLAKE_M_I2C_3	0x7e518086
 #define PCI_CHIP_METEORLAKE_M_I2C_4	0x7e7a8086
 #define PCI_CHIP_METEORLAKE_M_I2C_5	0x7e7b8086
+#define PCI_CHIP_ARROWLAKE_U_I2C_0	0x77788086
+#define PCI_CHIP_ARROWLAKE_U_I2C_1	0x77798086
+#define PCI_CHIP_ARROWLAKE_U_I2C_2	0x777a8086
+#define PCI_CHIP_ARROWLAKE_U_I2C_3	0x777b8086
+#define PCI_CHIP_ARROWLAKE_U_I2C_4	0x77508086
+#define PCI_CHIP_ARROWLAKE_U_I2C_5	0x77518086
 
 struct ig4iic_pci_device {
 	uint32_t	devid;
@@ -316,6 +322,12 @@ static struct ig4iic_pci_device ig4iic_pci_devices[] = {
 	{ PCI_CHIP_METEORLAKE_M_I2C_3, "Intel Meteor Lake-M I2C Controller-3", IG4_TIGERLAKE},
 	{ PCI_CHIP_METEORLAKE_M_I2C_4, "Intel Meteor Lake-M I2C Controller-4", IG4_TIGERLAKE},
 	{ PCI_CHIP_METEORLAKE_M_I2C_5, "Intel Meteor Lake-M I2C Controller-5", IG4_TIGERLAKE},
+	{ PCI_CHIP_ARROWLAKE_U_I2C_0, "Intel Arrow Lake-H/U I2C Controller-0", IG4_TIGERLAKE},
+	{ PCI_CHIP_ARROWLAKE_U_I2C_1, "Intel Arrow Lake-H/U I2C Controller-1", IG4_TIGERLAKE},
+	{ PCI_CHIP_ARROWLAKE_U_I2C_2, "Intel Arrow Lake-H/U I2C Controller-2", IG4_TIGERLAKE},
+	{ PCI_CHIP_ARROWLAKE_U_I2C_3, "Intel Arrow Lake-H/U I2C Controller-3", IG4_TIGERLAKE},
+	{ PCI_CHIP_ARROWLAKE_U_I2C_4, "Intel Arrow Lake-H/U I2C Controller-4", IG4_TIGERLAKE},
+	{ PCI_CHIP_ARROWLAKE_U_I2C_5, "Intel Arrow Lake-H/U I2C Controller-5", IG4_TIGERLAKE},
 };
 
 static int
diff --git a/sys/dev/iicbus/gpio/tca64xx.c b/sys/dev/iicbus/gpio/tca64xx.c
index 3b3bca9936f1..cd011ae9be75 100644
--- a/sys/dev/iicbus/gpio/tca64xx.c
+++ b/sys/dev/iicbus/gpio/tca64xx.c
@@ -261,14 +261,13 @@ tca64xx_attach(device_t dev)
 	sc->addr = iicbus_get_addr(dev);
 	mtx_init(&sc->mtx, "tca64xx gpio", "gpio", MTX_DEF);
+	OF_device_register_xref(OF_xref_from_node(ofw_bus_get_node(dev)), dev);
 	sc->busdev = gpiobus_attach_bus(dev);
 	if (sc->busdev == NULL) {
 		device_printf(dev, "Could not create busdev child\n");
 		return (ENXIO);
 	}
 
-	OF_device_register_xref(OF_xref_from_node(ofw_bus_get_node(dev)), dev);
-
 #ifdef DEBUG
 	switch (sc->chip) {
 	case TCA6416_TYPE:
diff --git a/sys/dev/iicbus/iichid.c b/sys/dev/iicbus/iichid.c
index 9c0324a24685..3f1d7a0cefba 100644
--- a/sys/dev/iicbus/iichid.c
+++ b/sys/dev/iicbus/iichid.c
@@ -275,62 +275,36 @@ iichid_cmd_read(struct iichid_softc* sc, void *buf, iichid_size_t maxlen,
 	 * 6.1.3 - Retrieval of Input Reports
 	 * DEVICE returns the length (2 Bytes) and the entire Input Report.
 	 */
-	uint8_t actbuf[2] = { 0, 0 };
-	/* Read actual input report length. */
+
+	memset(buf, 0xaa, 2);	/* In case nothing gets read. */
 	struct iic_msg msgs[] = {
-	    { sc->addr, IIC_M_RD | IIC_M_NOSTOP, sizeof(actbuf), actbuf },
+	    { sc->addr, IIC_M_RD, maxlen, buf },
 	};
-	uint16_t actlen;
 	int error;
 
 	error = iicbus_transfer(sc->dev, msgs, nitems(msgs));
 	if (error != 0)
 		return (error);
 
-	actlen = actbuf[0] | actbuf[1] << 8;
-#ifdef IICHID_SAMPLING
-	if ((actlen == 0 && sc->sampling_rate_slow < 0) ||
-	    (maxlen == 0 && sc->sampling_rate_slow >= 0)) {
-#else
+	DPRINTFN(sc, 5, "%*D\n", msgs[0].len, msgs[0].buf, " ");
+
+	uint16_t actlen = le16dec(buf);
+	if (actlen == 0) {
-#endif
-		/* Read and discard reset command response. */
-		msgs[0] = (struct iic_msg)
-		    { sc->addr, IIC_M_RD | IIC_M_NOSTART,
-		      le16toh(sc->desc.wMaxInputLength) - 2, sc->intr_buf };
-		actlen = 0;
 		if (!sc->reset_acked) {
 			mtx_lock(&sc->mtx);
 			sc->reset_acked = true;
 			wakeup(&sc->reset_acked);
 			mtx_unlock(&sc->mtx);
 		}
-#ifdef IICHID_SAMPLING
-	} else if ((actlen <= 2 || actlen == 0xFFFF) &&
-		    sc->sampling_rate_slow >= 0) {
-		/* Read and discard 1 byte to send I2C STOP condition. */
-		msgs[0] = (struct iic_msg)
-		    { sc->addr, IIC_M_RD | IIC_M_NOSTART, 1, actbuf };
-		actlen = 0;
-#endif
-	} else {
-		actlen -= 2;
-		if (actlen > maxlen) {
-			DPRINTF(sc, "input report too big. requested=%d "
-			    "received=%d\n", maxlen, actlen);
-			actlen = maxlen;
-		}
-		/* Read input report itself. */
-		msgs[0] = (struct iic_msg)
-		    { sc->addr, IIC_M_RD | IIC_M_NOSTART, actlen, buf };
 	}
-	error = iicbus_transfer(sc->dev, msgs, 1);
-	if (error == 0 && actual_len != NULL)
+	if (actlen <= 2 || actlen > maxlen) {
+		actlen = 0;
+	}
+	if (actual_len != NULL) {
 		*actual_len = actlen;
-
-	DPRINTFN(sc, 5,
-	    "%*D - %*D\n", 2, actbuf, " ", msgs[0].len, msgs[0].buf, " ");
+	}
 
 	return (error);
 }
@@ -566,7 +540,7 @@ iichid_sampling_task(void *context, int pending)
 	error = iichid_cmd_read(sc, sc->intr_buf, sc->intr_bufsize, &actual);
 	if (error == 0) {
 		if (actual > 0) {
-			sc->intr_handler(sc->intr_ctx, sc->intr_buf, actual);
+			sc->intr_handler(sc->intr_ctx, sc->intr_buf + 2, actual);
 			sc->missing_samples = 0;
 			if (sc->dup_size != actual ||
 			    memcmp(sc->dup_buf, sc->intr_buf, actual) != 0) {
@@ -577,7 +551,7 @@ iichid_sampling_task(void *context, int pending)
 				++sc->dup_samples;
 		} else {
 			if (++sc->missing_samples == 1)
-				sc->intr_handler(sc->intr_ctx, sc->intr_buf, 0);
+				sc->intr_handler(sc->intr_ctx, sc->intr_buf + 2, 0);
 			sc->dup_samples = 0;
 		}
 	} else
@@ -632,7 +606,7 @@ iichid_intr(void *context)
 	if (error == 0) {
 		if (sc->power_on && sc->open) {
 			if (actual != 0)
-				sc->intr_handler(sc->intr_ctx, sc->intr_buf,
+				sc->intr_handler(sc->intr_ctx, sc->intr_buf + 2,
 				    actual);
 			else
 				DPRINTF(sc, "no data received\n");
@@ -842,11 +816,12 @@ iichid_intr_setup(device_t dev, device_t child __unused, hid_intr_t intr,
 	sc = device_get_softc(dev);
 	/*
-	 * Do not rely on wMaxInputLength, as some devices may set it to
-	 * a wrong length. Find the longest input report in report descriptor.
+	 * Do not rely just on wMaxInputLength, as some devices may set it
+	 * to a wrong length. Also find the longest input report in the
+	 * report descriptor, and add two for the length field.
 	 */
-	rdesc->rdsize =
-	    MAX(rdesc->isize, le16toh(sc->desc.wMaxInputLength) - 2);
+	rdesc->rdsize = 2 +
+	    MAX(rdesc->isize, le16toh(sc->desc.wMaxInputLength));
 	/* Write and get/set_report sizes are limited by I2C-HID protocol.
*/ rdesc->grsize = rdesc->srsize = IICHID_SIZE_MAX; rdesc->wrsize = IICHID_SIZE_MAX; @@ -919,7 +894,7 @@ iichid_intr_poll(device_t dev, device_t child __unused) sc = device_get_softc(dev); error = iichid_cmd_read(sc, sc->intr_buf, sc->intr_bufsize, &actual); if (error == 0 && actual != 0) - sc->intr_handler(sc->intr_ctx, sc->intr_buf, actual); + sc->intr_handler(sc->intr_ctx, sc->intr_buf + 2, actual); } /* @@ -946,6 +921,7 @@ iichid_read(device_t dev, device_t child __unused, void *buf, { struct iichid_softc *sc; device_t parent; + uint8_t *tmpbuf; int error; if (maxlen > IICHID_SIZE_MAX) @@ -954,8 +930,12 @@ iichid_read(device_t dev, device_t child __unused, void *buf, parent = device_get_parent(sc->dev); error = iicbus_request_bus(parent, sc->dev, IIC_WAIT); if (error == 0) { - error = iichid_cmd_read(sc, buf, maxlen, actlen); + tmpbuf = malloc(maxlen + 2, M_DEVBUF, M_WAITOK | M_ZERO); + error = iichid_cmd_read(sc, tmpbuf, maxlen + 2, actlen); iicbus_release_bus(parent, sc->dev); + if (*actlen > 0) + memcpy(buf, tmpbuf + 2, *actlen); + free(tmpbuf, M_DEVBUF); } return (iic2errno(error)); } diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c index 741a7c013f7d..ec1664fac701 100644 --- a/sys/dev/md/md.c +++ b/sys/dev/md/md.c @@ -11,9 +11,9 @@ */ /*- - * The following functions are based on the vn(4) driver: mdstart_swap(), - * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(), - * and as such under the following copyright: + * The following functions are based on the historical vn(4) driver: + * mdstart_swap(), mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() + * and mddestroy(), and as such under the following copyright: * * Copyright (c) 1988 University of Utah. * Copyright (c) 1990, 1993 @@ -89,6 +89,8 @@ #include <sys/unistd.h> #include <sys/vnode.h> #include <sys/disk.h> +#include <sys/param.h> +#include <sys/bus.h> #include <geom/geom.h> #include <geom/geom_int.h> @@ -2082,8 +2084,10 @@ g_md_init(struct g_class *mp __unused) { caddr_t mod; u_char *ptr, *name, *type; + u_char scratch[40]; unsigned len; int i; + vm_offset_t paddr; /* figure out log2(NINDIR) */ for (i = NINDIR, nshift = -1; i; nshift++) @@ -2123,6 +2127,25 @@ g_md_init(struct g_class *mp __unused) sx_xunlock(&md_sx); } } + + /* + * Load up to 32 pre-loaded disks + */ + for (int i = 0; i < 32; i++) { + if (resource_long_value("md", i, "physaddr", + (long *) &paddr) != 0 || + resource_int_value("md", i, "len", &len) != 0) + break; + ptr = (char *)pmap_map(NULL, paddr, paddr + len, VM_PROT_READ); + if (ptr != NULL && len != 0) { + sprintf(scratch, "preload%d 0x%016jx", i, + (uintmax_t)paddr); + sx_xlock(&md_sx); + md_preloaded(ptr, len, scratch); + sx_xunlock(&md_sx); + } + } + status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL, 0600, MDCTL_NAME); g_topology_lock(); diff --git a/sys/dev/mem/memutil.c b/sys/dev/mem/memutil.c index cf9714d6ec8f..20ce337df0ab 100644 --- a/sys/dev/mem/memutil.c +++ b/sys/dev/mem/memutil.c @@ -26,15 +26,14 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#include <sys/param.h> +#include <sys/systm.h> #include <sys/kernel.h> #include <sys/lock.h> #include <sys/malloc.h> #include <sys/memrange.h> -#include <sys/rwlock.h> -#include <sys/systm.h> +#include <sys/sx.h> -static struct rwlock mr_lock; +static struct sx mr_lock; /* * Implementation-neutral, kernel-callable functions for manipulating @@ -46,7 +45,7 @@ mem_range_init(void) if (mem_range_softc.mr_op == NULL) return; - rw_init(&mr_lock, "memrange"); + sx_init(&mr_lock, "memrange"); mem_range_softc.mr_op->init(&mem_range_softc); } @@ -56,7 +55,7 @@ mem_range_destroy(void) if (mem_range_softc.mr_op == NULL) return; - rw_destroy(&mr_lock); + sx_destroy(&mr_lock); } int @@ -67,12 +66,12 @@ mem_range_attr_get(struct mem_range_desc *mrd, int *arg) if (mem_range_softc.mr_op == NULL) return (EOPNOTSUPP); nd = *arg; - rw_rlock(&mr_lock); + sx_slock(&mr_lock); if (nd == 0) *arg = mem_range_softc.mr_ndesc; else bcopy(mem_range_softc.mr_desc, mrd, nd * sizeof(*mrd)); - rw_runlock(&mr_lock); + sx_sunlock(&mr_lock); return (0); } @@ -83,8 +82,8 @@ mem_range_attr_set(struct mem_range_desc *mrd, int *arg) if (mem_range_softc.mr_op == NULL) return (EOPNOTSUPP); - rw_wlock(&mr_lock); + sx_xlock(&mr_lock); ret = mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg); - rw_wunlock(&mr_lock); + sx_xunlock(&mr_lock); return (ret); } diff --git a/sys/dev/mgb/if_mgb.c b/sys/dev/mgb/if_mgb.c index 1240d0f84415..409f34167df0 100644 --- a/sys/dev/mgb/if_mgb.c +++ b/sys/dev/mgb/if_mgb.c @@ -1435,7 +1435,7 @@ mgb_hw_teardown(struct mgb_softc *sc) /* Stop MAC */ CSR_CLEAR_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL); - CSR_WRITE_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL); + CSR_CLEAR_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL); if ((err = mgb_wait_for_bits(sc, MGB_MAC_RX, MGB_MAC_DSBL, 0))) return (err); if ((err = mgb_wait_for_bits(sc, MGB_MAC_TX, MGB_MAC_DSBL, 0))) diff --git a/sys/dev/mlx5/mlx5_accel/ipsec.h b/sys/dev/mlx5/mlx5_accel/ipsec.h index 361b9f72d873..c3f3a2372482 100644 --- a/sys/dev/mlx5/mlx5_accel/ipsec.h +++ b/sys/dev/mlx5/mlx5_accel/ipsec.h @@ -260,8 +260,8 @@ int mlx5e_accel_ipsec_fs_rx_tables_create(struct mlx5e_priv *priv); void mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(struct mlx5e_priv *priv); int mlx5e_accel_ipsec_fs_rx_catchall_rules(struct mlx5e_priv *priv); int mlx5_accel_ipsec_rx_tag_add(if_t ifp, struct mlx5e_rq_mbuf *mr); -void mlx5e_accel_ipsec_handle_rx_cqe(struct mbuf *mb, struct mlx5_cqe64 *cqe, - struct mlx5e_rq_mbuf *mr); +void mlx5e_accel_ipsec_handle_rx_cqe(if_t ifp, struct mbuf *mb, + struct mlx5_cqe64 *cqe, struct mlx5e_rq_mbuf *mr); static inline int mlx5e_accel_ipsec_flow(struct mlx5_cqe64 *cqe) { @@ -269,12 +269,12 @@ static inline int mlx5e_accel_ipsec_flow(struct mlx5_cqe64 *cqe) } static inline void -mlx5e_accel_ipsec_handle_rx(struct mbuf *mb, struct mlx5_cqe64 *cqe, +mlx5e_accel_ipsec_handle_rx(if_t ifp, struct mbuf *mb, struct mlx5_cqe64 *cqe, struct mlx5e_rq_mbuf *mr) { u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata); if (MLX5_IPSEC_METADATA_MARKER(ipsec_meta_data)) - mlx5e_accel_ipsec_handle_rx_cqe(mb, cqe, mr); + mlx5e_accel_ipsec_handle_rx_cqe(ifp, mb, cqe, mr); } #endif /* __MLX5_ACCEL_IPSEC_H__ */ diff --git a/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_rxtx.c b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_rxtx.c index 0883cfb2d510..5dccb8bc2b87 100644 --- a/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_rxtx.c +++ b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_rxtx.c @@ -24,11 +24,14 @@ * */ +#include "opt_ipsec.h" + #include <sys/mbuf.h> #include <sys/socket.h> #include <netinet/in.h> #include <netipsec/keydb.h> 
#include <netipsec/ipsec_offload.h> +#include <netipsec/xform.h> #include <dev/mlx5/qp.h> #include <dev/mlx5/mlx5_en/en.h> #include <dev/mlx5/mlx5_accel/ipsec.h> @@ -48,7 +51,8 @@ mlx5_accel_ipsec_rx_tag_add(if_t ifp, struct mlx5e_rq_mbuf *mr) return (0); mtag = (struct ipsec_accel_in_tag *)m_tag_get( - PACKET_TAG_IPSEC_ACCEL_IN, sizeof(*mtag), M_NOWAIT); + PACKET_TAG_IPSEC_ACCEL_IN, sizeof(struct ipsec_accel_in_tag) - + __offsetof(struct ipsec_accel_in_tag, xh), M_NOWAIT); if (mtag == NULL) return (-ENOMEM); mr->ipsec_mtag = mtag; @@ -56,8 +60,8 @@ mlx5_accel_ipsec_rx_tag_add(if_t ifp, struct mlx5e_rq_mbuf *mr) } void -mlx5e_accel_ipsec_handle_rx_cqe(struct mbuf *mb, struct mlx5_cqe64 *cqe, - struct mlx5e_rq_mbuf *mr) +mlx5e_accel_ipsec_handle_rx_cqe(if_t ifp, struct mbuf *mb, + struct mlx5_cqe64 *cqe, struct mlx5e_rq_mbuf *mr) { struct ipsec_accel_in_tag *mtag; u32 drv_spi; @@ -65,10 +69,12 @@ mlx5e_accel_ipsec_handle_rx_cqe(struct mbuf *mb, struct mlx5_cqe64 *cqe, drv_spi = MLX5_IPSEC_METADATA_HANDLE(be32_to_cpu(cqe->ft_metadata)); mtag = mr->ipsec_mtag; WARN_ON(mtag == NULL); - mr->ipsec_mtag = NULL; if (mtag != NULL) { mtag->drv_spi = drv_spi; - m_tag_prepend(mb, &mtag->tag); + if (ipsec_accel_fill_xh(ifp, drv_spi, &mtag->xh)) { + m_tag_prepend(mb, &mtag->tag); + mr->ipsec_mtag = NULL; + } } } diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c b/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c index 8b8f2e570245..89d2010656c5 100644 --- a/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c +++ b/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c @@ -42,13 +42,30 @@ static if_snd_tag_free_t mlx5e_tls_rx_snd_tag_free; static if_snd_tag_modify_t mlx5e_tls_rx_snd_tag_modify; +static if_snd_tag_status_str_t mlx5e_tls_rx_snd_tag_status_str; static const struct if_snd_tag_sw mlx5e_tls_rx_snd_tag_sw = { .snd_tag_modify = mlx5e_tls_rx_snd_tag_modify, .snd_tag_free = mlx5e_tls_rx_snd_tag_free, + .snd_tag_status_str = mlx5e_tls_rx_snd_tag_status_str, .type = IF_SND_TAG_TYPE_TLS_RX }; +static const char *mlx5e_tls_rx_progress_params_auth_state_str[] = { + [MLX5E_TLS_RX_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD] = "no_offload", + [MLX5E_TLS_RX_PROGRESS_PARAMS_AUTH_STATE_OFFLOAD] = "offload", + [MLX5E_TLS_RX_PROGRESS_PARAMS_AUTH_STATE_AUTHENTICATION] = + "authentication", +}; + +static const char *mlx5e_tls_rx_progress_params_record_tracker_state_str[] = { + [MLX5E_TLS_RX_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START] = "start", + [MLX5E_TLS_RX_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING] = + "tracking", + [MLX5E_TLS_RX_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING] = + "searching", +}; + MALLOC_DEFINE(M_MLX5E_TLS_RX, "MLX5E_TLS_RX", "MLX5 ethernet HW TLS RX"); /* software TLS RX context */ @@ -250,7 +267,8 @@ mlx5e_tls_rx_send_progress_parameters_sync(struct mlx5e_iq *iq, mtx_unlock(&iq->lock); while (1) { - if (wait_for_completion_timeout(&ptag->progress_complete, hz) != 0) + if (wait_for_completion_timeout(&ptag->progress_complete, + msecs_to_jiffies(1000)) != 0) break; priv = container_of(iq, struct mlx5e_channel, iq)->priv; if (priv->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR || @@ -331,7 +349,8 @@ done: * Zero is returned upon success, else some error happened. 
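+ * The cb argument selects the completion callback: the TCP resync path
+ * keeps passing the original mlx5e_tls_rx_receive_progress_parameters_cb,
+ * while the status-string query added below passes
+ * mlx5e_tls_rx_str_status_cb so that it can sleep waiting on
+ * progress_complete.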
*/ static int -mlx5e_tls_rx_receive_progress_parameters(struct mlx5e_iq *iq, struct mlx5e_tls_rx_tag *ptag) +mlx5e_tls_rx_receive_progress_parameters(struct mlx5e_iq *iq, + struct mlx5e_tls_rx_tag *ptag, mlx5e_iq_callback_t *cb) { struct mlx5e_get_tls_progress_params_wqe *wqe; const u32 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS); @@ -367,7 +386,7 @@ mlx5e_tls_rx_receive_progress_parameters(struct mlx5e_iq *iq, struct mlx5e_tls_r memcpy(iq->doorbell.d32, &wqe->ctrl, sizeof(iq->doorbell.d32)); iq->data[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); - iq->data[pi].callback = &mlx5e_tls_rx_receive_progress_parameters_cb; + iq->data[pi].callback = cb; iq->data[pi].arg = ptag; m_snd_tag_ref(&ptag->tag); @@ -640,7 +659,8 @@ mlx5e_tls_rx_set_params(void *ctx, struct inpcb *inp, const struct tls_session_p return (EINVAL); MLX5_SET64(sw_tls_rx_cntx, ctx, param.initial_record_number, tls_sn_he); - MLX5_SET(sw_tls_rx_cntx, ctx, param.resync_tcp_sn, tcp_sn_he); + MLX5_SET(sw_tls_rx_cntx, ctx, param.resync_tcp_sn, 0); + MLX5_SET(sw_tls_rx_cntx, ctx, progress.next_record_tcp_sn, tcp_sn_he); return (0); } @@ -819,6 +839,7 @@ mlx5e_tls_rx_snd_tag_alloc(if_t ifp, } ptag->flow_rule = flow_rule; + init_completion(&ptag->progress_complete); return (0); @@ -968,7 +989,8 @@ mlx5e_tls_rx_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_param params->tls_rx.tls_rec_length, params->tls_rx.tls_seq_number) && ptag->tcp_resync_pending == 0) { - err = mlx5e_tls_rx_receive_progress_parameters(iq, ptag); + err = mlx5e_tls_rx_receive_progress_parameters(iq, ptag, + &mlx5e_tls_rx_receive_progress_parameters_cb); if (err != 0) { MLX5E_TLS_RX_STAT_INC(ptag, rx_resync_err, 1); } else { @@ -1001,6 +1023,74 @@ mlx5e_tls_rx_snd_tag_free(struct m_snd_tag *pmt) queue_work(priv->tls_rx.wq, &ptag->work); } +static void +mlx5e_tls_rx_str_status_cb(void *arg) +{ + struct mlx5e_tls_rx_tag *ptag; + + ptag = (struct mlx5e_tls_rx_tag *)arg; + complete_all(&ptag->progress_complete); + m_snd_tag_rele(&ptag->tag); +} + +static int +mlx5e_tls_rx_snd_tag_status_str(struct m_snd_tag *pmt, char *buf, size_t *sz) +{ + int err, out_size; + struct mlx5e_iq *iq; + void *buffer; + uint32_t tracker_state_val; + uint32_t auth_state_val; + struct mlx5e_priv *priv; + struct mlx5e_tls_rx_tag *ptag = + container_of(pmt, struct mlx5e_tls_rx_tag, tag); + + if (buf == NULL) + return (0); + + MLX5E_TLS_RX_TAG_LOCK(ptag); + priv = container_of(ptag->tls_rx, struct mlx5e_priv, tls_rx); + iq = mlx5e_tls_rx_get_iq(priv, ptag->flowid, ptag->flowtype); + reinit_completion(&ptag->progress_complete); + err = mlx5e_tls_rx_receive_progress_parameters(iq, ptag, + &mlx5e_tls_rx_str_status_cb); + MLX5E_TLS_RX_TAG_UNLOCK(ptag); + if (err != 0) + return (err); + + for (;;) { + if (wait_for_completion_timeout(&ptag->progress_complete, + msecs_to_jiffies(1000)) != 0) + break; + if (priv->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR || + pci_channel_offline(priv->mdev->pdev) != 0) + return (ENXIO); + } + buffer = mlx5e_tls_rx_get_progress_buffer(ptag); + tracker_state_val = MLX5_GET(tls_progress_params, buffer, + record_tracker_state); + auth_state_val = MLX5_GET(tls_progress_params, buffer, auth_state); + + /* Validate tracker state value is in range */ + if (tracker_state_val > + MLX5E_TLS_RX_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING) + return (EINVAL); + + /* Validate auth state value is in range */ + if (auth_state_val > + MLX5E_TLS_RX_PROGRESS_PARAMS_AUTH_STATE_AUTHENTICATION) + return (EINVAL); + + out_size = snprintf(buf, 
*sz, "tracker_state: %s, auth_state: %s", + mlx5e_tls_rx_progress_params_record_tracker_state_str[ + tracker_state_val], + mlx5e_tls_rx_progress_params_auth_state_str[auth_state_val]); + + if (out_size <= *sz) + *sz = out_size; + return (0); +} + #else int diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c b/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c index 6b53db6fea23..eb569488631a 100644 --- a/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c +++ b/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c @@ -467,7 +467,7 @@ mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, break; } - mlx5e_accel_ipsec_handle_rx(mb, cqe, mr); + mlx5e_accel_ipsec_handle_rx(ifp, mb, cqe, mr); } static inline void diff --git a/sys/dev/nvme/nvme_ctrlr.c b/sys/dev/nvme/nvme_ctrlr.c index 73a7cee4aad0..fd7f00ced14b 100644 --- a/sys/dev/nvme/nvme_ctrlr.c +++ b/sys/dev/nvme/nvme_ctrlr.c @@ -48,7 +48,7 @@ #define B4_CHK_RDY_DELAY_MS 2300 /* work around controller bug */ static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr, - struct nvme_async_event_request *aer); + struct nvme_async_event_request *aer); static void nvme_ctrlr_barrier(struct nvme_controller *ctrlr, int flags) @@ -680,96 +680,6 @@ nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr, } static void -nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl) -{ - struct nvme_async_event_request *aer = arg; - struct nvme_health_information_page *health_info; - struct nvme_ns_list *nsl; - struct nvme_error_information_entry *err; - int i; - - /* - * If the log page fetch for some reason completed with an error, - * don't pass log page data to the consumers. In practice, this case - * should never happen. - */ - if (nvme_completion_is_error(cpl)) - nvme_notify_async_consumers(aer->ctrlr, &aer->cpl, - aer->log_page_id, NULL, 0); - else { - /* Convert data to host endian */ - switch (aer->log_page_id) { - case NVME_LOG_ERROR: - err = (struct nvme_error_information_entry *)aer->log_page_buffer; - for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++) - nvme_error_information_entry_swapbytes(err++); - break; - case NVME_LOG_HEALTH_INFORMATION: - nvme_health_information_page_swapbytes( - (struct nvme_health_information_page *)aer->log_page_buffer); - break; - case NVME_LOG_CHANGED_NAMESPACE: - nvme_ns_list_swapbytes( - (struct nvme_ns_list *)aer->log_page_buffer); - break; - case NVME_LOG_COMMAND_EFFECT: - nvme_command_effects_page_swapbytes( - (struct nvme_command_effects_page *)aer->log_page_buffer); - break; - case NVME_LOG_RES_NOTIFICATION: - nvme_res_notification_page_swapbytes( - (struct nvme_res_notification_page *)aer->log_page_buffer); - break; - case NVME_LOG_SANITIZE_STATUS: - nvme_sanitize_status_page_swapbytes( - (struct nvme_sanitize_status_page *)aer->log_page_buffer); - break; - default: - break; - } - - if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) { - health_info = (struct nvme_health_information_page *) - aer->log_page_buffer; - nvme_ctrlr_log_critical_warnings(aer->ctrlr, - health_info->critical_warning); - /* - * Critical warnings reported through the - * SMART/health log page are persistent, so - * clear the associated bits in the async event - * config so that we do not receive repeated - * notifications for the same event. 
-			 */
-			aer->ctrlr->async_event_config &=
-			    ~health_info->critical_warning;
-			nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
-			    aer->ctrlr->async_event_config, NULL, NULL);
-		} else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE &&
-		    !nvme_use_nvd) {
-			nsl = (struct nvme_ns_list *)aer->log_page_buffer;
-			for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
-				if (nsl->ns[i] > NVME_MAX_NAMESPACES)
-					break;
-				nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
-			}
-		}
-
-		/*
-		 * Pass the cpl data from the original async event completion,
-		 * not the log page fetch.
-		 */
-		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
-		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
-	}
-
-	/*
-	 * Repost another asynchronous event request to replace the one
-	 * that just completed.
-	 */
-	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
-}
-
-static void
 nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
 {
 	struct nvme_async_event_request *aer = arg;
@@ -784,33 +694,18 @@ nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
 		return;
 	}
 
-	/* Associated log page is in bits 23:16 of completion entry dw0. */
+	/*
+	 * Save the completion status. The associated log page id is in bits
+	 * 23:16 of completion entry dw0. Print a message and queue it for
+	 * further processing.
+	 */
+	memcpy(&aer->cpl, cpl, sizeof(*cpl));
 	aer->log_page_id = NVMEV(NVME_ASYNC_EVENT_LOG_PAGE_ID, cpl->cdw0);
-
 	nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
 	    " page 0x%02x)\n", NVMEV(NVME_ASYNC_EVENT_TYPE, cpl->cdw0),
 	    NVMEV(NVME_ASYNC_EVENT_INFO, cpl->cdw0), aer->log_page_id);
-
-	if (is_log_page_id_valid(aer->log_page_id)) {
-		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
-		    aer->log_page_id);
-		memcpy(&aer->cpl, cpl, sizeof(*cpl));
-		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
-		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
-		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
-		    aer);
-		/* Wait to notify consumers until after log page is fetched. */
-	} else {
-		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
-		    NULL, 0);
-
-		/*
-		 * Repost another asynchronous event request to replace the one
-		 * that just completed.
-		 */
-		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
-	}
+	taskqueue_enqueue(aer->ctrlr->taskqueue, &aer->task);
 }
 
 static void
@@ -819,15 +714,21 @@ nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
 {
 	struct nvme_request *req;
 
-	aer->ctrlr = ctrlr;
 	/*
-	 * XXX-MJ this should be M_WAITOK but we might be in a non-sleepable
-	 * callback context. AER completions should be handled on a dedicated
-	 * thread.
+	 * We're racing the reset thread, so let that process submit this again.
+	 * XXX does this really solve that race? And is that race even possible
+	 * since we only reset when we've not heard from the card in a long
+	 * time. Why would we get an AER in the middle of that just before we
+	 * kick off the reset?
+	 */
-	req = nvme_allocate_request_null(M_NOWAIT, nvme_ctrlr_async_event_cb,
+	if (ctrlr->is_resetting)
+		return;
+
+	aer->ctrlr = ctrlr;
+	req = nvme_allocate_request_null(M_WAITOK, nvme_ctrlr_async_event_cb,
 	    aer);
 	aer->req = req;
+	aer->log_page_id = 0;	/* Not a valid page */
 
 	/*
 	 * Disable timeout here, since asynchronous event requests should by
@@ -1203,6 +1104,140 @@ nvme_ctrlr_reset_task(void *arg, int pending)
 	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
 }
 
+static void
+nvme_ctrlr_aer_done(void *arg, const struct nvme_completion *cpl)
+{
+	struct nvme_async_event_request *aer = arg;
+
+	mtx_lock(&aer->mtx);
+	if (nvme_completion_is_error(cpl))
+		aer->log_page_size = (uint32_t)-1;
+	else
+		aer->log_page_size = nvme_ctrlr_get_log_page_size(
+		    aer->ctrlr, aer->log_page_id);
+	wakeup(aer);
+	mtx_unlock(&aer->mtx);
+}
+
+static void
+nvme_ctrlr_aer_task(void *arg, int pending)
+{
+	struct nvme_async_event_request *aer = arg;
+	struct nvme_controller *ctrlr = aer->ctrlr;
+	uint32_t len;
+
+	/*
+	 * We're resetting, so just punt.
+	 */
+	if (ctrlr->is_resetting)
+		return;
+
+	if (!is_log_page_id_valid(aer->log_page_id)) {
+		/*
+		 * There's no log page to fetch: notify the consumers now and
+		 * repost the request at the "out" label below.
+		 */
+		nvme_notify_async_consumers(ctrlr, &aer->cpl, aer->log_page_id,
+		    NULL, 0);
+		goto out;
+	}
+
+	aer->log_page_size = 0;
+	len = nvme_ctrlr_get_log_page_size(aer->ctrlr, aer->log_page_id);
+	nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
+	    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer, len,
+	    nvme_ctrlr_aer_done, aer);
+	mtx_lock(&aer->mtx);
+	while (aer->log_page_size == 0)
+		mtx_sleep(aer, &aer->mtx, PRIBIO, "nvme_pt", 0);
+	mtx_unlock(&aer->mtx);
+
+	if (aer->log_page_size == (uint32_t)-1) {
+		/*
+		 * If the log page fetch for some reason completed with an
+		 * error, don't pass log page data to the consumers. In
+		 * practice, this case should never happen.
+		 */
+		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
+		    aer->log_page_id, NULL, 0);
+		goto out;
+	}
+
+	/* Convert data to host endian */
+	switch (aer->log_page_id) {
+	case NVME_LOG_ERROR: {
+		struct nvme_error_information_entry *err =
+		    (struct nvme_error_information_entry *)aer->log_page_buffer;
+		for (int i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
+			nvme_error_information_entry_swapbytes(err++);
+		break;
+	}
+	case NVME_LOG_HEALTH_INFORMATION:
+		nvme_health_information_page_swapbytes(
+		    (struct nvme_health_information_page *)aer->log_page_buffer);
+		break;
+	case NVME_LOG_CHANGED_NAMESPACE:
+		nvme_ns_list_swapbytes(
+		    (struct nvme_ns_list *)aer->log_page_buffer);
+		break;
+	case NVME_LOG_COMMAND_EFFECT:
+		nvme_command_effects_page_swapbytes(
+		    (struct nvme_command_effects_page *)aer->log_page_buffer);
+		break;
+	case NVME_LOG_RES_NOTIFICATION:
+		nvme_res_notification_page_swapbytes(
+		    (struct nvme_res_notification_page *)aer->log_page_buffer);
+		break;
+	case NVME_LOG_SANITIZE_STATUS:
+		nvme_sanitize_status_page_swapbytes(
+		    (struct nvme_sanitize_status_page *)aer->log_page_buffer);
+		break;
+	default:
+		break;
+	}
+
+	if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
+		struct nvme_health_information_page *health_info =
+		    (struct nvme_health_information_page *)aer->log_page_buffer;
+
+		/*
+		 * Critical warnings reported through the SMART/health log page
+		 * are persistent, so clear the associated bits in the async
+		 * event config so that we do not receive repeated notifications
+		 * for the same event.
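+		 * (For example, once a temperature-threshold warning has
+		 * been reported, that bit would otherwise remain set in
+		 * every subsequent health page fetch and re-arm the same
+		 * notification.)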
+ */ + nvme_ctrlr_log_critical_warnings(aer->ctrlr, + health_info->critical_warning); + aer->ctrlr->async_event_config &= + ~health_info->critical_warning; + nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr, + aer->ctrlr->async_event_config, NULL, NULL); + } else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE) { + struct nvme_ns_list *nsl = + (struct nvme_ns_list *)aer->log_page_buffer; + for (int i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) { + if (nsl->ns[i] > NVME_MAX_NAMESPACES) + break; + nvme_notify_ns(aer->ctrlr, nsl->ns[i]); + } + } + + /* + * Pass the cpl data from the original async event completion, not the + * log page fetch. + */ + nvme_notify_async_consumers(aer->ctrlr, &aer->cpl, + aer->log_page_id, aer->log_page_buffer, aer->log_page_size); + + /* + * Repost another asynchronous event request to replace the one + * that just completed. + */ +out: + nvme_ctrlr_construct_and_submit_aer(ctrlr, aer); +} + /* * Poll all the queues enabled on the device for completion. */ @@ -1574,13 +1609,8 @@ nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev) /* * Create 2 threads for the taskqueue. The reset thread will block when * it detects that the controller has failed until all I/O has been - * failed up the stack. The fail_req task needs to be able to run in - * this case to finish the request failure for some cases. - * - * We could partially solve this race by draining the failed requeust - * queue before proceding to free the sim, though nothing would stop - * new I/O from coming in after we do that drain, but before we reach - * cam_sim_free, so this big hammer is used instead. + * failed up the stack. The second thread is used for AER events, which + * can block, but only briefly for memory and log page fetching. */ ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK, taskqueue_thread_enqueue, &ctrlr->taskqueue); @@ -1590,7 +1620,12 @@ nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev) ctrlr->is_initialized = false; ctrlr->notification_sent = 0; TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr); - STAILQ_INIT(&ctrlr->fail_req); + for (int i = 0; i < NVME_MAX_ASYNC_EVENTS; i++) { + struct nvme_async_event_request *aer = &ctrlr->aer[i]; + + TASK_INIT(&aer->task, 0, nvme_ctrlr_aer_task, aer); + mtx_init(&aer->mtx, "AER mutex", NULL, MTX_DEF); + } ctrlr->is_failed = false; make_dev_args_init(&md_args); @@ -1678,8 +1713,14 @@ nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev) } noadminq: - if (ctrlr->taskqueue) + if (ctrlr->taskqueue) { taskqueue_free(ctrlr->taskqueue); + for (int i = 0; i < NVME_MAX_ASYNC_EVENTS; i++) { + struct nvme_async_event_request *aer = &ctrlr->aer[i]; + + mtx_destroy(&aer->mtx); + } + } if (ctrlr->tag) bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag); diff --git a/sys/dev/nvme/nvme_private.h b/sys/dev/nvme/nvme_private.h index 949e69ec9290..36f00fedc48e 100644 --- a/sys/dev/nvme/nvme_private.h +++ b/sys/dev/nvme/nvme_private.h @@ -123,6 +123,8 @@ struct nvme_request { struct nvme_async_event_request { struct nvme_controller *ctrlr; struct nvme_request *req; + struct task task; + struct mtx mtx; struct nvme_completion cpl; uint32_t log_page_id; uint32_t log_page_size; @@ -307,8 +309,6 @@ struct nvme_controller { bool isr_warned; bool is_initialized; - STAILQ_HEAD(, nvme_request) fail_req; - /* Host Memory Buffer */ int hmb_nchunks; size_t hmb_chunk; diff --git a/sys/dev/nvmf/controller/nvmft_subr.c b/sys/dev/nvmf/controller/nvmft_subr.c index bb2bc0988e81..245971813854 100644 --- 
a/sys/dev/nvmf/controller/nvmft_subr.c +++ b/sys/dev/nvmf/controller/nvmft_subr.c @@ -26,46 +26,6 @@ nvmf_nqn_valid(const char *nqn) len = strnlen(nqn, NVME_NQN_FIELD_SIZE); if (len == 0 || len > NVMF_NQN_MAX_LEN) return (false); - -#ifdef STRICT_CHECKS - /* - * Stricter checks from the spec. Linux does not seem to - * require these. - */ - - /* - * NVMF_NQN_MIN_LEN does not include '.', and require at least - * one character of a domain name. - */ - if (len < NVMF_NQN_MIN_LEN + 2) - return (false); - if (memcmp("nqn.", nqn, strlen("nqn.")) != 0) - return (false); - nqn += strlen("nqn."); - - /* Next 4 digits must be a year. */ - for (u_int i = 0; i < 4; i++) { - if (!isdigit(nqn[i])) - return (false); - } - nqn += 4; - - /* '-' between year and month. */ - if (nqn[0] != '-') - return (false); - nqn++; - - /* 2 digit month. */ - for (u_int i = 0; i < 2; i++) { - if (!isdigit(nqn[i])) - return (false); - } - nqn += 2; - - /* '.' between month and reverse domain name. */ - if (nqn[0] != '.') - return (false); -#endif return (true); } diff --git a/sys/dev/nvmf/host/nvmf.c b/sys/dev/nvmf/host/nvmf.c index dbdd4568bdf1..1ac0d142443b 100644 --- a/sys/dev/nvmf/host/nvmf.c +++ b/sys/dev/nvmf/host/nvmf.c @@ -27,6 +27,7 @@ #include <dev/nvmf/host/nvmf_var.h> static struct cdevsw nvmf_cdevsw; +static struct taskqueue *nvmf_tq; bool nvmf_fail_disconnect = false; SYSCTL_BOOL(_kern_nvmf, OID_AUTO, fail_on_disconnection, CTLFLAG_RWTUN, @@ -34,7 +35,10 @@ SYSCTL_BOOL(_kern_nvmf, OID_AUTO, fail_on_disconnection, CTLFLAG_RWTUN, MALLOC_DEFINE(M_NVMF, "nvmf", "NVMe over Fabrics host"); +static void nvmf_controller_loss_task(void *arg, int pending); static void nvmf_disconnect_task(void *arg, int pending); +static void nvmf_request_reconnect(struct nvmf_softc *sc); +static void nvmf_request_reconnect_task(void *arg, int pending); static void nvmf_shutdown_pre_sync(void *arg, int howto); static void nvmf_shutdown_post_sync(void *arg, int howto); @@ -294,6 +298,9 @@ nvmf_establish_connection(struct nvmf_softc *sc, nvlist_t *nvl) admin = nvlist_get_nvlist(nvl, "admin"); io = nvlist_get_nvlist_array(nvl, "io", &num_io_queues); kato = dnvlist_get_number(nvl, "kato", 0); + sc->reconnect_delay = dnvlist_get_number(nvl, "reconnect_delay", 0); + sc->controller_loss_timeout = dnvlist_get_number(nvl, + "controller_loss_timeout", 0); /* Setup the admin queue. 
*/ sc->admin = nvmf_init_qp(sc, trtype, admin, "admin queue", 0); @@ -504,6 +511,10 @@ nvmf_attach(device_t dev) callout_init(&sc->ka_tx_timer, 1); sx_init(&sc->connection_lock, "nvmf connection"); TASK_INIT(&sc->disconnect_task, 0, nvmf_disconnect_task, sc); + TIMEOUT_TASK_INIT(nvmf_tq, &sc->controller_loss_task, 0, + nvmf_controller_loss_task, sc); + TIMEOUT_TASK_INIT(nvmf_tq, &sc->request_reconnect_task, 0, + nvmf_request_reconnect_task, sc); oid = SYSCTL_ADD_NODE(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "ioq", @@ -603,7 +614,9 @@ out: nvmf_destroy_aer(sc); - taskqueue_drain(taskqueue_thread, &sc->disconnect_task); + taskqueue_drain_timeout(nvmf_tq, &sc->request_reconnect_task); + taskqueue_drain_timeout(nvmf_tq, &sc->controller_loss_task); + taskqueue_drain(nvmf_tq, &sc->disconnect_task); sx_destroy(&sc->connection_lock); nvlist_destroy(sc->rparams); free(sc->cdata, M_NVMF); @@ -613,7 +626,7 @@ out: void nvmf_disconnect(struct nvmf_softc *sc) { - taskqueue_enqueue(taskqueue_thread, &sc->disconnect_task); + taskqueue_enqueue(nvmf_tq, &sc->disconnect_task); } static void @@ -676,6 +689,74 @@ nvmf_disconnect_task(void *arg, int pending __unused) nvmf_destroy_qp(sc->admin); sc->admin = NULL; + if (sc->reconnect_delay != 0) + nvmf_request_reconnect(sc); + if (sc->controller_loss_timeout != 0) + taskqueue_enqueue_timeout(nvmf_tq, + &sc->controller_loss_task, sc->controller_loss_timeout * + hz); + + sx_xunlock(&sc->connection_lock); +} + +static void +nvmf_controller_loss_task(void *arg, int pending) +{ + struct nvmf_softc *sc = arg; + device_t dev; + int error; + + bus_topo_lock(); + sx_xlock(&sc->connection_lock); + if (sc->admin != NULL || sc->detaching) { + /* Reconnected or already detaching. */ + sx_xunlock(&sc->connection_lock); + bus_topo_unlock(); + return; + } + + sc->controller_timedout = true; + sx_xunlock(&sc->connection_lock); + + /* + * XXX: Doing this from here is a bit ugly. We don't have an + * extra reference on `dev` but bus_topo_lock should block any + * concurrent device_delete_child invocations. + */ + dev = sc->dev; + error = device_delete_child(root_bus, dev); + if (error != 0) + device_printf(dev, + "failed to detach after controller loss: %d\n", error); + bus_topo_unlock(); +} + +static void +nvmf_request_reconnect(struct nvmf_softc *sc) +{ + char buf[64]; + + sx_assert(&sc->connection_lock, SX_LOCKED); + + snprintf(buf, sizeof(buf), "name=\"%s\"", device_get_nameunit(sc->dev)); + devctl_notify("nvme", "controller", "RECONNECT", buf); + taskqueue_enqueue_timeout(nvmf_tq, &sc->request_reconnect_task, + sc->reconnect_delay * hz); +} + +static void +nvmf_request_reconnect_task(void *arg, int pending) +{ + struct nvmf_softc *sc = arg; + + sx_xlock(&sc->connection_lock); + if (sc->admin != NULL || sc->detaching || sc->controller_timedout) { + /* Reconnected or already detaching. 
*/ + sx_xunlock(&sc->connection_lock); + return; + } + + nvmf_request_reconnect(sc); sx_xunlock(&sc->connection_lock); } @@ -699,7 +780,7 @@ nvmf_reconnect_host(struct nvmf_softc *sc, struct nvmf_ioc_nv *nv) } sx_xlock(&sc->connection_lock); - if (sc->admin != NULL || sc->detaching) { + if (sc->admin != NULL || sc->detaching || sc->controller_timedout) { error = EBUSY; goto out; } @@ -745,6 +826,9 @@ nvmf_reconnect_host(struct nvmf_softc *sc, struct nvmf_ioc_nv *nv) nvmf_reconnect_sim(sc); nvmf_rescan_all_ns(sc); + + taskqueue_cancel_timeout(nvmf_tq, &sc->request_reconnect_task, NULL); + taskqueue_cancel_timeout(nvmf_tq, &sc->controller_loss_task, NULL); out: sx_xunlock(&sc->connection_lock); nvlist_destroy(nvl); @@ -852,7 +936,21 @@ nvmf_detach(device_t dev) } free(sc->io, M_NVMF); - taskqueue_drain(taskqueue_thread, &sc->disconnect_task); + taskqueue_drain(nvmf_tq, &sc->disconnect_task); + if (taskqueue_cancel_timeout(nvmf_tq, &sc->request_reconnect_task, + NULL) != 0) + taskqueue_drain_timeout(nvmf_tq, &sc->request_reconnect_task); + + /* + * Don't cancel/drain the controller loss task if that task + * has fired and is triggering the detach. + */ + if (!sc->controller_timedout) { + if (taskqueue_cancel_timeout(nvmf_tq, &sc->controller_loss_task, + NULL) != 0) + taskqueue_drain_timeout(nvmf_tq, + &sc->controller_loss_task); + } if (sc->admin != NULL) nvmf_destroy_qp(sc->admin); @@ -1154,14 +1252,25 @@ static struct cdevsw nvmf_cdevsw = { static int nvmf_modevent(module_t mod, int what, void *arg) { + int error; + switch (what) { case MOD_LOAD: - return (nvmf_ctl_load()); + error = nvmf_ctl_load(); + if (error != 0) + return (error); + + nvmf_tq = taskqueue_create("nvmf", M_WAITOK | M_ZERO, + taskqueue_thread_enqueue, &nvmf_tq); + taskqueue_start_threads(&nvmf_tq, 1, PWAIT, "nvmf taskq"); + return (0); case MOD_QUIESCE: return (0); case MOD_UNLOAD: nvmf_ctl_unload(); destroy_dev_drain(&nvmf_cdevsw); + if (nvmf_tq != NULL) + taskqueue_free(nvmf_tq); return (0); default: return (EOPNOTSUPP); diff --git a/sys/dev/nvmf/host/nvmf_var.h b/sys/dev/nvmf/host/nvmf_var.h index e45a31f413a4..606245b3969c 100644 --- a/sys/dev/nvmf/host/nvmf_var.h +++ b/sys/dev/nvmf/host/nvmf_var.h @@ -75,9 +75,15 @@ struct nvmf_softc { struct callout ka_rx_timer; sbintime_t ka_rx_sbt; + struct timeout_task request_reconnect_task; + struct timeout_task controller_loss_task; + uint32_t reconnect_delay; + uint32_t controller_loss_timeout; + struct sx connection_lock; struct task disconnect_task; bool detaching; + bool controller_timedout; u_int num_aer; struct nvmf_aer *aer; diff --git a/sys/dev/nvmf/nvmf.h b/sys/dev/nvmf/nvmf.h index d4e7b1511e9d..9b2b4c1dea40 100644 --- a/sys/dev/nvmf/nvmf.h +++ b/sys/dev/nvmf/nvmf.h @@ -27,6 +27,13 @@ #define NVMF_NN (1024) /* + * Default timeouts for Fabrics hosts. These match values used by + * Linux. + */ +#define NVMF_DEFAULT_RECONNECT_DELAY 10 +#define NVMF_DEFAULT_CONTROLLER_LOSS 600 + +/* * (data, size) is the userspace buffer for a packed nvlist. 
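 * (As a hypothetical sketch of the producer side using nv(9):
 *
 *	nvl = nvlist_create(0);
 *	nvlist_add_number(nvl, "reconnect_delay",
 *	    NVMF_DEFAULT_RECONNECT_DELAY);
 *	nv.data = nvlist_pack(nvl, &nv.size);
 *
 * where nv is the struct nvmf_ioc_nv handed to the ioctl.)
 *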
* * For requests that copyout an nvlist, len is the amount of data @@ -68,6 +75,8 @@ struct nvmf_ioc_nv { * * number trtype * number kato (optional) + * number reconnect_delay (optional) + * number controller_loss_timeout (optional) * qpair handoff nvlist admin * qpair handoff nvlist array io * binary cdata struct nvme_controller_data @@ -81,6 +90,8 @@ struct nvmf_ioc_nv { * string hostnqn * number num_io_queues * number kato (optional) + * number reconnect_delay (optional) + * number controller_loss_timeout (optional) * number io_qsize * bool sq_flow_control * diff --git a/sys/dev/ofw/ofw_bus_subr.c b/sys/dev/ofw/ofw_bus_subr.c index 4d0479dfb957..b99d784929bc 100644 --- a/sys/dev/ofw/ofw_bus_subr.c +++ b/sys/dev/ofw/ofw_bus_subr.c @@ -634,11 +634,89 @@ ofw_bus_find_iparent(phandle_t node) return (iparent); } +static phandle_t +ofw_bus_search_iparent(phandle_t node) +{ + phandle_t iparent; + + do { + if (OF_getencprop(node, "interrupt-parent", &iparent, + sizeof(iparent)) > 0) { + node = OF_node_from_xref(iparent); + } else { + node = OF_parent(node); + } + if (node == 0) + return (0); + } while (!OF_hasprop(node, "#interrupt-cells")); + + return (OF_xref_from_node(node)); +} + +static int +ofw_bus_traverse_imap(phandle_t inode, phandle_t node, uint32_t *intr, + int intrsz, pcell_t *res, int ressz, phandle_t *iparentp) +{ + struct ofw_bus_iinfo ii; + void *reg; + uint32_t *intrp; + phandle_t iparent; + int rv = 0; + + /* We already have an interrupt controller */ + if (OF_hasprop(node, "interrupt-controller")) + return (0); + + intrp = malloc(intrsz, M_OFWPROP, M_WAITOK); + memcpy(intrp, intr, intrsz); + + while (true) { + /* There is no interrupt-map to follow */ + if (!OF_hasprop(inode, "interrupt-map")) { + free(intrp, M_OFWPROP); + return (0); + } + + memset(&ii, 0, sizeof(ii)); + ofw_bus_setup_iinfo(inode, &ii, sizeof(cell_t)); + + reg = NULL; + if (ii.opi_addrc > 0) + reg = malloc(ii.opi_addrc, M_OFWPROP, M_WAITOK); + + rv = ofw_bus_lookup_imap(node, &ii, reg, ii.opi_addrc, intrp, + intrsz, res, ressz, &iparent); + + free(reg, M_OFWPROP); + free(ii.opi_imap, M_OFWPROP); + free(ii.opi_imapmsk, M_OFWPROP); + free(intrp, M_OFWPROP); + + if (rv == 0) + return (0); + + node = inode; + inode = OF_node_from_xref(iparent); + + /* Stop when we have an interrupt controller */ + if (OF_hasprop(inode, "interrupt-controller")) { + *iparentp = iparent; + return (rv); + } + + intrsz = rv * sizeof(pcell_t); + intrp = malloc(intrsz, M_OFWPROP, M_WAITOK); + memcpy(intrp, res, intrsz); + } +} + int ofw_bus_intr_to_rl(device_t dev, phandle_t node, struct resource_list *rl, int *rlen) { - phandle_t iparent; + phandle_t iparent, iparent_node; + uint32_t result[16]; + uint32_t intrpcells, *intrp; uint32_t icells, *intr; int err, i, irqnum, nintr, rid; bool extended; @@ -646,15 +724,16 @@ ofw_bus_intr_to_rl(device_t dev, phandle_t node, nintr = OF_getencprop_alloc_multi(node, "interrupts", sizeof(*intr), (void **)&intr); if (nintr > 0) { - iparent = ofw_bus_find_iparent(node); + iparent = ofw_bus_search_iparent(node); if (iparent == 0) { device_printf(dev, "No interrupt-parent found, " "assuming direct parent\n"); iparent = OF_parent(node); iparent = OF_xref_from_node(iparent); } - if (OF_searchencprop(OF_node_from_xref(iparent), - "#interrupt-cells", &icells, sizeof(icells)) == -1) { + iparent_node = OF_node_from_xref(iparent); + if (OF_searchencprop(iparent_node, "#interrupt-cells", &icells, + sizeof(icells)) == -1) { device_printf(dev, "Missing #interrupt-cells " "property, assuming <1>\n"); icells = 1; 
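For illustration (hypothetical numbers): a device node with interrupts = <5> beneath a nexus whose interrupt-map rewrites child specifier 5 into the three-cell parent specifier <0 37 4> of the interrupt controller causes ofw_bus_traverse_imap() above to copy <0 37 4> into res, return 3, and store the controller's xref in *iparentp; the caller below then hands that specifier to ofw_bus_map_intr().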
@@ -677,7 +756,8 @@ ofw_bus_intr_to_rl(device_t dev, phandle_t node, for (i = 0; i < nintr; i += icells) { if (extended) { iparent = intr[i++]; - if (OF_searchencprop(OF_node_from_xref(iparent), + iparent_node = OF_node_from_xref(iparent); + if (OF_searchencprop(iparent_node, "#interrupt-cells", &icells, sizeof(icells)) == -1) { device_printf(dev, "Missing #interrupt-cells " "property\n"); @@ -691,7 +771,16 @@ ofw_bus_intr_to_rl(device_t dev, phandle_t node, break; } } - irqnum = ofw_bus_map_intr(dev, iparent, icells, &intr[i]); + + intrp = &intr[i]; + intrpcells = ofw_bus_traverse_imap(iparent_node, node, intrp, + icells * sizeof(intr[0]), result, sizeof(result), &iparent); + if (intrpcells > 0) + intrp = result; + else + intrpcells = icells; + + irqnum = ofw_bus_map_intr(dev, iparent, intrpcells, intrp); resource_list_add(rl, SYS_RES_IRQ, rid++, irqnum, irqnum, 1); } if (rlen != NULL) diff --git a/sys/dev/pci/pci_iov.c b/sys/dev/pci/pci_iov.c index 1f72391fb6b4..0efcfeac9eff 100644 --- a/sys/dev/pci/pci_iov.c +++ b/sys/dev/pci/pci_iov.c @@ -734,11 +734,18 @@ pci_iov_config(struct cdev *cdev, struct pci_iov_arg *arg) first_rid = pci_get_rid(dev) + rid_off; last_rid = first_rid + (num_vfs - 1) * rid_stride; - /* We don't yet support allocating extra bus numbers for VFs. */ if (pci_get_bus(dev) != PCI_RID2BUS(last_rid)) { - device_printf(dev, "not enough PCIe bus numbers for VFs\n"); - error = ENOSPC; - goto out; + int rid = 0; + uint16_t last_rid_bus = PCI_RID2BUS(last_rid); + + iov->iov_bus_res = bus_alloc_resource(bus, PCI_RES_BUS, &rid, + last_rid_bus, last_rid_bus, 1, RF_ACTIVE); + if (iov->iov_bus_res == NULL) { + device_printf(dev, + "failed to allocate PCIe bus number for VFs\n"); + error = ENOSPC; + goto out; + } } if (!ari_enabled && PCI_RID2SLOT(last_rid) != 0) { @@ -786,6 +793,11 @@ out: } } + if (iov->iov_bus_res != NULL) { + bus_release_resource(bus, iov->iov_bus_res); + iov->iov_bus_res = NULL; + } + if (iov->iov_flags & IOV_RMAN_INITED) { rman_fini(&iov->rman); iov->iov_flags &= ~IOV_RMAN_INITED; @@ -896,6 +908,11 @@ pci_iov_delete_iov_children(struct pci_devinfo *dinfo) } } + if (iov->iov_bus_res != NULL) { + bus_release_resource(bus, iov->iov_bus_res); + iov->iov_bus_res = NULL; + } + if (iov->iov_flags & IOV_RMAN_INITED) { rman_fini(&iov->rman); iov->iov_flags &= ~IOV_RMAN_INITED; diff --git a/sys/dev/pci/pci_iov_private.h b/sys/dev/pci/pci_iov_private.h index 7ae2219b936d..ecf0a9b21be5 100644 --- a/sys/dev/pci/pci_iov_private.h +++ b/sys/dev/pci/pci_iov_private.h @@ -39,6 +39,8 @@ struct pcicfg_iov { struct cdev *iov_cdev; nvlist_t *iov_schema; + struct resource *iov_bus_res; + struct pci_iov_bar iov_bar[PCIR_MAX_BAR_0 + 1]; struct rman rman; char rman_name[64]; diff --git a/sys/dev/qlnx/qlnxe/qlnx_os.c b/sys/dev/qlnx/qlnxe/qlnx_os.c index 05ec69a70dfe..4ad190374f87 100644 --- a/sys/dev/qlnx/qlnxe/qlnx_os.c +++ b/sys/dev/qlnx/qlnxe/qlnx_os.c @@ -30,6 +30,8 @@ * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131. 
*/ +#include "opt_inet.h" + #include <sys/cdefs.h> #include "qlnx_os.h" #include "bcm_osal.h" @@ -2306,8 +2308,6 @@ qlnx_init_ifnet(device_t dev, qlnx_host_t *ha) else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) if_setbaudrate(ifp, IF_Gbps(100)); - if_setcapabilities(ifp, IFCAP_LINKSTATE); - if_setinitfn(ifp, qlnx_init); if_setsoftc(ifp, ha); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); @@ -2341,7 +2341,6 @@ qlnx_init_ifnet(device_t dev, qlnx_host_t *ha) if_setcapabilities(ifp, IFCAP_HWCSUM); if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0); - if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0); if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0); if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0); @@ -2350,6 +2349,8 @@ qlnx_init_ifnet(device_t dev, qlnx_host_t *ha) if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0); if_setcapabilitiesbit(ifp, IFCAP_TSO6, 0); if_setcapabilitiesbit(ifp, IFCAP_LRO, 0); + if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE, 0); + if_setcapabilitiesbit(ifp, IFCAP_HWSTATS, 0); if_sethwtsomax(ifp, QLNX_MAX_TSO_FRAME_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)); @@ -2778,7 +2779,7 @@ qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data) if (!p_ptt) { QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); - ret = -1; + ret = ERESTART; break; } @@ -2789,7 +2790,7 @@ qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data) ecore_ptt_release(p_hwfn, p_ptt); if (ret) { - ret = -1; + ret = ENODEV; break; } diff --git a/sys/dev/random/fortuna.c b/sys/dev/random/fortuna.c index c4282c723a44..8363de99a60a 100644 --- a/sys/dev/random/fortuna.c +++ b/sys/dev/random/fortuna.c @@ -341,6 +341,13 @@ random_fortuna_process_event(struct harvest_event *event) u_int pl; RANDOM_RESEED_LOCK(); + /* + * Run SP 800-90B health tests on the source if so configured. + */ + if (!random_harvest_healthtest(event)) { + RANDOM_RESEED_UNLOCK(); + return; + } /*- * FS&K - P_i = P_i|<harvested stuff> * Accumulate the event into the appropriate pool diff --git a/sys/dev/random/random_harvestq.c b/sys/dev/random/random_harvestq.c index ee37bda36496..c7762967c4fb 100644 --- a/sys/dev/random/random_harvestq.c +++ b/sys/dev/random/random_harvestq.c @@ -88,6 +88,8 @@ static void random_sources_feed(void); static __read_mostly bool epoch_inited; static __read_mostly epoch_t rs_epoch; +static const char *random_source_descr[ENTROPYSOURCE]; + /* * How many events to queue up. We create this many items in * an 'empty' queue, then transfer them to the 'harvest' queue with @@ -131,30 +133,14 @@ static struct harvest_context { /* The context of the kernel thread processing harvested entropy */ struct proc *hc_kthread_proc; /* - * Lockless ring buffer holding entropy events - * If ring.in == ring.out, - * the buffer is empty. - * If ring.in != ring.out, - * the buffer contains harvested entropy. - * If (ring.in + 1) == ring.out (mod RANDOM_RING_MAX), - * the buffer is full. - * - * NOTE: ring.in points to the last added element, - * and ring.out points to the last consumed element. - * - * The ring.in variable needs locking as there are multiple - * sources to the ring. Only the sources may change ring.in, - * but the consumer may examine it. - * - * The ring.out variable does not need locking as there is - * only one consumer. Only the consumer may change ring.out, - * but the sources may examine it. + * A pair of buffers for queued events. New events are added to the + * active queue while the kthread processes the other one in parallel. 
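+ * Concretely, random_kthread() performs the swap under RANDOM_HARVEST_LOCK():
+ * it records the active buffer's fill level, resets pos to zero, points
+ * hc_active_buf at the other buffer, and only then drops the lock to process
+ * the recorded events, so producers never wait on event processing, only on
+ * the brief swap.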
*/ - struct entropy_ring { + struct entropy_buffer { struct harvest_event ring[RANDOM_RING_MAX]; - volatile u_int in; - volatile u_int out; - } hc_entropy_ring; + u_int pos; + } hc_entropy_buf[2]; + u_int hc_active_buf; struct fast_entropy_accumulator { volatile u_int pos; uint32_t buf[RANDOM_ACCUM_MAX]; @@ -183,37 +169,41 @@ random_harvestq_fast_process_event(struct harvest_event *event) static void random_kthread(void) { - u_int maxloop, ring_out, i; + struct harvest_context *hc; - /* - * Locking is not needed as this is the only place we modify ring.out, and - * we only examine ring.in without changing it. Both of these are volatile, - * and this is a unique thread. - */ + hc = &harvest_context; for (random_kthread_control = 1; random_kthread_control;) { - /* Deal with events, if any. Restrict the number we do in one go. */ - maxloop = RANDOM_RING_MAX; - while (harvest_context.hc_entropy_ring.out != harvest_context.hc_entropy_ring.in) { - ring_out = (harvest_context.hc_entropy_ring.out + 1)%RANDOM_RING_MAX; - random_harvestq_fast_process_event(harvest_context.hc_entropy_ring.ring + ring_out); - harvest_context.hc_entropy_ring.out = ring_out; - if (!--maxloop) - break; - } + struct entropy_buffer *buf; + u_int entries; + + /* Deal with queued events. */ + RANDOM_HARVEST_LOCK(); + buf = &hc->hc_entropy_buf[hc->hc_active_buf]; + entries = buf->pos; + buf->pos = 0; + hc->hc_active_buf = (hc->hc_active_buf + 1) % + nitems(hc->hc_entropy_buf); + RANDOM_HARVEST_UNLOCK(); + for (u_int i = 0; i < entries; i++) + random_harvestq_fast_process_event(&buf->ring[i]); + + /* Poll sources of noise. */ random_sources_feed(); + /* XXX: FIX!! Increase the high-performance data rate? Need some measurements first. */ - for (i = 0; i < RANDOM_ACCUM_MAX; i++) { - if (harvest_context.hc_entropy_fast_accumulator.buf[i]) { - random_harvest_direct(harvest_context.hc_entropy_fast_accumulator.buf + i, sizeof(harvest_context.hc_entropy_fast_accumulator.buf[0]), RANDOM_UMA); - harvest_context.hc_entropy_fast_accumulator.buf[i] = 0; + for (u_int i = 0; i < RANDOM_ACCUM_MAX; i++) { + if (hc->hc_entropy_fast_accumulator.buf[i]) { + random_harvest_direct(&hc->hc_entropy_fast_accumulator.buf[i], + sizeof(hc->hc_entropy_fast_accumulator.buf[0]), RANDOM_UMA); + hc->hc_entropy_fast_accumulator.buf[i] = 0; } } /* XXX: FIX!! This is a *great* place to pass hardware/live entropy to random(9) */ - tsleep_sbt(&harvest_context.hc_kthread_proc, 0, "-", + tsleep_sbt(&hc->hc_kthread_proc, 0, "-", SBT_1S/RANDOM_KTHREAD_HZ, 0, C_PREL(1)); } random_kthread_control = -1; - wakeup(&harvest_context.hc_kthread_proc); + wakeup(&hc->hc_kthread_proc); kproc_exit(0); /* NOTREACHED */ } @@ -311,6 +301,230 @@ random_sources_feed(void) explicit_bzero(entropy, sizeof(entropy)); } +/* + * State used for conducting NIST SP 800-90B health tests on entropy sources. 
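+ * Two continuous tests from SP 800-90B section 4.4 are implemented below: the
+ * repetition count test (RCT, section 4.4.1) fails once ht_rct_limit
+ * consecutive identical samples are observed, and the adaptive proportion
+ * test (APT, section 4.4.2) fails when one value recurs ht_apt_cutoff times
+ * within a window of RANDOM_SELFTEST_APT_WINDOW samples. A "sample" here is
+ * the harvested entropy plus its cycle-counter timestamp, as flattened by
+ * copy_event().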
+ */ +static struct health_test_softc { + uint32_t ht_rct_value[HARVESTSIZE + 1]; + u_int ht_rct_count; /* number of samples with the same value */ + u_int ht_rct_limit; /* constant after init */ + + uint32_t ht_apt_value[HARVESTSIZE + 1]; + u_int ht_apt_count; /* number of samples with the same value */ + u_int ht_apt_seq; /* sequence number of the last sample */ + u_int ht_apt_cutoff; /* constant after init */ + + uint64_t ht_total_samples; + bool ondemand; /* Set to true to restart the state machine */ + enum { + INIT = 0, /* initial state */ + DISABLED, /* health checking is disabled */ + STARTUP, /* doing startup tests, samples are discarded */ + STEADY, /* steady-state operation */ + FAILED, /* health check failed, discard samples */ + } ht_state; +} healthtest[ENTROPYSOURCE]; + +#define RANDOM_SELFTEST_STARTUP_SAMPLES 1024 /* 4.3, requirement 4 */ +#define RANDOM_SELFTEST_APT_WINDOW 512 /* 4.4.2 */ + +static void +copy_event(uint32_t dst[static HARVESTSIZE + 1], + const struct harvest_event *event) +{ + memset(dst, 0, sizeof(uint32_t) * (HARVESTSIZE + 1)); + memcpy(dst, event->he_entropy, event->he_size); + dst[HARVESTSIZE] = event->he_somecounter; +} + +static void +random_healthtest_rct_init(struct health_test_softc *ht, + const struct harvest_event *event) +{ + ht->ht_rct_count = 1; + copy_event(ht->ht_rct_value, event); +} + +/* + * Apply the repetition count test to a sample. + * + * Return false if the test failed, i.e., we observed >= C consecutive samples + * with the same value, and true otherwise. + */ +static bool +random_healthtest_rct_next(struct health_test_softc *ht, + const struct harvest_event *event) +{ + uint32_t val[HARVESTSIZE + 1]; + + copy_event(val, event); + if (memcmp(val, ht->ht_rct_value, sizeof(ht->ht_rct_value)) != 0) { + ht->ht_rct_count = 1; + memcpy(ht->ht_rct_value, val, sizeof(ht->ht_rct_value)); + return (true); + } else { + ht->ht_rct_count++; + return (ht->ht_rct_count < ht->ht_rct_limit); + } +} + +static void +random_healthtest_apt_init(struct health_test_softc *ht, + const struct harvest_event *event) +{ + ht->ht_apt_count = 1; + ht->ht_apt_seq = 1; + copy_event(ht->ht_apt_value, event); +} + +static bool +random_healthtest_apt_next(struct health_test_softc *ht, + const struct harvest_event *event) +{ + uint32_t val[HARVESTSIZE + 1]; + + if (ht->ht_apt_seq == 0) { + random_healthtest_apt_init(ht, event); + return (true); + } + + copy_event(val, event); + if (memcmp(val, ht->ht_apt_value, sizeof(ht->ht_apt_value)) == 0) { + ht->ht_apt_count++; + if (ht->ht_apt_count >= ht->ht_apt_cutoff) + return (false); + } + + ht->ht_apt_seq++; + if (ht->ht_apt_seq == RANDOM_SELFTEST_APT_WINDOW) + ht->ht_apt_seq = 0; + + return (true); +} + +/* + * Run the health tests for the given event. This is assumed to be called from + * a serialized context. + */ +bool +random_harvest_healthtest(const struct harvest_event *event) +{ + struct health_test_softc *ht; + + ht = &healthtest[event->he_source]; + + /* + * Was on-demand testing requested? Restart the state machine if so, + * restarting the startup tests. + */ + if (atomic_load_bool(&ht->ondemand)) { + atomic_store_bool(&ht->ondemand, false); + ht->ht_state = INIT; + } + + switch (ht->ht_state) { + case __predict_false(INIT): + /* Store the first sample and initialize test state. */ + random_healthtest_rct_init(ht, event); + random_healthtest_apt_init(ht, event); + ht->ht_total_samples = 0; + ht->ht_state = STARTUP; + return (false); + case DISABLED: + /* No health testing for this source. 
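+ * random_healthtest_init() parks a source in this state when testing is
+ * disabled globally or the source is exempt (RANDOM_CACHED and
+ * RANDOM_PURE_VMGENID below).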
 */ + return (true); + case STEADY: + case STARTUP: + ht->ht_total_samples++; + if (random_healthtest_rct_next(ht, event) && + random_healthtest_apt_next(ht, event)) { + if (ht->ht_state == STARTUP && + ht->ht_total_samples >= + RANDOM_SELFTEST_STARTUP_SAMPLES) { + printf( + "random: health test passed for source %s\n", + random_source_descr[event->he_source]); + ht->ht_state = STEADY; + } + return (ht->ht_state == STEADY); + } + ht->ht_state = FAILED; + printf( + "random: health test failed for source %s, discarding samples\n", + random_source_descr[event->he_source]); + /* FALLTHROUGH */ + case FAILED: + return (false); + } +} + +static bool nist_healthtest_enabled = false; +SYSCTL_BOOL(_kern_random, OID_AUTO, nist_healthtest_enabled, + CTLFLAG_RDTUN, &nist_healthtest_enabled, 0, + "Enable NIST SP 800-90B health tests for noise sources"); + +static void +random_healthtest_init(enum random_entropy_source source) +{ + struct health_test_softc *ht; + + ht = &healthtest[source]; + KASSERT(ht->ht_state == INIT, + ("%s: health test state is %d for source %d", + __func__, ht->ht_state, source)); + + /* + * If health-testing is enabled, validate all sources except CACHED and + * VMGENID: they are deterministic sources used only a small, fixed + * number of times, so statistical testing is not applicable. + */ + if (!nist_healthtest_enabled || + source == RANDOM_CACHED || source == RANDOM_PURE_VMGENID) { + ht->ht_state = DISABLED; + return; + } + + /* + * Set cutoff values for the two tests, assuming that each sample has + * min-entropy of 1 bit and allowing for an error rate of 1 in 2^{34}. + * With a sample rate of RANDOM_KTHREAD_HZ, we expect to see a false + * positive once in ~54.5 years. + * + * The RCT limit comes from the formula in section 4.4.1. + * + * The APT cutoff is calculated using the formula in section 4.4.2 + * footnote 10 with the window size changed from 512 to 511, since the + * test as written counts the number of samples equal to the first + * sample in the window, and thus tests W-1 samples. 
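+ * Worked out for H = 1 bit and alpha = 2^-34: the section 4.4.1 formula
+ * gives C = 1 + ceil(-log2(alpha) / H) = 1 + 34 = 35, and the binomial
+ * quantile (CRITBINOM) formula of footnote 10, evaluated with W = 511 and
+ * p = 2^-H = 1/2, gives 330; these are the two constants assigned below.
+ * The ~54.5 years follows from 2^34 samples arriving at RANDOM_KTHREAD_HZ
+ * (10) samples per second, about 1.7 * 10^9 seconds.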
+ */ + ht->ht_rct_limit = 35; + ht->ht_apt_cutoff = 330; +} + +static int +random_healthtest_ondemand(SYSCTL_HANDLER_ARGS) +{ + u_int mask, source; + int error; + + mask = 0; + error = sysctl_handle_int(oidp, &mask, 0, req); + if (error != 0 || req->newptr == NULL) + return (error); + + while (mask != 0) { + source = ffs(mask) - 1; + if (source < nitems(healthtest)) + atomic_store_bool(&healthtest[source].ondemand, true); + mask &= ~(1u << source); + } + return (0); +} +SYSCTL_PROC(_kern_random, OID_AUTO, nist_healthtest_ondemand, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0, + random_healthtest_ondemand, "I", + "Re-run NIST SP 800-90B startup health tests for a noise source"); + static int random_check_uint_harvestmask(SYSCTL_HANDLER_ARGS) { @@ -374,7 +588,8 @@ static const char *random_source_descr[ENTROPYSOURCE] = { [RANDOM_SWI] = "SWI", [RANDOM_FS_ATIME] = "FS_ATIME", [RANDOM_UMA] = "UMA", - [RANDOM_CALLOUT] = "CALLOUT", /* ENVIRONMENTAL_END */ + [RANDOM_CALLOUT] = "CALLOUT", + [RANDOM_RANDOMDEV] = "RANDOMDEV", /* ENVIRONMENTAL_END */ [RANDOM_PURE_OCTEON] = "PURE_OCTEON", /* PURE_START */ [RANDOM_PURE_SAFE] = "PURE_SAFE", [RANDOM_PURE_GLXSB] = "PURE_GLXSB", @@ -435,7 +650,10 @@ random_harvestq_init(void *unused __unused) hc_source_mask = almost_everything_mask; RANDOM_HARVEST_INIT_LOCK(); - harvest_context.hc_entropy_ring.in = harvest_context.hc_entropy_ring.out = 0; + harvest_context.hc_active_buf = 0; + + for (int i = 0; i < ENTROPYSOURCE; i++) + random_healthtest_init(i); } SYSINIT(random_device_h_init, SI_SUB_RANDOM, SI_ORDER_THIRD, random_harvestq_init, NULL); @@ -540,9 +758,9 @@ SYSUNINIT(random_device_h_init, SI_SUB_RANDOM, SI_ORDER_THIRD, random_harvestq_d * This is supposed to be fast; do not do anything slow in here! * It is also illegal (and morally reprehensible) to insert any * high-rate data here. "High-rate" is defined as a data source - * that will usually cause lots of failures of the "Lockless read" - * check a few lines below. This includes the "always-on" sources - * like the Intel "rdrand" or the VIA Nehamiah "xstore" sources. + * that is likely to fill up the buffer in much less than 100ms. + * This includes the "always-on" sources like the Intel "rdrand" + * or the VIA Nehemiah "xstore" sources. 
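+ * (The 100ms figure is the kthread's drain period: random_kthread() sleeps
+ * SBT_1S / RANDOM_KTHREAD_HZ between passes, i.e. 100ms at 10 Hz.)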
*/ /* XXXRW: get_cyclecount() is cheap on most modern hardware, where cycle * counters are built in, but on older hardware it will do a real time clock @@ -551,28 +769,29 @@ SYSUNINIT(random_device_h_init, SI_SUB_RANDOM, SI_ORDER_THIRD, random_harvestq_d void random_harvest_queue_(const void *entropy, u_int size, enum random_entropy_source origin) { + struct harvest_context *hc; + struct entropy_buffer *buf; struct harvest_event *event; - u_int ring_in; - KASSERT(origin >= RANDOM_START && origin < ENTROPYSOURCE, ("%s: origin %d invalid\n", __func__, origin)); + KASSERT(origin >= RANDOM_START && origin < ENTROPYSOURCE, + ("%s: origin %d invalid", __func__, origin)); + + hc = &harvest_context; RANDOM_HARVEST_LOCK(); - ring_in = (harvest_context.hc_entropy_ring.in + 1)%RANDOM_RING_MAX; - if (ring_in != harvest_context.hc_entropy_ring.out) { - /* The ring is not full */ - event = harvest_context.hc_entropy_ring.ring + ring_in; + buf = &hc->hc_entropy_buf[hc->hc_active_buf]; + if (buf->pos < RANDOM_RING_MAX) { + event = &buf->ring[buf->pos++]; event->he_somecounter = random_get_cyclecount(); event->he_source = origin; - event->he_destination = harvest_context.hc_destination[origin]++; + event->he_destination = hc->hc_destination[origin]++; if (size <= sizeof(event->he_entropy)) { event->he_size = size; memcpy(event->he_entropy, entropy, size); - } - else { + } else { /* Big event, so squash it */ event->he_size = sizeof(event->he_entropy[0]); event->he_entropy[0] = jenkins_hash(entropy, size, (uint32_t)(uintptr_t)event); } - harvest_context.hc_entropy_ring.in = ring_in; } RANDOM_HARVEST_UNLOCK(); } diff --git a/sys/dev/random/random_harvestq.h b/sys/dev/random/random_harvestq.h index 7804bf52aa4f..1d462500df85 100644 --- a/sys/dev/random/random_harvestq.h +++ b/sys/dev/random/random_harvestq.h @@ -49,4 +49,6 @@ random_get_cyclecount(void) return ((uint32_t)get_cyclecount()); } +bool random_harvest_healthtest(const struct harvest_event *event); + #endif /* SYS_DEV_RANDOM_RANDOM_HARVESTQ_H_INCLUDED */ diff --git a/sys/dev/random/randomdev.c b/sys/dev/random/randomdev.c index 9d1c7b1167c8..ced4dd8067d9 100644 --- a/sys/dev/random/randomdev.c +++ b/sys/dev/random/randomdev.c @@ -312,7 +312,7 @@ randomdev_accumulate(uint8_t *buf, u_int count) for (i = 0; i < RANDOM_KEYSIZE_WORDS; i += sizeof(event.he_entropy)/sizeof(event.he_entropy[0])) { event.he_somecounter = random_get_cyclecount(); event.he_size = sizeof(event.he_entropy); - event.he_source = RANDOM_CACHED; + event.he_source = RANDOM_RANDOMDEV; event.he_destination = destination++; /* Harmless cheating */ memcpy(event.he_entropy, entropy_data + i, sizeof(event.he_entropy)); p_random_alg_context->ra_event_processor(&event); diff --git a/sys/dev/regulator/regulator_fixed.c b/sys/dev/regulator/regulator_fixed.c index 0a76da7140a0..55cdb5e4aeae 100644 --- a/sys/dev/regulator/regulator_fixed.c +++ b/sys/dev/regulator/regulator_fixed.c @@ -100,12 +100,8 @@ static struct gpio_entry * regnode_get_gpio_entry(struct gpiobus_pin *gpio_pin) { struct gpio_entry *entry, *tmp; - device_t busdev; int rv; - busdev = GPIO_GET_BUS(gpio_pin->dev); - if (busdev == NULL) - return (NULL); entry = malloc(sizeof(struct gpio_entry), M_FIXEDREGULATOR, M_WAITOK | M_ZERO); @@ -122,8 +118,8 @@ regnode_get_gpio_entry(struct gpiobus_pin *gpio_pin) } /* Reserve pin. */ - /* XXX Can we call gpiobus_acquire_pin() with gpio_list_mtx held? */ - rv = gpiobus_acquire_pin(busdev, gpio_pin->pin); + /* XXX Can we call gpio_pin_acquire() with gpio_list_mtx held? 
*/ + rv = gpio_pin_acquire(gpio_pin); if (rv != 0) { mtx_unlock(&gpio_list_mtx); free(entry, M_FIXEDREGULATOR); diff --git a/sys/dev/sound/midi/midi.c b/sys/dev/sound/midi/midi.c index fbfb69de2913..6753f864ba9c 100644 --- a/sys/dev/sound/midi/midi.c +++ b/sys/dev/sound/midi/midi.c @@ -30,12 +30,6 @@ * POSSIBILITY OF SUCH DAMAGE. */ - /* - * Parts of this file started out as NetBSD: midi.c 1.31 - * They are mostly gone. Still the most obvious will be the state - * machine midi_in - */ - #include <sys/param.h> #include <sys/systm.h> #include <sys/queue.h> @@ -66,7 +60,6 @@ #include "mpu_if.h" #include <dev/sound/midi/midiq.h> -#include "synth_if.h" MALLOC_DEFINE(M_MIDI, "midi buffers", "Midi data allocation area"); #ifndef KOBJMETHOD_END @@ -79,17 +72,6 @@ enum midi_states { MIDI_IN_START, MIDI_IN_SYSEX, MIDI_IN_DATA }; -/* - * The MPU interface current has init() uninit() inqsize() outqsize() - * callback() : fiddle with the tx|rx status. - */ - -#include "mpu_if.h" - -/* - * /dev/rmidi Structure definitions - */ - #define MIDI_NAMELEN 16 struct snd_midi { KOBJ_FIELDS; @@ -115,95 +97,13 @@ struct snd_midi { * complete command packets. */ struct proc *async; struct cdev *dev; - struct synth_midi *synth; - int synth_flags; TAILQ_ENTRY(snd_midi) link; }; -struct synth_midi { - KOBJ_FIELDS; - struct snd_midi *m; -}; - -static synth_open_t midisynth_open; -static synth_close_t midisynth_close; -static synth_writeraw_t midisynth_writeraw; -static synth_killnote_t midisynth_killnote; -static synth_startnote_t midisynth_startnote; -static synth_setinstr_t midisynth_setinstr; -static synth_alloc_t midisynth_alloc; -static synth_controller_t midisynth_controller; -static synth_bender_t midisynth_bender; - -static kobj_method_t midisynth_methods[] = { - KOBJMETHOD(synth_open, midisynth_open), - KOBJMETHOD(synth_close, midisynth_close), - KOBJMETHOD(synth_writeraw, midisynth_writeraw), - KOBJMETHOD(synth_setinstr, midisynth_setinstr), - KOBJMETHOD(synth_startnote, midisynth_startnote), - KOBJMETHOD(synth_killnote, midisynth_killnote), - KOBJMETHOD(synth_alloc, midisynth_alloc), - KOBJMETHOD(synth_controller, midisynth_controller), - KOBJMETHOD(synth_bender, midisynth_bender), - KOBJMETHOD_END -}; - -DEFINE_CLASS(midisynth, midisynth_methods, 0); - -/* - * Module Exports & Interface - * - * struct midi_chan *midi_init(MPU_CLASS cls, int unit, int chan, - * void *cookie) - * int midi_uninit(struct snd_midi *) - * - * 0 == no error - * EBUSY or other error - * - * int midi_in(struct snd_midi *, char *buf, int count) - * int midi_out(struct snd_midi *, char *buf, int count) - * - * midi_{in,out} return actual size transfered - * - */ - -/* - * midi_devs tailq, holder of all rmidi instances protected by midistat_lock - */ - TAILQ_HEAD(, snd_midi) midi_devs; -/* - * /dev/midistat variables and declarations, protected by midistat_lock - */ - struct sx mstat_lock; -static int midistat_isopen = 0; -static struct sbuf midistat_sbuf; -static struct cdev *midistat_dev; - -/* - * /dev/midistat dev_t declarations - */ - -static d_open_t midistat_open; -static d_close_t midistat_close; -static d_read_t midistat_read; - -static struct cdevsw midistat_cdevsw = { - .d_version = D_VERSION, - .d_open = midistat_open, - .d_close = midistat_close, - .d_read = midistat_read, - .d_name = "midistat", -}; - -/* - * /dev/rmidi dev_t declarations, struct variable access is protected by - * locks contained within the structure. 
- */ - static d_open_t midi_open; static d_close_t midi_close; static d_ioctl_t midi_ioctl; @@ -222,41 +122,18 @@ static struct cdevsw midi_cdevsw = { .d_name = "rmidi", }; -/* - * Prototypes of library functions - */ - static int midi_destroy(struct snd_midi *, int); -static int midistat_prepare(struct sbuf * s); static int midi_load(void); static int midi_unload(void); -/* - * Misc declr. - */ SYSCTL_NODE(_hw, OID_AUTO, midi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Midi driver"); -static SYSCTL_NODE(_hw_midi, OID_AUTO, stat, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, - "Status device"); int midi_debug; /* XXX: should this be moved into debug.midi? */ SYSCTL_INT(_hw_midi, OID_AUTO, debug, CTLFLAG_RW, &midi_debug, 0, ""); -int midi_dumpraw; -SYSCTL_INT(_hw_midi, OID_AUTO, dumpraw, CTLFLAG_RW, &midi_dumpraw, 0, ""); - -int midi_instroff; -SYSCTL_INT(_hw_midi, OID_AUTO, instroff, CTLFLAG_RW, &midi_instroff, 0, ""); - -int midistat_verbose; -SYSCTL_INT(_hw_midi_stat, OID_AUTO, verbose, CTLFLAG_RW, - &midistat_verbose, 0, ""); - #define MIDI_DEBUG(l,a) if(midi_debug>=l) a -/* - * CODE START - */ void midistat_lock(void) @@ -285,9 +162,6 @@ midistat_lockassert(void) * what unit number is used. * * It is an error to call midi_init with an already used unit/channel combo. - * - * Returns NULL on error - * */ struct snd_midi * midi_init(kobj_class_t cls, int unit, int channel, void *cookie) @@ -326,9 +200,6 @@ midi_init(kobj_class_t cls, int unit, int channel, void *cookie) MIDI_DEBUG(1, printf("midiinit #2: unit %d/%d.\n", unit, channel)); m = malloc(sizeof(*m), M_MIDI, M_WAITOK | M_ZERO); - m->synth = malloc(sizeof(*m->synth), M_MIDI, M_WAITOK | M_ZERO); - kobj_init((kobj_t)m->synth, &midisynth_class); - m->synth->m = m; kobj_init((kobj_t)m, cls); inqsize = MPU_INQSIZE(m, cookie); outqsize = MPU_OUTQSIZE(m, cookie); @@ -393,7 +264,6 @@ err2: if (MIDIQ_BUF(m->outq)) free(MIDIQ_BUF(m->outq), M_MIDI); err1: - free(m->synth, M_MIDI); free(m, M_MIDI); err0: midistat_unlock(); @@ -405,9 +275,7 @@ err0: * midi_uninit does not call MIDI_UNINIT, as since this is the implementors * entry point. midi_uninit if fact, does not send any methods. A call to * midi_uninit is a defacto promise that you won't manipulate ch anymore - * */ - int midi_uninit(struct snd_midi *m) { @@ -440,13 +308,6 @@ exit: return err; } -/* - * midi_in: process all data until the queue is full, then discards the rest. - * Since midi_in is a state machine, data discards can cause it to get out of - * whack. Process as much as possible. It calls, wakeup, selnotify and - * psignal at most once. - */ - #ifdef notdef static int midi_lengths[] = {2, 2, 2, 2, 1, 1, 2, 0}; @@ -460,6 +321,12 @@ static int midi_lengths[] = {2, 2, 2, 2, 1, 1, 2, 0}; #define MIDI_SYSEX_START 0xF0 #define MIDI_SYSEX_END 0xF7 +/* + * midi_in: process all data until the queue is full, then discards the rest. + * Since midi_in is a state machine, data discards can cause it to get out of + * whack. Process as much as possible. It calls, wakeup, selnotify and + * psignal at most once. 
+ */ int midi_in(struct snd_midi *m, uint8_t *buf, int size) { @@ -627,9 +494,6 @@ midi_out(struct snd_midi *m, uint8_t *buf, int size) return used; } -/* - * /dev/rmidi#.# device access functions - */ int midi_open(struct cdev *i_dev, int flags, int mode, struct thread *td) { @@ -934,434 +798,6 @@ midi_poll(struct cdev *i_dev, int events, struct thread *td) } /* - * /dev/midistat device functions - * - */ -static int -midistat_open(struct cdev *i_dev, int flags, int mode, struct thread *td) -{ - int error; - - MIDI_DEBUG(1, printf("midistat_open\n")); - - midistat_lock(); - if (midistat_isopen) { - midistat_unlock(); - return EBUSY; - } - midistat_isopen = 1; - sbuf_new(&midistat_sbuf, NULL, 4096, SBUF_AUTOEXTEND); - error = (midistat_prepare(&midistat_sbuf) > 0) ? 0 : ENOMEM; - if (error) - midistat_isopen = 0; - midistat_unlock(); - return error; -} - -static int -midistat_close(struct cdev *i_dev, int flags, int mode, struct thread *td) -{ - MIDI_DEBUG(1, printf("midistat_close\n")); - midistat_lock(); - if (!midistat_isopen) { - midistat_unlock(); - return EBADF; - } - sbuf_delete(&midistat_sbuf); - midistat_isopen = 0; - midistat_unlock(); - return 0; -} - -static int -midistat_read(struct cdev *i_dev, struct uio *uio, int flag) -{ - long l; - int err; - - MIDI_DEBUG(4, printf("midistat_read\n")); - midistat_lock(); - if (!midistat_isopen) { - midistat_unlock(); - return EBADF; - } - if (uio->uio_offset < 0 || uio->uio_offset > sbuf_len(&midistat_sbuf)) { - midistat_unlock(); - return EINVAL; - } - err = 0; - l = lmin(uio->uio_resid, sbuf_len(&midistat_sbuf) - uio->uio_offset); - if (l > 0) { - err = uiomove(sbuf_data(&midistat_sbuf) + uio->uio_offset, l, - uio); - } - midistat_unlock(); - return err; -} - -/* - * Module library functions - */ - -static int -midistat_prepare(struct sbuf *s) -{ - struct snd_midi *m; - - midistat_lockassert(); - - sbuf_printf(s, "FreeBSD Midi Driver (midi2)\n"); - if (TAILQ_EMPTY(&midi_devs)) { - sbuf_printf(s, "No devices installed.\n"); - sbuf_finish(s); - return sbuf_len(s); - } - sbuf_printf(s, "Installed devices:\n"); - - TAILQ_FOREACH(m, &midi_devs, link) { - mtx_lock(&m->lock); - sbuf_printf(s, "%s [%d/%d:%s]", m->name, m->unit, m->channel, - MPU_PROVIDER(m, m->cookie)); - sbuf_printf(s, "%s", MPU_DESCR(m, m->cookie, midistat_verbose)); - sbuf_printf(s, "\n"); - mtx_unlock(&m->lock); - } - - sbuf_finish(s); - return sbuf_len(s); -} - -#ifdef notdef -/* - * Convert IOCTL command to string for debugging - */ - -static char * -midi_cmdname(int cmd) -{ - static struct { - int cmd; - char *name; - } *tab, cmdtab_midiioctl[] = { -#define A(x) {x, ## x} - /* - * Once we have some real IOCTLs define, the following will - * be relavant. - * - * A(SNDCTL_MIDI_PRETIME), A(SNDCTL_MIDI_MPUMODE), - * A(SNDCTL_MIDI_MPUCMD), A(SNDCTL_SYNTH_INFO), - * A(SNDCTL_MIDI_INFO), A(SNDCTL_SYNTH_MEMAVL), - * A(SNDCTL_FM_LOAD_INSTR), A(SNDCTL_FM_4OP_ENABLE), - * A(MIOSPASSTHRU), A(MIOGPASSTHRU), A(AIONWRITE), - * A(AIOGSIZE), A(AIOSSIZE), A(AIOGFMT), A(AIOSFMT), - * A(AIOGMIX), A(AIOSMIX), A(AIOSTOP), A(AIOSYNC), - * A(AIOGCAP), - */ -#undef A - { - -1, "unknown" - }, - }; - - for (tab = cmdtab_midiioctl; tab->cmd != cmd && tab->cmd != -1; tab++); - return tab->name; -} - -#endif /* notdef */ - -/* - * midisynth - */ - -int -midisynth_open(void *n, void *arg, int flags) -{ - struct snd_midi *m = ((struct synth_midi *)n)->m; - int retval; - - MIDI_DEBUG(1, printf("midisynth_open %s %s\n", - flags & FREAD ? "M_RX" : "", flags & FWRITE ? 
"M_TX" : "")); - - if (m == NULL) - return ENXIO; - - mtx_lock(&m->lock); - mtx_lock(&m->qlock); - - retval = 0; - - if (flags & FREAD) { - if (MIDIQ_SIZE(m->inq) == 0) - retval = ENXIO; - else if (m->flags & M_RX) - retval = EBUSY; - if (retval) - goto err; - } - if (flags & FWRITE) { - if (MIDIQ_SIZE(m->outq) == 0) - retval = ENXIO; - else if (m->flags & M_TX) - retval = EBUSY; - if (retval) - goto err; - } - m->busy++; - - /* - * TODO: Consider m->async = 0; - */ - - if (flags & FREAD) { - m->flags |= M_RX | M_RXEN; - /* - * Only clear the inq, the outq might still have data to drain - * from a previous session - */ - MIDIQ_CLEAR(m->inq); - m->rchan = 0; - } - - if (flags & FWRITE) { - m->flags |= M_TX; - m->wchan = 0; - } - m->synth_flags = flags & (FREAD | FWRITE); - - MPU_CALLBACK(m, m->cookie, m->flags); - -err: mtx_unlock(&m->qlock); - mtx_unlock(&m->lock); - MIDI_DEBUG(2, printf("midisynth_open: return %d.\n", retval)); - return retval; -} - -int -midisynth_close(void *n) -{ - struct snd_midi *m = ((struct synth_midi *)n)->m; - int retval; - int oldflags; - - MIDI_DEBUG(1, printf("midisynth_close %s %s\n", - m->synth_flags & FREAD ? "M_RX" : "", - m->synth_flags & FWRITE ? "M_TX" : "")); - - if (m == NULL) - return ENXIO; - - mtx_lock(&m->lock); - mtx_lock(&m->qlock); - - if ((m->synth_flags & FREAD && !(m->flags & M_RX)) || - (m->synth_flags & FWRITE && !(m->flags & M_TX))) { - retval = ENXIO; - goto err; - } - m->busy--; - - oldflags = m->flags; - - if (m->synth_flags & FREAD) - m->flags &= ~(M_RX | M_RXEN); - if (m->synth_flags & FWRITE) - m->flags &= ~M_TX; - - if ((m->flags & (M_TXEN | M_RXEN)) != (oldflags & (M_RXEN | M_TXEN))) - MPU_CALLBACK(m, m->cookie, m->flags); - - MIDI_DEBUG(1, printf("midi_close: closed, busy = %d.\n", m->busy)); - - mtx_unlock(&m->qlock); - mtx_unlock(&m->lock); - retval = 0; -err: return retval; -} - -/* - * Always blocking. 
- */ - -int -midisynth_writeraw(void *n, uint8_t *buf, size_t len) -{ - struct snd_midi *m = ((struct synth_midi *)n)->m; - int retval; - int used; - int i; - - MIDI_DEBUG(4, printf("midisynth_writeraw\n")); - - retval = 0; - - if (m == NULL) - return ENXIO; - - mtx_lock(&m->lock); - mtx_lock(&m->qlock); - - if (!(m->flags & M_TX)) - goto err1; - - if (midi_dumpraw) - printf("midi dump: "); - - while (len > 0) { - while (MIDIQ_AVAIL(m->outq) == 0) { - if (!(m->flags & M_TXEN)) { - m->flags |= M_TXEN; - MPU_CALLBACK(m, m->cookie, m->flags); - } - mtx_unlock(&m->lock); - m->wchan = 1; - MIDI_DEBUG(3, printf("midisynth_writeraw msleep\n")); - retval = msleep(&m->wchan, &m->qlock, - PCATCH | PDROP, "midi TX", 0); - /* - * We slept, maybe things have changed since last - * dying check - */ - if (retval == EINTR) - goto err0; - - if (retval) - goto err0; - mtx_lock(&m->lock); - mtx_lock(&m->qlock); - m->wchan = 0; - if (!m->busy) - goto err1; - } - - /* - * We are certain than data can be placed on the queue - */ - - used = MIN(MIDIQ_AVAIL(m->outq), len); - used = MIN(used, MIDI_WSIZE); - MIDI_DEBUG(5, - printf("midi_synth: resid %zu len %jd avail %jd\n", - len, (intmax_t)MIDIQ_LEN(m->outq), - (intmax_t)MIDIQ_AVAIL(m->outq))); - - if (midi_dumpraw) - for (i = 0; i < used; i++) - printf("%x ", buf[i]); - - MIDIQ_ENQ(m->outq, buf, used); - len -= used; - - /* - * Inform the bottom half that data can be written - */ - if (!(m->flags & M_TXEN)) { - m->flags |= M_TXEN; - MPU_CALLBACK(m, m->cookie, m->flags); - } - } - /* - * If we Made it here then transfer is good - */ - if (midi_dumpraw) - printf("\n"); - - retval = 0; -err1: mtx_unlock(&m->qlock); - mtx_unlock(&m->lock); -err0: return retval; -} - -static int -midisynth_killnote(void *n, uint8_t chn, uint8_t note, uint8_t vel) -{ - u_char c[3]; - - if (note > 127 || chn > 15) - return (EINVAL); - - if (vel > 127) - vel = 127; - - if (vel == 64) { - c[0] = 0x90 | (chn & 0x0f); /* Note on. */ - c[1] = (u_char)note; - c[2] = 0; - } else { - c[0] = 0x80 | (chn & 0x0f); /* Note off. */ - c[1] = (u_char)note; - c[2] = (u_char)vel; - } - - return midisynth_writeraw(n, c, 3); -} - -static int -midisynth_setinstr(void *n, uint8_t chn, uint16_t instr) -{ - u_char c[2]; - - if (instr > 127 || chn > 15) - return EINVAL; - - c[0] = 0xc0 | (chn & 0x0f); /* Progamme change. */ - c[1] = instr + midi_instroff; - - return midisynth_writeraw(n, c, 2); -} - -static int -midisynth_startnote(void *n, uint8_t chn, uint8_t note, uint8_t vel) -{ - u_char c[3]; - - if (note > 127 || chn > 15) - return EINVAL; - - if (vel > 127) - vel = 127; - - c[0] = 0x90 | (chn & 0x0f); /* Note on. */ - c[1] = (u_char)note; - c[2] = (u_char)vel; - - return midisynth_writeraw(n, c, 3); -} -static int -midisynth_alloc(void *n, uint8_t chan, uint8_t note) -{ - return chan; -} - -static int -midisynth_controller(void *n, uint8_t chn, uint8_t ctrlnum, uint16_t val) -{ - u_char c[3]; - - if (ctrlnum > 127 || chn > 15) - return EINVAL; - - c[0] = 0xb0 | (chn & 0x0f); /* Control Message. */ - c[1] = ctrlnum; - c[2] = val; - return midisynth_writeraw(n, c, 3); -} - -static int -midisynth_bender(void *n, uint8_t chn, uint16_t val) -{ - u_char c[3]; - - if (val > 16383 || chn > 15) - return EINVAL; - - c[0] = 0xe0 | (chn & 0x0f); /* Pitch bend. */ - c[1] = (u_char)val & 0x7f; - c[2] = (u_char)(val >> 7) & 0x7f; - - return midisynth_writeraw(n, c, 3); -} - -/* * Single point of midi destructions. 
*/ static int @@ -1381,24 +817,16 @@ midi_destroy(struct snd_midi *m, int midiuninit) free(MIDIQ_BUF(m->outq), M_MIDI); mtx_destroy(&m->qlock); mtx_destroy(&m->lock); - free(m->synth, M_MIDI); free(m, M_MIDI); return 0; } -/* - * Load and unload functions, creates the /dev/midistat device - */ - static int midi_load(void) { sx_init(&mstat_lock, "midistat lock"); TAILQ_INIT(&midi_devs); - midistat_dev = make_dev(&midistat_cdevsw, MIDI_DEV_MIDICTL, UID_ROOT, - GID_WHEEL, 0666, "midistat"); - return 0; } @@ -1411,9 +839,6 @@ midi_unload(void) MIDI_DEBUG(1, printf("midi_unload()\n")); retval = EBUSY; midistat_lock(); - if (midistat_isopen) - goto exit0; - TAILQ_FOREACH_SAFE(m, &midi_devs, link, tmp) { mtx_lock(&m->lock); if (m->busy) @@ -1421,28 +846,21 @@ midi_unload(void) else retval = midi_destroy(m, 1); if (retval) - goto exit1; + goto exit; } midistat_unlock(); - destroy_dev(midistat_dev); - /* - * Made it here then unload is complete - */ sx_destroy(&mstat_lock); return 0; -exit1: +exit: mtx_unlock(&m->lock); -exit0: midistat_unlock(); if (retval) MIDI_DEBUG(2, printf("midi_unload: failed\n")); return retval; } -extern int seq_modevent(module_t mod, int type, void *data); - static int midi_modevent(module_t mod, int type, void *data) { @@ -1453,14 +871,10 @@ midi_modevent(module_t mod, int type, void *data) switch (type) { case MOD_LOAD: retval = midi_load(); - if (retval == 0) - retval = seq_modevent(mod, type, data); break; case MOD_UNLOAD: retval = midi_unload(); - if (retval == 0) - retval = seq_modevent(mod, type, data); break; default: @@ -1470,73 +884,5 @@ midi_modevent(module_t mod, int type, void *data) return retval; } -kobj_t -midimapper_addseq(void *arg1, int *unit, void **cookie) -{ - unit = NULL; - - return (kobj_t)arg1; -} - -int -midimapper_open_locked(void *arg1, void **cookie) -{ - int retval = 0; - struct snd_midi *m; - - midistat_lockassert(); - TAILQ_FOREACH(m, &midi_devs, link) { - retval++; - } - - return retval; -} - -int -midimapper_open(void *arg1, void **cookie) -{ - int retval; - - midistat_lock(); - retval = midimapper_open_locked(arg1, cookie); - midistat_unlock(); - - return retval; -} - -int -midimapper_close(void *arg1, void *cookie) -{ - return 0; -} - -kobj_t -midimapper_fetch_synth_locked(void *arg, void *cookie, int unit) -{ - struct snd_midi *m; - int retval = 0; - - midistat_lockassert(); - TAILQ_FOREACH(m, &midi_devs, link) { - if (unit == retval) - return (kobj_t)m->synth; - retval++; - } - - return NULL; -} - -kobj_t -midimapper_fetch_synth(void *arg, void *cookie, int unit) -{ - kobj_t synth; - - midistat_lock(); - synth = midimapper_fetch_synth_locked(arg, cookie, unit); - midistat_unlock(); - - return synth; -} - DEV_MODULE(midi, midi_modevent, NULL); MODULE_VERSION(midi, 1); diff --git a/sys/dev/sound/midi/midi.h b/sys/dev/sound/midi/midi.h index 2254fab690e9..286e84264ef3 100644 --- a/sys/dev/sound/midi/midi.h +++ b/sys/dev/sound/midi/midi.h @@ -51,11 +51,4 @@ int midi_uninit(struct snd_midi *_m); int midi_out(struct snd_midi *_m, uint8_t *_buf, int _size); int midi_in(struct snd_midi *_m, uint8_t *_buf, int _size); -kobj_t midimapper_addseq(void *arg1, int *unit, void **cookie); -int midimapper_open_locked(void *arg1, void **cookie); -int midimapper_open(void *arg1, void **cookie); -int midimapper_close(void *arg1, void *cookie); -kobj_t midimapper_fetch_synth_locked(void *arg, void *cookie, int unit); -kobj_t midimapper_fetch_synth(void *arg, void *cookie, int unit); - #endif diff --git a/sys/dev/sound/midi/mpu401.c 
b/sys/dev/sound/midi/mpu401.c index 2be285bc0040..224ebb1b01f4 100644 --- a/sys/dev/sound/midi/mpu401.c +++ b/sys/dev/sound/midi/mpu401.c @@ -88,8 +88,6 @@ static int mpu401_minqsize(struct snd_midi *, void *); static int mpu401_moutqsize(struct snd_midi *, void *); static void mpu401_mcallback(struct snd_midi *, void *, int); static void mpu401_mcallbackp(struct snd_midi *, void *, int); -static const char *mpu401_mdescr(struct snd_midi *, void *, int); -static const char *mpu401_mprovider(struct snd_midi *, void *); static kobj_method_t mpu401_methods[] = { KOBJMETHOD(mpu_init, mpu401_minit), @@ -98,8 +96,6 @@ static kobj_method_t mpu401_methods[] = { KOBJMETHOD(mpu_outqsize, mpu401_moutqsize), KOBJMETHOD(mpu_callback, mpu401_mcallback), KOBJMETHOD(mpu_callbackp, mpu401_mcallbackp), - KOBJMETHOD(mpu_descr, mpu401_mdescr), - KOBJMETHOD(mpu_provider, mpu401_mprovider), KOBJMETHOD_END }; @@ -122,24 +118,12 @@ mpu401_intr(struct mpu401 *m) int i; int s; -/* - printf("mpu401_intr\n"); -*/ #define RXRDY(m) ( (STATUS(m) & MPU_INPUTBUSY) == 0) #define TXRDY(m) ( (STATUS(m) & MPU_OUTPUTBUSY) == 0) -#if 0 -#define D(x,l) printf("mpu401_intr %d %x %s %s\n",l, x, x&MPU_INPUTBUSY?"RX":"", x&MPU_OUTPUTBUSY?"TX":"") -#else -#define D(x,l) -#endif i = 0; s = STATUS(m); - D(s, 1); while ((s & MPU_INPUTBUSY) == 0 && i < MPU_INTR_BUF) { b[i] = READ(m); -/* - printf("mpu401_intr in i %d d %d\n", i, b[i]); -*/ i++; s = STATUS(m); } @@ -148,15 +132,9 @@ mpu401_intr(struct mpu401 *m) i = 0; while (!(s & MPU_OUTPUTBUSY) && i < MPU_INTR_BUF) { if (midi_out(m->mid, b, 1)) { -/* - printf("mpu401_intr out i %d d %d\n", i, b[0]); -*/ WRITE(m, *b); } else { -/* - printf("mpu401_intr write: no output\n"); -*/ return 0; } i++; @@ -262,13 +240,7 @@ static void mpu401_mcallback(struct snd_midi *sm, void *arg, int flags) { struct mpu401 *m = arg; -#if 0 - printf("mpu401_callback %s %s %s %s\n", - flags & M_RX ? "M_RX" : "", - flags & M_TX ? "M_TX" : "", - flags & M_RXEN ? "M_RXEN" : "", - flags & M_TXEN ? "M_TXEN" : ""); -#endif + if (flags & M_TXEN && m->si) { callout_reset(&m->timer, 1, mpu401_timeout, m); } @@ -278,19 +250,5 @@ mpu401_mcallback(struct snd_midi *sm, void *arg, int flags) static void mpu401_mcallbackp(struct snd_midi *sm, void *arg, int flags) { -/* printf("mpu401_callbackp\n"); */ mpu401_mcallback(sm, arg, flags); } - -static const char * -mpu401_mdescr(struct snd_midi *sm, void *arg, int verbosity) -{ - - return "descr mpu401"; -} - -static const char * -mpu401_mprovider(struct snd_midi *m, void *arg) -{ - return "provider mpu401"; -} diff --git a/sys/dev/sound/midi/mpu_if.m b/sys/dev/sound/midi/mpu_if.m index b7cb586c5dd0..835d887f703a 100644 --- a/sys/dev/sound/midi/mpu_if.m +++ b/sys/dev/sound/midi/mpu_if.m @@ -56,17 +56,6 @@ METHOD void callback { int _flags; }; -METHOD const char * provider { - struct snd_midi *_kobj; - void *_cookie; -}; - -METHOD const char * descr { - struct snd_midi *_kobj; - void *_cookie; - int _verbosity; -}; - METHOD int uninit { struct snd_midi *_kobj; void *_cookie; diff --git a/sys/dev/sound/midi/sequencer.c b/sys/dev/sound/midi/sequencer.c deleted file mode 100644 index 03b71688175c..000000000000 --- a/sys/dev/sound/midi/sequencer.c +++ /dev/null @@ -1,2107 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause - * - * Copyright (c) 2003 Mathew Kanner - * Copyright (c) 1993 Hannu Savolainen - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -/* - * The sequencer personality manager. - */ - -#include <sys/param.h> -#include <sys/systm.h> -#include <sys/ioccom.h> - -#include <sys/filio.h> -#include <sys/lock.h> -#include <sys/sockio.h> -#include <sys/fcntl.h> -#include <sys/proc.h> -#include <sys/sysctl.h> - -#include <sys/kernel.h> /* for DATA_SET */ - -#include <sys/module.h> -#include <sys/conf.h> -#include <sys/file.h> -#include <sys/uio.h> -#include <sys/syslog.h> -#include <sys/errno.h> -#include <sys/malloc.h> -#include <sys/bus.h> -#include <machine/resource.h> -#include <machine/bus.h> -#include <machine/clock.h> /* for DELAY */ -#include <sys/soundcard.h> -#include <sys/rman.h> -#include <sys/mman.h> -#include <sys/poll.h> -#include <sys/mutex.h> -#include <sys/condvar.h> -#include <sys/kthread.h> -#include <sys/unistd.h> -#include <sys/selinfo.h> -#include <sys/sx.h> - -#ifdef HAVE_KERNEL_OPTION_HEADERS -#include "opt_snd.h" -#endif - -#include <dev/sound/midi/midi.h> -#include <dev/sound/midi/midiq.h> -#include "synth_if.h" - -#include <dev/sound/midi/sequencer.h> - -#define TMR_TIMERBASE 13 - -#define SND_DEV_SEQ 1 /* Sequencer output /dev/sequencer (FM - * synthesizer and MIDI output) */ -#define SND_DEV_MUSIC 8 /* /dev/music, level 2 interface */ - -/* Length of a sequencer event. */ -#define EV_SZ 8 -#define IEV_SZ 8 - -/* Lookup modes */ -#define LOOKUP_EXIST (0) -#define LOOKUP_OPEN (1) -#define LOOKUP_CLOSE (2) - -#define MIDIDEV(y) (dev2unit(y) & 0x0f) - -/* These are the entries to the sequencer driver. 
*/ -static d_open_t mseq_open; -static d_close_t mseq_close; -static d_ioctl_t mseq_ioctl; -static d_read_t mseq_read; -static d_write_t mseq_write; -static d_poll_t mseq_poll; - -static struct cdevsw seq_cdevsw = { - .d_version = D_VERSION, - .d_open = mseq_open, - .d_close = mseq_close, - .d_read = mseq_read, - .d_write = mseq_write, - .d_ioctl = mseq_ioctl, - .d_poll = mseq_poll, - .d_name = "sequencer", -}; - -struct seq_softc { - KOBJ_FIELDS; - - struct mtx seq_lock, q_lock; - struct cv empty_cv, reset_cv, in_cv, out_cv, state_cv, th_cv; - - MIDIQ_HEAD(, u_char) in_q, out_q; - - u_long flags; - /* Flags (protected by flag_mtx of mididev_info) */ - int fflags; /* Access mode */ - int music; - - int out_water; /* Sequence output threshould */ - snd_sync_parm sync_parm; /* AIOSYNC parameter set */ - struct thread *sync_thread; /* AIOSYNCing thread */ - struct selinfo in_sel, out_sel; - int midi_number; - struct cdev *seqdev, *musicdev; - int unit; - int maxunits; - kobj_t *midis; - int *midi_flags; - kobj_t mapper; - void *mapper_cookie; - struct timeval timerstop, timersub; - int timerbase, tempo; - int timerrun; - int done; - int playing; - int recording; - int busy; - int pre_event_timeout; - int waiting; -}; - -/* - * Module specific stuff, including how many sequecers - * we currently own. - */ - -SYSCTL_NODE(_hw_midi, OID_AUTO, seq, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, - "Midi sequencer"); - -int seq_debug; -/* XXX: should this be moved into debug.midi? */ -SYSCTL_INT(_hw_midi_seq, OID_AUTO, debug, CTLFLAG_RW, &seq_debug, 0, ""); - -midi_cmdtab cmdtab_seqevent[] = { - {SEQ_NOTEOFF, "SEQ_NOTEOFF"}, - {SEQ_NOTEON, "SEQ_NOTEON"}, - {SEQ_WAIT, "SEQ_WAIT"}, - {SEQ_PGMCHANGE, "SEQ_PGMCHANGE"}, - {SEQ_SYNCTIMER, "SEQ_SYNCTIMER"}, - {SEQ_MIDIPUTC, "SEQ_MIDIPUTC"}, - {SEQ_DRUMON, "SEQ_DRUMON"}, - {SEQ_DRUMOFF, "SEQ_DRUMOFF"}, - {SEQ_ECHO, "SEQ_ECHO"}, - {SEQ_AFTERTOUCH, "SEQ_AFTERTOUCH"}, - {SEQ_CONTROLLER, "SEQ_CONTROLLER"}, - {SEQ_BALANCE, "SEQ_BALANCE"}, - {SEQ_VOLMODE, "SEQ_VOLMODE"}, - {SEQ_FULLSIZE, "SEQ_FULLSIZE"}, - {SEQ_PRIVATE, "SEQ_PRIVATE"}, - {SEQ_EXTENDED, "SEQ_EXTENDED"}, - {EV_SEQ_LOCAL, "EV_SEQ_LOCAL"}, - {EV_TIMING, "EV_TIMING"}, - {EV_CHN_COMMON, "EV_CHN_COMMON"}, - {EV_CHN_VOICE, "EV_CHN_VOICE"}, - {EV_SYSEX, "EV_SYSEX"}, - {-1, NULL}, -}; - -midi_cmdtab cmdtab_seqioctl[] = { - {SNDCTL_SEQ_RESET, "SNDCTL_SEQ_RESET"}, - {SNDCTL_SEQ_SYNC, "SNDCTL_SEQ_SYNC"}, - {SNDCTL_SYNTH_INFO, "SNDCTL_SYNTH_INFO"}, - {SNDCTL_SEQ_CTRLRATE, "SNDCTL_SEQ_CTRLRATE"}, - {SNDCTL_SEQ_GETOUTCOUNT, "SNDCTL_SEQ_GETOUTCOUNT"}, - {SNDCTL_SEQ_GETINCOUNT, "SNDCTL_SEQ_GETINCOUNT"}, - {SNDCTL_SEQ_PERCMODE, "SNDCTL_SEQ_PERCMODE"}, - {SNDCTL_FM_LOAD_INSTR, "SNDCTL_FM_LOAD_INSTR"}, - {SNDCTL_SEQ_TESTMIDI, "SNDCTL_SEQ_TESTMIDI"}, - {SNDCTL_SEQ_RESETSAMPLES, "SNDCTL_SEQ_RESETSAMPLES"}, - {SNDCTL_SEQ_NRSYNTHS, "SNDCTL_SEQ_NRSYNTHS"}, - {SNDCTL_SEQ_NRMIDIS, "SNDCTL_SEQ_NRMIDIS"}, - {SNDCTL_SEQ_GETTIME, "SNDCTL_SEQ_GETTIME"}, - {SNDCTL_MIDI_INFO, "SNDCTL_MIDI_INFO"}, - {SNDCTL_SEQ_THRESHOLD, "SNDCTL_SEQ_THRESHOLD"}, - {SNDCTL_SYNTH_MEMAVL, "SNDCTL_SYNTH_MEMAVL"}, - {SNDCTL_FM_4OP_ENABLE, "SNDCTL_FM_4OP_ENABLE"}, - {SNDCTL_PMGR_ACCESS, "SNDCTL_PMGR_ACCESS"}, - {SNDCTL_SEQ_PANIC, "SNDCTL_SEQ_PANIC"}, - {SNDCTL_SEQ_OUTOFBAND, "SNDCTL_SEQ_OUTOFBAND"}, - {SNDCTL_TMR_TIMEBASE, "SNDCTL_TMR_TIMEBASE"}, - {SNDCTL_TMR_START, "SNDCTL_TMR_START"}, - {SNDCTL_TMR_STOP, "SNDCTL_TMR_STOP"}, - {SNDCTL_TMR_CONTINUE, "SNDCTL_TMR_CONTINUE"}, - {SNDCTL_TMR_TEMPO, "SNDCTL_TMR_TEMPO"}, - {SNDCTL_TMR_SOURCE, "SNDCTL_TMR_SOURCE"}, - 
{SNDCTL_TMR_METRONOME, "SNDCTL_TMR_METRONOME"}, - {SNDCTL_TMR_SELECT, "SNDCTL_TMR_SELECT"}, - {SNDCTL_MIDI_PRETIME, "SNDCTL_MIDI_PRETIME"}, - {AIONWRITE, "AIONWRITE"}, - {AIOGSIZE, "AIOGSIZE"}, - {AIOSSIZE, "AIOSSIZE"}, - {AIOGFMT, "AIOGFMT"}, - {AIOSFMT, "AIOSFMT"}, - {AIOGMIX, "AIOGMIX"}, - {AIOSMIX, "AIOSMIX"}, - {AIOSTOP, "AIOSTOP"}, - {AIOSYNC, "AIOSYNC"}, - {AIOGCAP, "AIOGCAP"}, - {-1, NULL}, -}; - -midi_cmdtab cmdtab_timer[] = { - {TMR_WAIT_REL, "TMR_WAIT_REL"}, - {TMR_WAIT_ABS, "TMR_WAIT_ABS"}, - {TMR_STOP, "TMR_STOP"}, - {TMR_START, "TMR_START"}, - {TMR_CONTINUE, "TMR_CONTINUE"}, - {TMR_TEMPO, "TMR_TEMPO"}, - {TMR_ECHO, "TMR_ECHO"}, - {TMR_CLOCK, "TMR_CLOCK"}, - {TMR_SPP, "TMR_SPP"}, - {TMR_TIMESIG, "TMR_TIMESIG"}, - {-1, NULL}, -}; - -midi_cmdtab cmdtab_seqcv[] = { - {MIDI_NOTEOFF, "MIDI_NOTEOFF"}, - {MIDI_NOTEON, "MIDI_NOTEON"}, - {MIDI_KEY_PRESSURE, "MIDI_KEY_PRESSURE"}, - {-1, NULL}, -}; - -midi_cmdtab cmdtab_seqccmn[] = { - {MIDI_CTL_CHANGE, "MIDI_CTL_CHANGE"}, - {MIDI_PGM_CHANGE, "MIDI_PGM_CHANGE"}, - {MIDI_CHN_PRESSURE, "MIDI_CHN_PRESSURE"}, - {MIDI_PITCH_BEND, "MIDI_PITCH_BEND"}, - {MIDI_SYSTEM_PREFIX, "MIDI_SYSTEM_PREFIX"}, - {-1, NULL}, -}; - -#ifndef KOBJMETHOD_END -#define KOBJMETHOD_END { NULL, NULL } -#endif - -/* - * static const char *mpu401_mprovider(kobj_t obj, struct mpu401 *m); - */ - -static kobj_method_t seq_methods[] = { - /* KOBJMETHOD(mpu_provider,mpu401_mprovider), */ - KOBJMETHOD_END -}; - -DEFINE_CLASS(sequencer, seq_methods, 0); - -/* The followings are the local function. */ -static int seq_convertold(u_char *event, u_char *out); - -/* - * static void seq_midiinput(struct seq_softc * scp, void *md); - */ -static void seq_reset(struct seq_softc *scp); -static int seq_sync(struct seq_softc *scp); - -static int seq_processevent(struct seq_softc *scp, u_char *event); - -static int seq_timing(struct seq_softc *scp, u_char *event); -static int seq_local(struct seq_softc *scp, u_char *event); - -static int seq_chnvoice(struct seq_softc *scp, kobj_t md, u_char *event); -static int seq_chncommon(struct seq_softc *scp, kobj_t md, u_char *event); -static int seq_sysex(struct seq_softc *scp, kobj_t md, u_char *event); - -static int seq_fetch_mid(struct seq_softc *scp, int unit, kobj_t *md); -void seq_copytoinput(struct seq_softc *scp, u_char *event, int len); -int seq_modevent(module_t mod, int type, void *data); -struct seq_softc *seqs[10]; -static struct mtx seqinfo_mtx; -static u_long nseq = 0; - -static void timer_start(struct seq_softc *t); -static void timer_stop(struct seq_softc *t); -static void timer_setvals(struct seq_softc *t, int tempo, int timerbase); -static void timer_wait(struct seq_softc *t, int ticks, int wait_abs); -static int timer_now(struct seq_softc *t); - -static void -timer_start(struct seq_softc *t) -{ - t->timerrun = 1; - getmicrotime(&t->timersub); -} - -static void -timer_continue(struct seq_softc *t) -{ - struct timeval now; - - if (t->timerrun == 1) - return; - t->timerrun = 1; - getmicrotime(&now); - timevalsub(&now, &t->timerstop); - timevaladd(&t->timersub, &now); -} - -static void -timer_stop(struct seq_softc *t) -{ - t->timerrun = 0; - getmicrotime(&t->timerstop); -} - -static void -timer_setvals(struct seq_softc *t, int tempo, int timerbase) -{ - t->tempo = tempo; - t->timerbase = timerbase; -} - -static void -timer_wait(struct seq_softc *t, int ticks, int wait_abs) -{ - struct timeval now, when; - int ret; - unsigned long long i; - - while (t->timerrun == 0) { - SEQ_DEBUG(2, printf("Timer wait when timer isn't running\n")); - 
/* - * The old sequencer used timeouts that only increased - * the timer when the timer was running. - * Hence the sequencer would stick (?) if the - * timer was disabled. - */ - cv_wait(&t->reset_cv, &t->seq_lock); - if (t->playing == 0) - return; - } - - i = ticks * 60ull * 1000000ull / (t->tempo * t->timerbase); - - when.tv_sec = i / 1000000; - when.tv_usec = i % 1000000; - -#if 0 - printf("timer_wait tempo %d timerbase %d ticks %d abs %d u_sec %llu\n", - t->tempo, t->timerbase, ticks, wait_abs, i); -#endif - - if (wait_abs != 0) { - getmicrotime(&now); - timevalsub(&now, &t->timersub); - timevalsub(&when, &now); - } - if (when.tv_sec < 0 || when.tv_usec < 0) { - SEQ_DEBUG(3, - printf("seq_timer error negative time %lds.%06lds\n", - (long)when.tv_sec, (long)when.tv_usec)); - return; - } - i = when.tv_sec * 1000000ull; - i += when.tv_usec; - i *= hz; - i /= 1000000ull; -#if 0 - printf("seq_timer usec %llu ticks %llu\n", - when.tv_sec * 1000000ull + when.tv_usec, i); -#endif - t->waiting = 1; - ret = cv_timedwait(&t->reset_cv, &t->seq_lock, i + 1); - t->waiting = 0; - - if (ret != EWOULDBLOCK) - SEQ_DEBUG(3, printf("seq_timer didn't timeout\n")); - -} - -static int -timer_now(struct seq_softc *t) -{ - struct timeval now; - unsigned long long i; - int ret; - - if (t->timerrun == 0) - now = t->timerstop; - else - getmicrotime(&now); - - timevalsub(&now, &t->timersub); - - i = now.tv_sec * 1000000ull; - i += now.tv_usec; - i *= t->timerbase; -/* i /= t->tempo; */ - i /= 1000000ull; - - ret = i; - /* - * printf("timer_now: %llu %d\n", i, ret); - */ - - return ret; -} - -static void -seq_eventthread(void *arg) -{ - struct seq_softc *scp = arg; - u_char event[EV_SZ]; - - mtx_lock(&scp->seq_lock); - SEQ_DEBUG(2, printf("seq_eventthread started\n")); - while (scp->done == 0) { -restart: - while (scp->playing == 0) { - cv_wait(&scp->state_cv, &scp->seq_lock); - if (scp->done) - goto done; - } - - while (MIDIQ_EMPTY(scp->out_q)) { - cv_broadcast(&scp->empty_cv); - cv_wait(&scp->out_cv, &scp->seq_lock); - if (scp->playing == 0) - goto restart; - if (scp->done) - goto done; - } - - MIDIQ_DEQ(scp->out_q, event, EV_SZ); - - if (MIDIQ_AVAIL(scp->out_q) < scp->out_water) { - cv_broadcast(&scp->out_cv); - selwakeup(&scp->out_sel); - } - seq_processevent(scp, event); - } - -done: - cv_broadcast(&scp->th_cv); - mtx_unlock(&scp->seq_lock); - SEQ_DEBUG(2, printf("seq_eventthread finished\n")); - kproc_exit(0); -} - -/* - * seq_processevent: This maybe called by the event thread or the IOCTL - * handler for queued and out of band events respectively. 
- */ -static int -seq_processevent(struct seq_softc *scp, u_char *event) -{ - int ret; - kobj_t m; - - ret = 0; - - if (event[0] == EV_SEQ_LOCAL) - ret = seq_local(scp, event); - else if (event[0] == EV_TIMING) - ret = seq_timing(scp, event); - else if (event[0] != EV_CHN_VOICE && - event[0] != EV_CHN_COMMON && - event[0] != EV_SYSEX && - event[0] != SEQ_MIDIPUTC) { - ret = 1; - SEQ_DEBUG(2, printf("seq_processevent not known %d\n", - event[0])); - } else if (seq_fetch_mid(scp, event[1], &m) != 0) { - ret = 1; - SEQ_DEBUG(2, printf("seq_processevent midi unit not found %d\n", - event[1])); - } else - switch (event[0]) { - case EV_CHN_VOICE: - ret = seq_chnvoice(scp, m, event); - break; - case EV_CHN_COMMON: - ret = seq_chncommon(scp, m, event); - break; - case EV_SYSEX: - ret = seq_sysex(scp, m, event); - break; - case SEQ_MIDIPUTC: - mtx_unlock(&scp->seq_lock); - ret = SYNTH_WRITERAW(m, &event[2], 1); - mtx_lock(&scp->seq_lock); - break; - } - return ret; -} - -static int -seq_addunit(void) -{ - struct seq_softc *scp; - int ret; - u_char *buf; - - gone_in(15, "Warning! MIDI sequencer to be removed soon: no longer " - "needed or used\n"); - - /* Allocate the softc. */ - ret = ENOMEM; - scp = malloc(sizeof(*scp), M_DEVBUF, M_NOWAIT | M_ZERO); - if (scp == NULL) { - SEQ_DEBUG(1, printf("seq_addunit: softc allocation failed.\n")); - goto err; - } - kobj_init((kobj_t)scp, &sequencer_class); - - buf = malloc(sizeof(*buf) * EV_SZ * 1024, M_TEMP, M_NOWAIT | M_ZERO); - if (buf == NULL) - goto err; - MIDIQ_INIT(scp->in_q, buf, EV_SZ * 1024); - buf = malloc(sizeof(*buf) * EV_SZ * 1024, M_TEMP, M_NOWAIT | M_ZERO); - if (buf == NULL) - goto err; - MIDIQ_INIT(scp->out_q, buf, EV_SZ * 1024); - ret = EINVAL; - - scp->midis = malloc(sizeof(kobj_t) * 32, M_TEMP, M_NOWAIT | M_ZERO); - scp->midi_flags = malloc(sizeof(*scp->midi_flags) * 32, M_TEMP, - M_NOWAIT | M_ZERO); - - if (scp->midis == NULL || scp->midi_flags == NULL) - goto err; - - scp->flags = 0; - - mtx_init(&scp->seq_lock, "seqflq", NULL, 0); - cv_init(&scp->state_cv, "seqstate"); - cv_init(&scp->empty_cv, "seqempty"); - cv_init(&scp->reset_cv, "seqtimer"); - cv_init(&scp->out_cv, "seqqout"); - cv_init(&scp->in_cv, "seqqin"); - cv_init(&scp->th_cv, "seqstart"); - - /* - * Init the damn timer - */ - - scp->mapper = midimapper_addseq(scp, &scp->unit, &scp->mapper_cookie); - if (scp->mapper == NULL) - goto err; - - scp->seqdev = make_dev(&seq_cdevsw, SND_DEV_SEQ, UID_ROOT, GID_WHEEL, - 0666, "sequencer%d", scp->unit); - - scp->musicdev = make_dev(&seq_cdevsw, SND_DEV_MUSIC, UID_ROOT, - GID_WHEEL, 0666, "music%d", scp->unit); - - if (scp->seqdev == NULL || scp->musicdev == NULL) - goto err; - /* - * TODO: Add to list of sequencers this module provides - */ - - ret = - kproc_create - (seq_eventthread, scp, NULL, RFHIGHPID, 0, - "sequencer %02d", scp->unit); - - if (ret) - goto err; - - scp->seqdev->si_drv1 = scp->musicdev->si_drv1 = scp; - - SEQ_DEBUG(2, printf("sequencer %d created scp %p\n", scp->unit, scp)); - - ret = 0; - - mtx_lock(&seqinfo_mtx); - seqs[nseq++] = scp; - mtx_unlock(&seqinfo_mtx); - - goto ok; - -err: - if (scp != NULL) { - if (scp->seqdev != NULL) - destroy_dev(scp->seqdev); - if (scp->musicdev != NULL) - destroy_dev(scp->musicdev); - /* - * TODO: Destroy mutex and cv - */ - if (scp->midis != NULL) - free(scp->midis, M_TEMP); - if (scp->midi_flags != NULL) - free(scp->midi_flags, M_TEMP); - if (scp->out_q.b) - free(scp->out_q.b, M_TEMP); - if (scp->in_q.b) - free(scp->in_q.b, M_TEMP); - free(scp, M_DEVBUF); - } -ok: - return ret; 
-} - -static int -seq_delunit(int unit) -{ - struct seq_softc *scp = seqs[unit]; - int i; - - //SEQ_DEBUG(4, printf("seq_delunit: %d\n", unit)); - SEQ_DEBUG(1, printf("seq_delunit: 1 \n")); - mtx_lock(&scp->seq_lock); - - scp->playing = 0; - scp->done = 1; - cv_broadcast(&scp->out_cv); - cv_broadcast(&scp->state_cv); - cv_broadcast(&scp->reset_cv); - SEQ_DEBUG(1, printf("seq_delunit: 2 \n")); - cv_wait(&scp->th_cv, &scp->seq_lock); - SEQ_DEBUG(1, printf("seq_delunit: 3.0 \n")); - mtx_unlock(&scp->seq_lock); - SEQ_DEBUG(1, printf("seq_delunit: 3.1 \n")); - - cv_destroy(&scp->state_cv); - SEQ_DEBUG(1, printf("seq_delunit: 4 \n")); - cv_destroy(&scp->empty_cv); - SEQ_DEBUG(1, printf("seq_delunit: 5 \n")); - cv_destroy(&scp->reset_cv); - SEQ_DEBUG(1, printf("seq_delunit: 6 \n")); - cv_destroy(&scp->out_cv); - SEQ_DEBUG(1, printf("seq_delunit: 7 \n")); - cv_destroy(&scp->in_cv); - SEQ_DEBUG(1, printf("seq_delunit: 8 \n")); - cv_destroy(&scp->th_cv); - - SEQ_DEBUG(1, printf("seq_delunit: 10 \n")); - if (scp->seqdev) - destroy_dev(scp->seqdev); - SEQ_DEBUG(1, printf("seq_delunit: 11 \n")); - if (scp->musicdev) - destroy_dev(scp->musicdev); - SEQ_DEBUG(1, printf("seq_delunit: 12 \n")); - scp->seqdev = scp->musicdev = NULL; - if (scp->midis != NULL) - free(scp->midis, M_TEMP); - SEQ_DEBUG(1, printf("seq_delunit: 13 \n")); - if (scp->midi_flags != NULL) - free(scp->midi_flags, M_TEMP); - SEQ_DEBUG(1, printf("seq_delunit: 14 \n")); - free(scp->out_q.b, M_TEMP); - SEQ_DEBUG(1, printf("seq_delunit: 15 \n")); - free(scp->in_q.b, M_TEMP); - - SEQ_DEBUG(1, printf("seq_delunit: 16 \n")); - - mtx_destroy(&scp->seq_lock); - SEQ_DEBUG(1, printf("seq_delunit: 17 \n")); - free(scp, M_DEVBUF); - - mtx_lock(&seqinfo_mtx); - for (i = unit; i < (nseq - 1); i++) - seqs[i] = seqs[i + 1]; - nseq--; - mtx_unlock(&seqinfo_mtx); - - return 0; -} - -int -seq_modevent(module_t mod, int type, void *data) -{ - int retval, r; - - retval = 0; - - switch (type) { - case MOD_LOAD: - mtx_init(&seqinfo_mtx, "seqmod", NULL, 0); - retval = seq_addunit(); - break; - - case MOD_UNLOAD: - while (nseq) { - r = seq_delunit(nseq - 1); - if (r) { - retval = r; - break; - } - } - if (nseq == 0) { - retval = 0; - mtx_destroy(&seqinfo_mtx); - } - break; - - default: - break; - } - - return retval; -} - -static int -seq_fetch_mid(struct seq_softc *scp, int unit, kobj_t *md) -{ - - if (unit >= scp->midi_number || unit < 0) - return EINVAL; - - *md = scp->midis[unit]; - - return 0; -} - -int -mseq_open(struct cdev *i_dev, int flags, int mode, struct thread *td) -{ - struct seq_softc *scp = i_dev->si_drv1; - int i; - - gone_in(15, "Warning! MIDI sequencer to be removed soon: no longer " - "needed or used\n"); - - if (scp == NULL) - return ENXIO; - - SEQ_DEBUG(3, printf("seq_open: scp %p unit %d, flags 0x%x.\n", - scp, scp->unit, flags)); - - /* - * Mark this device busy. 
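[seq_delunit() and seq_modevent() above keep the seqs[] table dense: deleting unit u shifts the tail down one slot, and MOD_UNLOAD always removes the highest unit first, so that pass never actually shifts anything. A stand-alone sketch of that bookkeeping.]

#include <stdio.h>

#define MAXSEQ 16
static void *seqs[MAXSEQ];
static int nseq;

/* Remove one unit and compact the tail, as seq_delunit() does. */
static void
del_unit(int unit)
{
        for (int i = unit; i < nseq - 1; i++)
                seqs[i] = seqs[i + 1];
        nseq--;
}

int
main(void)
{
        int dummy[3];

        for (nseq = 0; nseq < 3; nseq++)
                seqs[nseq] = &dummy[nseq];
        while (nseq > 0)                /* MOD_UNLOAD: tear down from the end */
                del_unit(nseq - 1);
        printf("nseq = %d\n", nseq);    /* 0 */
        return (0);
}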
- */ - - midistat_lock(); - mtx_lock(&scp->seq_lock); - if (scp->busy) { - mtx_unlock(&scp->seq_lock); - midistat_unlock(); - SEQ_DEBUG(2, printf("seq_open: unit %d is busy.\n", scp->unit)); - return EBUSY; - } - scp->fflags = flags; - /* - if ((scp->fflags & O_NONBLOCK) != 0) - scp->flags |= SEQ_F_NBIO; - */ - scp->music = MIDIDEV(i_dev) == SND_DEV_MUSIC; - - /* - * Enumerate the available midi devices - */ - scp->midi_number = 0; - scp->maxunits = midimapper_open_locked(scp->mapper, &scp->mapper_cookie); - - if (scp->maxunits == 0) - SEQ_DEBUG(2, printf("seq_open: no midi devices\n")); - - for (i = 0; i < scp->maxunits; i++) { - scp->midis[scp->midi_number] = - midimapper_fetch_synth_locked(scp->mapper, - scp->mapper_cookie, i); - if (scp->midis[scp->midi_number]) { - if (SYNTH_OPEN(scp->midis[scp->midi_number], scp, - scp->fflags) != 0) - scp->midis[scp->midi_number] = NULL; - else { - scp->midi_flags[scp->midi_number] = - SYNTH_QUERY(scp->midis[scp->midi_number]); - scp->midi_number++; - } - } - } - midistat_unlock(); - - timer_setvals(scp, 60, 100); - - timer_start(scp); - timer_stop(scp); - /* - * actually, if we're in rdonly mode, we should start the timer - */ - /* - * TODO: Handle recording now - */ - - scp->out_water = MIDIQ_SIZE(scp->out_q) / 2; - - scp->busy = 1; - mtx_unlock(&scp->seq_lock); - - SEQ_DEBUG(2, printf("seq_open: opened, mode %s.\n", - scp->music ? "music" : "sequencer")); - SEQ_DEBUG(2, - printf("Sequencer %d %p opened maxunits %d midi_number %d:\n", - scp->unit, scp, scp->maxunits, scp->midi_number)); - for (i = 0; i < scp->midi_number; i++) - SEQ_DEBUG(3, printf(" midi %d %p\n", i, scp->midis[i])); - - return 0; -} - -/* - * mseq_close - */ -int -mseq_close(struct cdev *i_dev, int flags, int mode, struct thread *td) -{ - int i; - struct seq_softc *scp = i_dev->si_drv1; - int ret; - - if (scp == NULL) - return ENXIO; - - SEQ_DEBUG(2, printf("seq_close: unit %d.\n", scp->unit)); - - mtx_lock(&scp->seq_lock); - - ret = ENXIO; - if (scp->busy == 0) - goto err; - - seq_reset(scp); - seq_sync(scp); - - for (i = 0; i < scp->midi_number; i++) - if (scp->midis[i]) - SYNTH_CLOSE(scp->midis[i]); - - midimapper_close(scp->mapper, scp->mapper_cookie); - - timer_stop(scp); - - scp->busy = 0; - ret = 0; - -err: - SEQ_DEBUG(3, printf("seq_close: closed ret = %d.\n", ret)); - mtx_unlock(&scp->seq_lock); - return ret; -} - -int -mseq_read(struct cdev *i_dev, struct uio *uio, int ioflag) -{ - int retval, used; - struct seq_softc *scp = i_dev->si_drv1; - -#define SEQ_RSIZE 32 - u_char buf[SEQ_RSIZE]; - - if (scp == NULL) - return ENXIO; - - SEQ_DEBUG(7, printf("mseq_read: unit %d, resid %zd.\n", - scp->unit, uio->uio_resid)); - - mtx_lock(&scp->seq_lock); - if ((scp->fflags & FREAD) == 0) { - SEQ_DEBUG(2, printf("mseq_read: unit %d is not for reading.\n", - scp->unit)); - retval = EIO; - goto err1; - } - /* - * Begin recording. - */ - /* - * if ((scp->flags & SEQ_F_READING) == 0) - */ - /* - * TODO, start recording if not alread - */ - - /* - * I think the semantics are to return as soon - * as possible. - * Second thought, it doesn't seem like midimoutain - * expects that at all. 
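[The enumeration in mseq_open() above keeps the unit space dense: every mapper slot is probed, slots that fail to open are skipped, and survivors are compacted so units 0..midi_number-1 are all usable. A sketch with a hypothetical try_open() standing in for the mapper fetch plus SYNTH_OPEN().]

#include <stdbool.h>
#include <stdio.h>

#define MAXUNITS 8

/* Hypothetical probe: pretend odd-numbered slots fail to open. */
static bool
try_open(int slot, int *handle)
{
        if (slot % 2 != 0)
                return (false);
        *handle = slot;
        return (true);
}

int
main(void)
{
        int midis[MAXUNITS], midi_number = 0, h;

        for (int i = 0; i < MAXUNITS; i++)
                if (try_open(i, &h))
                        midis[midi_number++] = h;       /* compact survivors */
        printf("midi_number = %d\n", midi_number);      /* 4 */
        return (0);
}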
- * TODO: Look up in some sort of spec - */ - - while (uio->uio_resid > 0) { - while (MIDIQ_EMPTY(scp->in_q)) { - retval = EWOULDBLOCK; - /* - * I wish I knew which one to care about - */ - - if (scp->fflags & O_NONBLOCK) - goto err1; - if (ioflag & O_NONBLOCK) - goto err1; - - retval = cv_wait_sig(&scp->in_cv, &scp->seq_lock); - if (retval != 0) - goto err1; - } - - used = MIN(MIDIQ_LEN(scp->in_q), uio->uio_resid); - used = MIN(used, SEQ_RSIZE); - - SEQ_DEBUG(8, printf("midiread: uiomove cc=%d\n", used)); - MIDIQ_DEQ(scp->in_q, buf, used); - mtx_unlock(&scp->seq_lock); - retval = uiomove(buf, used, uio); - mtx_lock(&scp->seq_lock); - if (retval) - goto err1; - } - - retval = 0; -err1: - mtx_unlock(&scp->seq_lock); - SEQ_DEBUG(6, printf("mseq_read: ret %d, resid %zd.\n", - retval, uio->uio_resid)); - - return retval; -} - -int -mseq_write(struct cdev *i_dev, struct uio *uio, int ioflag) -{ - u_char event[EV_SZ], newevent[EV_SZ], ev_code; - struct seq_softc *scp = i_dev->si_drv1; - int retval; - int used; - - SEQ_DEBUG(7, printf("seq_write: unit %d, resid %zd.\n", - scp->unit, uio->uio_resid)); - - if (scp == NULL) - return ENXIO; - - mtx_lock(&scp->seq_lock); - - if ((scp->fflags & FWRITE) == 0) { - SEQ_DEBUG(2, printf("seq_write: unit %d is not for writing.\n", - scp->unit)); - retval = EIO; - goto err0; - } - while (uio->uio_resid > 0) { - while (MIDIQ_AVAIL(scp->out_q) == 0) { - retval = EWOULDBLOCK; - if (scp->fflags & O_NONBLOCK) - goto err0; - if (ioflag & O_NONBLOCK) - goto err0; - SEQ_DEBUG(8, printf("seq_write cvwait\n")); - - scp->playing = 1; - cv_broadcast(&scp->out_cv); - cv_broadcast(&scp->state_cv); - - retval = cv_wait_sig(&scp->out_cv, &scp->seq_lock); - /* - * We slept, maybe things have changed since last - * dying check - */ - if (retval != 0) - goto err0; -#if 0 - /* - * Useless test - */ - if (scp != i_dev->si_drv1) - retval = ENXIO; -#endif - } - - used = MIN(uio->uio_resid, 4); - - SEQ_DEBUG(8, printf("seqout: resid %zd len %jd avail %jd\n", - uio->uio_resid, (intmax_t)MIDIQ_LEN(scp->out_q), - (intmax_t)MIDIQ_AVAIL(scp->out_q))); - - if (used != 4) { - retval = ENXIO; - goto err0; - } - mtx_unlock(&scp->seq_lock); - retval = uiomove(event, used, uio); - mtx_lock(&scp->seq_lock); - if (retval) - goto err0; - - ev_code = event[0]; - SEQ_DEBUG(8, printf("seq_write: unit %d, event %s.\n", - scp->unit, midi_cmdname(ev_code, cmdtab_seqevent))); - - /* Have a look at the event code. */ - if (ev_code == SEQ_FULLSIZE) { - /* - * TODO: restore code for SEQ_FULLSIZE - */ -#if 0 - /* - * A long event, these are the patches/samples for a - * synthesizer. - */ - midiunit = *(u_short *)&event[2]; - mtx_lock(&sd->seq_lock); - ret = lookup_mididev(scp, midiunit, LOOKUP_OPEN, &md); - mtx_unlock(&sd->seq_lock); - if (ret != 0) - return (ret); - - SEQ_DEBUG(printf("seq_write: loading a patch to the unit %d.\n", midiunit)); - - ret = md->synth.loadpatch(md, *(short *)&event[0], buf, - p + 4, count, 0); - return (ret); -#else - /* - * For now, just flush the darn buffer - */ - SEQ_DEBUG(2, - printf("seq_write: SEQ_FULLSIZE flusing buffer.\n")); - while (uio->uio_resid > 0) { - mtx_unlock(&scp->seq_lock); - retval = uiomove(event, MIN(EV_SZ, uio->uio_resid), uio); - mtx_lock(&scp->seq_lock); - if (retval) - goto err0; - } - retval = 0; - goto err0; -#endif - } - retval = EINVAL; - if (ev_code >= 128) { - int error; - - /* - * Some sort of an extended event. The size is eight - * bytes. scoop extra info. 
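[The read loop above moves data in bounded chunks: each pass copies MIN(queue length, uio residual, SEQ_RSIZE) bytes through a stack buffer, so the lock can be dropped around uiomove() without the copy growing unbounded. A user-space sketch of the chunking; the helper name is hypothetical.]

#include <stdio.h>
#include <string.h>

#define RSIZE 32        /* as SEQ_RSIZE above */

/* Copy min(want, srclen) bytes in chunks of at most RSIZE. */
static size_t
chunked_read(unsigned char *dst, size_t want,
    const unsigned char *src, size_t srclen)
{
        size_t done = 0, used;
        unsigned char buf[RSIZE];

        while (done < want && done < srclen) {
                used = want - done;
                if (used > srclen - done)
                        used = srclen - done;   /* MIDIQ_LEN() bound */
                if (used > RSIZE)
                        used = RSIZE;           /* stack-buffer bound */
                memcpy(buf, src + done, used);  /* MIDIQ_DEQ() stand-in */
                memcpy(dst + done, buf, used);  /* uiomove() stand-in */
                done += used;
        }
        return (done);
}

int
main(void)
{
        unsigned char src[100], dst[100];

        memset(src, 0x5a, sizeof(src));
        printf("%zu\n", chunked_read(dst, sizeof(dst), src, 70));      /* 70 */
        return (0);
}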
- */ - if (scp->music && ev_code == SEQ_EXTENDED) { - SEQ_DEBUG(2, printf("seq_write: invalid level two event %x.\n", ev_code)); - goto err0; - } - mtx_unlock(&scp->seq_lock); - if (uio->uio_resid < 4) - error = EINVAL; - else - error = uiomove((caddr_t)&event[4], 4, uio); - mtx_lock(&scp->seq_lock); - if (error) { - SEQ_DEBUG(2, - printf("seq_write: user memory mangled?\n")); - goto err0; - } - } else { - /* - * Size four event. - */ - if (scp->music) { - SEQ_DEBUG(2, printf("seq_write: four byte event in music mode.\n")); - goto err0; - } - } - if (ev_code == SEQ_MIDIPUTC) { - /* - * TODO: event[2] is unit number to receive char. - * Range check it. - */ - } - if (scp->music) { -#ifdef not_ever_ever - if (event[0] == EV_TIMING && - (event[1] == TMR_START || event[1] == TMR_STOP)) { - /* - * For now, try to make midimoutain work by - * forcing these events to be processed - * immediately. - */ - seq_processevent(scp, event); - } else - MIDIQ_ENQ(scp->out_q, event, EV_SZ); -#else - MIDIQ_ENQ(scp->out_q, event, EV_SZ); -#endif - } else { - if (seq_convertold(event, newevent) > 0) - MIDIQ_ENQ(scp->out_q, newevent, EV_SZ); -#if 0 - else - goto err0; -#endif - } - } - - scp->playing = 1; - cv_broadcast(&scp->state_cv); - cv_broadcast(&scp->out_cv); - - retval = 0; - -err0: - SEQ_DEBUG(6, - printf("seq_write done: leftover buffer length %zd retval %d\n", - uio->uio_resid, retval)); - mtx_unlock(&scp->seq_lock); - return retval; -} - -int -mseq_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode, - struct thread *td) -{ - int midiunit, ret, tmp; - struct seq_softc *scp = i_dev->si_drv1; - struct synth_info *synthinfo; - struct midi_info *midiinfo; - u_char event[EV_SZ]; - u_char newevent[EV_SZ]; - - kobj_t md; - - /* - * struct snd_size *sndsize; - */ - - if (scp == NULL) - return ENXIO; - - SEQ_DEBUG(6, printf("seq_ioctl: unit %d, cmd %s.\n", - scp->unit, midi_cmdname(cmd, cmdtab_seqioctl))); - - ret = 0; - - switch (cmd) { - case SNDCTL_SEQ_GETTIME: - /* - * ioctl needed by libtse - */ - mtx_lock(&scp->seq_lock); - *(int *)arg = timer_now(scp); - mtx_unlock(&scp->seq_lock); - SEQ_DEBUG(6, printf("seq_ioctl: gettime %d.\n", *(int *)arg)); - ret = 0; - break; - case SNDCTL_TMR_METRONOME: - /* fallthrough */ - case SNDCTL_TMR_SOURCE: - /* - * Not implemented - */ - ret = 0; - break; - case SNDCTL_TMR_TEMPO: - event[1] = TMR_TEMPO; - event[4] = *(int *)arg & 0xFF; - event[5] = (*(int *)arg >> 8) & 0xFF; - event[6] = (*(int *)arg >> 16) & 0xFF; - event[7] = (*(int *)arg >> 24) & 0xFF; - goto timerevent; - case SNDCTL_TMR_TIMEBASE: - event[1] = TMR_TIMERBASE; - event[4] = *(int *)arg & 0xFF; - event[5] = (*(int *)arg >> 8) & 0xFF; - event[6] = (*(int *)arg >> 16) & 0xFF; - event[7] = (*(int *)arg >> 24) & 0xFF; - goto timerevent; - case SNDCTL_TMR_START: - event[1] = TMR_START; - goto timerevent; - case SNDCTL_TMR_STOP: - event[1] = TMR_STOP; - goto timerevent; - case SNDCTL_TMR_CONTINUE: - event[1] = TMR_CONTINUE; -timerevent: - event[0] = EV_TIMING; - mtx_lock(&scp->seq_lock); - if (!scp->music) { - ret = EINVAL; - mtx_unlock(&scp->seq_lock); - break; - } - seq_processevent(scp, event); - mtx_unlock(&scp->seq_lock); - break; - case SNDCTL_TMR_SELECT: - SEQ_DEBUG(2, - printf("seq_ioctl: SNDCTL_TMR_SELECT not supported\n")); - ret = EINVAL; - break; - case SNDCTL_SEQ_SYNC: - if (mode == O_RDONLY) { - ret = 0; - break; - } - mtx_lock(&scp->seq_lock); - ret = seq_sync(scp); - mtx_unlock(&scp->seq_lock); - break; - case SNDCTL_SEQ_PANIC: - /* fallthrough */ - case SNDCTL_SEQ_RESET: - /* - * 
SNDCTL_SEQ_PANIC == SNDCTL_SEQ_RESET - */ - mtx_lock(&scp->seq_lock); - seq_reset(scp); - mtx_unlock(&scp->seq_lock); - ret = 0; - break; - case SNDCTL_SEQ_TESTMIDI: - mtx_lock(&scp->seq_lock); - /* - * TODO: SNDCTL_SEQ_TESTMIDI now means "can I write to the - * device?". - */ - mtx_unlock(&scp->seq_lock); - break; -#if 0 - case SNDCTL_SEQ_GETINCOUNT: - if (mode == O_WRONLY) - *(int *)arg = 0; - else { - mtx_lock(&scp->seq_lock); - *(int *)arg = scp->in_q.rl; - mtx_unlock(&scp->seq_lock); - SEQ_DEBUG(printf("seq_ioctl: incount %d.\n", - *(int *)arg)); - } - ret = 0; - break; - case SNDCTL_SEQ_GETOUTCOUNT: - if (mode == O_RDONLY) - *(int *)arg = 0; - else { - mtx_lock(&scp->seq_lock); - *(int *)arg = scp->out_q.fl; - mtx_unlock(&scp->seq_lock); - SEQ_DEBUG(printf("seq_ioctl: outcount %d.\n", - *(int *)arg)); - } - ret = 0; - break; -#endif - case SNDCTL_SEQ_CTRLRATE: - if (*(int *)arg != 0) { - ret = EINVAL; - break; - } - mtx_lock(&scp->seq_lock); - *(int *)arg = scp->timerbase; - mtx_unlock(&scp->seq_lock); - SEQ_DEBUG(3, printf("seq_ioctl: ctrlrate %d.\n", *(int *)arg)); - ret = 0; - break; - /* - * TODO: ioctl SNDCTL_SEQ_RESETSAMPLES - */ -#if 0 - case SNDCTL_SEQ_RESETSAMPLES: - mtx_lock(&scp->seq_lock); - ret = lookup_mididev(scp, *(int *)arg, LOOKUP_OPEN, &md); - mtx_unlock(&scp->seq_lock); - if (ret != 0) - break; - ret = midi_ioctl(MIDIMKDEV(major(i_dev), *(int *)arg, - SND_DEV_MIDIN), cmd, arg, mode, td); - break; -#endif - case SNDCTL_SEQ_NRSYNTHS: - mtx_lock(&scp->seq_lock); - *(int *)arg = scp->midi_number; - mtx_unlock(&scp->seq_lock); - SEQ_DEBUG(3, printf("seq_ioctl: synths %d.\n", *(int *)arg)); - ret = 0; - break; - case SNDCTL_SEQ_NRMIDIS: - mtx_lock(&scp->seq_lock); - if (scp->music) - *(int *)arg = 0; - else { - /* - * TODO: count the numbder of devices that can WRITERAW - */ - *(int *)arg = scp->midi_number; - } - mtx_unlock(&scp->seq_lock); - SEQ_DEBUG(3, printf("seq_ioctl: midis %d.\n", *(int *)arg)); - ret = 0; - break; - /* - * TODO: ioctl SNDCTL_SYNTH_MEMAVL - */ -#if 0 - case SNDCTL_SYNTH_MEMAVL: - mtx_lock(&scp->seq_lock); - ret = lookup_mididev(scp, *(int *)arg, LOOKUP_OPEN, &md); - mtx_unlock(&scp->seq_lock); - if (ret != 0) - break; - ret = midi_ioctl(MIDIMKDEV(major(i_dev), *(int *)arg, - SND_DEV_MIDIN), cmd, arg, mode, td); - break; -#endif - case SNDCTL_SEQ_OUTOFBAND: - for (ret = 0; ret < EV_SZ; ret++) - event[ret] = (u_char)arg[0]; - - mtx_lock(&scp->seq_lock); - if (scp->music) - ret = seq_processevent(scp, event); - else { - if (seq_convertold(event, newevent) > 0) - ret = seq_processevent(scp, newevent); - else - ret = EINVAL; - } - mtx_unlock(&scp->seq_lock); - break; - case SNDCTL_SYNTH_INFO: - synthinfo = (struct synth_info *)arg; - midiunit = synthinfo->device; - mtx_lock(&scp->seq_lock); - if (seq_fetch_mid(scp, midiunit, &md) == 0) { - bzero(synthinfo, sizeof(*synthinfo)); - synthinfo->name[0] = 'f'; - synthinfo->name[1] = 'a'; - synthinfo->name[2] = 'k'; - synthinfo->name[3] = 'e'; - synthinfo->name[4] = 's'; - synthinfo->name[5] = 'y'; - synthinfo->name[6] = 'n'; - synthinfo->name[7] = 't'; - synthinfo->name[8] = 'h'; - synthinfo->device = midiunit; - synthinfo->synth_type = SYNTH_TYPE_MIDI; - synthinfo->capabilities = scp->midi_flags[midiunit]; - ret = 0; - } else - ret = EINVAL; - mtx_unlock(&scp->seq_lock); - break; - case SNDCTL_MIDI_INFO: - midiinfo = (struct midi_info *)arg; - midiunit = midiinfo->device; - mtx_lock(&scp->seq_lock); - if (seq_fetch_mid(scp, midiunit, &md) == 0) { - bzero(midiinfo, sizeof(*midiinfo)); - midiinfo->name[0] = 
'f'; - midiinfo->name[1] = 'a'; - midiinfo->name[2] = 'k'; - midiinfo->name[3] = 'e'; - midiinfo->name[4] = 'm'; - midiinfo->name[5] = 'i'; - midiinfo->name[6] = 'd'; - midiinfo->name[7] = 'i'; - midiinfo->device = midiunit; - midiinfo->capabilities = scp->midi_flags[midiunit]; - /* - * TODO: What devtype? - */ - midiinfo->dev_type = 0x01; - ret = 0; - } else - ret = EINVAL; - mtx_unlock(&scp->seq_lock); - break; - case SNDCTL_SEQ_THRESHOLD: - mtx_lock(&scp->seq_lock); - RANGE(*(int *)arg, 1, MIDIQ_SIZE(scp->out_q) - 1); - scp->out_water = *(int *)arg; - mtx_unlock(&scp->seq_lock); - SEQ_DEBUG(3, printf("seq_ioctl: water %d.\n", *(int *)arg)); - ret = 0; - break; - case SNDCTL_MIDI_PRETIME: - tmp = *(int *)arg; - if (tmp < 0) - tmp = 0; - mtx_lock(&scp->seq_lock); - scp->pre_event_timeout = (hz * tmp) / 10; - *(int *)arg = scp->pre_event_timeout; - mtx_unlock(&scp->seq_lock); - SEQ_DEBUG(3, printf("seq_ioctl: pretime %d.\n", *(int *)arg)); - ret = 0; - break; - case SNDCTL_FM_4OP_ENABLE: - case SNDCTL_PMGR_IFACE: - case SNDCTL_PMGR_ACCESS: - /* - * Patch manager and fm are ded, ded, ded. - */ - /* fallthrough */ - default: - /* - * TODO: Consider ioctl default case. - * Old code used to - * if ((scp->fflags & O_ACCMODE) == FREAD) { - * ret = EIO; - * break; - * } - * Then pass on the ioctl to device 0 - */ - SEQ_DEBUG(2, - printf("seq_ioctl: unsupported IOCTL %ld.\n", cmd)); - ret = EINVAL; - break; - } - - return ret; -} - -int -mseq_poll(struct cdev *i_dev, int events, struct thread *td) -{ - int ret, lim; - struct seq_softc *scp = i_dev->si_drv1; - - SEQ_DEBUG(3, printf("seq_poll: unit %d.\n", scp->unit)); - SEQ_DEBUG(1, printf("seq_poll: unit %d.\n", scp->unit)); - - mtx_lock(&scp->seq_lock); - - ret = 0; - - /* Look up the appropriate queue and select it. */ - if ((events & (POLLOUT | POLLWRNORM)) != 0) { - /* Start playing. */ - scp->playing = 1; - cv_broadcast(&scp->state_cv); - cv_broadcast(&scp->out_cv); - - lim = scp->out_water; - - if (MIDIQ_AVAIL(scp->out_q) < lim) - /* No enough space, record select. */ - selrecord(td, &scp->out_sel); - else - /* We can write now. */ - ret |= events & (POLLOUT | POLLWRNORM); - } - if ((events & (POLLIN | POLLRDNORM)) != 0) { - /* TODO: Start recording. */ - - /* Find out the boundary. */ - lim = 1; - if (MIDIQ_LEN(scp->in_q) < lim) - /* No data ready, record select. */ - selrecord(td, &scp->in_sel); - else - /* We can read now. */ - ret |= events & (POLLIN | POLLRDNORM); - } - mtx_unlock(&scp->seq_lock); - - return (ret); -} - -#if 0 -static void -sein_qtr(void *p, void /* mididev_info */ *md) -{ - struct seq_softc *scp; - - scp = (struct seq_softc *)p; - - mtx_lock(&scp->seq_lock); - - /* Restart playing if we have the data to output. */ - if (scp->queueout_pending) - seq_callback(scp, SEQ_CB_START | SEQ_CB_WR); - /* Check the midi device if we are reading. */ - if ((scp->flags & SEQ_F_READING) != 0) - seq_midiinput(scp, md); - - mtx_unlock(&scp->seq_lock); -} - -#endif -/* - * seq_convertold - * Was the old playevent. 
Use this to convert and old - * style /dev/sequencer event to a /dev/music event - */ -static int -seq_convertold(u_char *event, u_char *out) -{ - int used; - u_char dev, chn, note, vel; - - out[0] = out[1] = out[2] = out[3] = out[4] = out[5] = out[6] = - out[7] = 0; - - dev = 0; - chn = event[1]; - note = event[2]; - vel = event[3]; - - used = 0; - -restart: - /* - * TODO: Debug statement - */ - switch (event[0]) { - case EV_TIMING: - case EV_CHN_VOICE: - case EV_CHN_COMMON: - case EV_SYSEX: - case EV_SEQ_LOCAL: - out[0] = event[0]; - out[1] = event[1]; - out[2] = event[2]; - out[3] = event[3]; - out[4] = event[4]; - out[5] = event[5]; - out[6] = event[6]; - out[7] = event[7]; - used += 8; - break; - case SEQ_NOTEOFF: - out[0] = EV_CHN_VOICE; - out[1] = dev; - out[2] = MIDI_NOTEOFF; - out[3] = chn; - out[4] = note; - out[5] = 255; - used += 4; - break; - - case SEQ_NOTEON: - out[0] = EV_CHN_VOICE; - out[1] = dev; - out[2] = MIDI_NOTEON; - out[3] = chn; - out[4] = note; - out[5] = vel; - used += 4; - break; - - /* - * wait delay = (event[2] << 16) + (event[3] << 8) + event[4] - */ - - case SEQ_PGMCHANGE: - out[0] = EV_CHN_COMMON; - out[1] = dev; - out[2] = MIDI_PGM_CHANGE; - out[3] = chn; - out[4] = note; - out[5] = vel; - used += 4; - break; -/* - out[0] = EV_TIMING; - out[1] = dev; - out[2] = MIDI_PGM_CHANGE; - out[3] = chn; - out[4] = note; - out[5] = vel; - SEQ_DEBUG(4,printf("seq_playevent: synctimer\n")); - break; -*/ - - case SEQ_MIDIPUTC: - SEQ_DEBUG(4, - printf("seq_playevent: put data 0x%02x, unit %d.\n", - event[1], event[2])); - /* - * Pass through to the midi device. - * device = event[2] - * data = event[1] - */ - out[0] = SEQ_MIDIPUTC; - out[1] = dev; - out[2] = chn; - used += 4; - break; -#ifdef notyet - case SEQ_ECHO: - /* - * This isn't handled here yet because I don't know if I can - * just use four bytes events. There might be consequences - * in the _read routing - */ - if (seq_copytoinput(scp, event, 4) == EAGAIN) { - ret = QUEUEFULL; - break; - } - ret = MORE; - break; -#endif - case SEQ_EXTENDED: - switch (event[1]) { - case SEQ_NOTEOFF: - case SEQ_NOTEON: - case SEQ_PGMCHANGE: - event++; - used = 4; - goto restart; - break; - case SEQ_AFTERTOUCH: - /* - * SYNTH_AFTERTOUCH(md, event[3], event[4]) - */ - case SEQ_BALANCE: - /* - * SYNTH_PANNING(md, event[3], (char)event[4]) - */ - case SEQ_CONTROLLER: - /* - * SYNTH_CONTROLLER(md, event[3], event[4], *(short *)&event[5]) - */ - case SEQ_VOLMODE: - /* - * SYNTH_VOLUMEMETHOD(md, event[3]) - */ - default: - SEQ_DEBUG(2, - printf("seq_convertold: SEQ_EXTENDED type %d" - "not handled\n", event[1])); - break; - } - break; - case SEQ_WAIT: - out[0] = EV_TIMING; - out[1] = TMR_WAIT_REL; - out[4] = event[2]; - out[5] = event[3]; - out[6] = event[4]; - - SEQ_DEBUG(5, printf("SEQ_WAIT %d", - event[2] + (event[3] << 8) + (event[4] << 24))); - - used += 4; - break; - - case SEQ_ECHO: - case SEQ_SYNCTIMER: - case SEQ_PRIVATE: - default: - SEQ_DEBUG(2, - printf("seq_convertold: event type %d not handled %d %d %d\n", - event[0], event[1], event[2], event[3])); - break; - } - return used; -} - -/* - * Writting to the sequencer buffer never blocks and drops - * input which cannot be queued - */ -void -seq_copytoinput(struct seq_softc *scp, u_char *event, int len) -{ - - mtx_assert(&scp->seq_lock, MA_OWNED); - - if (MIDIQ_AVAIL(scp->in_q) < len) { - /* - * ENOROOM? EINPUTDROPPED? ETOUGHLUCK? 
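[seq_convertold() above widens an old 4-byte /dev/sequencer event into the 8-byte /dev/music format and reports how many input bytes it consumed. A stand-alone demo of the SEQ_NOTEON case; the numeric event codes here are placeholders, not the real soundcard.h values.]

#include <stdio.h>

/* Placeholder event codes (the real ones live in soundcard.h). */
#define SEQ_NOTEON      1
#define EV_CHN_VOICE    0x93
#define MIDI_NOTEON     0x90

/* Widen a 4-byte SEQ_NOTEON into an 8-byte EV_CHN_VOICE event. */
static int
convert_noteon(const unsigned char ev[4], unsigned char out[8])
{
        if (ev[0] != SEQ_NOTEON)
                return (0);
        out[0] = EV_CHN_VOICE;
        out[1] = 0;             /* device */
        out[2] = MIDI_NOTEON;
        out[3] = ev[1];         /* channel */
        out[4] = ev[2];         /* note */
        out[5] = ev[3];         /* velocity */
        out[6] = out[7] = 0;
        return (4);             /* input bytes consumed */
}

int
main(void)
{
        unsigned char ev[4] = { SEQ_NOTEON, 0, 60, 100 };       /* middle C */
        unsigned char out[8];

        printf("consumed %d\n", convert_noteon(ev, out));       /* 4 */
        return (0);
}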
- */ - SEQ_DEBUG(2, printf("seq_copytoinput: queue full\n")); - } else { - MIDIQ_ENQ(scp->in_q, event, len); - selwakeup(&scp->in_sel); - cv_broadcast(&scp->in_cv); - } - -} - -static int -seq_chnvoice(struct seq_softc *scp, kobj_t md, u_char *event) -{ - int ret, voice; - u_char cmd, chn, note, parm; - - ret = 0; - cmd = event[2]; - chn = event[3]; - note = event[4]; - parm = event[5]; - - mtx_assert(&scp->seq_lock, MA_OWNED); - - SEQ_DEBUG(5, printf("seq_chnvoice: unit %d, dev %d, cmd %s," - " chn %d, note %d, parm %d.\n", scp->unit, event[1], - midi_cmdname(cmd, cmdtab_seqcv), chn, note, parm)); - - voice = SYNTH_ALLOC(md, chn, note); - - mtx_unlock(&scp->seq_lock); - - switch (cmd) { - case MIDI_NOTEON: - if (note < 128 || note == 255) { -#if 0 - if (scp->music && chn == 9) { - /* - * This channel is a percussion. The note - * number is the patch number. - */ - /* - mtx_unlock(&scp->seq_lock); - if (SYNTH_SETINSTR(md, voice, 128 + note) - == EAGAIN) { - mtx_lock(&scp->seq_lock); - return (QUEUEFULL); - } - mtx_lock(&scp->seq_lock); - */ - note = 60; /* Middle C. */ - } -#endif - if (scp->music) { - /* - mtx_unlock(&scp->seq_lock); - if (SYNTH_SETUPVOICE(md, voice, chn) - == EAGAIN) { - mtx_lock(&scp->seq_lock); - return (QUEUEFULL); - } - mtx_lock(&scp->seq_lock); - */ - } - SYNTH_STARTNOTE(md, voice, note, parm); - } - break; - case MIDI_NOTEOFF: - SYNTH_KILLNOTE(md, voice, note, parm); - break; - case MIDI_KEY_PRESSURE: - SYNTH_AFTERTOUCH(md, voice, parm); - break; - default: - ret = 1; - SEQ_DEBUG(2, printf("seq_chnvoice event type %d not handled\n", - event[1])); - break; - } - - mtx_lock(&scp->seq_lock); - return ret; -} - -static int -seq_chncommon(struct seq_softc *scp, kobj_t md, u_char *event) -{ - int ret; - u_short w14; - u_char cmd, chn, p1; - - ret = 0; - cmd = event[2]; - chn = event[3]; - p1 = event[4]; - w14 = *(u_short *)&event[6]; - - SEQ_DEBUG(5, printf("seq_chncommon: unit %d, dev %d, cmd %s, chn %d," - " p1 %d, w14 %d.\n", scp->unit, event[1], - midi_cmdname(cmd, cmdtab_seqccmn), chn, p1, w14)); - mtx_unlock(&scp->seq_lock); - switch (cmd) { - case MIDI_PGM_CHANGE: - SEQ_DEBUG(4, printf("seq_chncommon pgmchn chn %d pg %d\n", - chn, p1)); - SYNTH_SETINSTR(md, chn, p1); - break; - case MIDI_CTL_CHANGE: - SEQ_DEBUG(4, printf("seq_chncommon ctlch chn %d pg %d %d\n", - chn, p1, w14)); - SYNTH_CONTROLLER(md, chn, p1, w14); - break; - case MIDI_PITCH_BEND: - if (scp->music) { - /* - * TODO: MIDI_PITCH_BEND - */ -#if 0 - mtx_lock(&md->synth.vc_mtx); - md->synth.chn_info[chn].bender_value = w14; - if (md->midiunit >= 0) { - /* - * Handle all of the notes playing on this - * channel. 
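[seq_chncommon() hands SYNTH_BENDER() a 14-bit value (w14). On the MIDI wire that value is carried as two 7-bit bytes, and 1 << 13 = 8192 is the centered, no-bend position that seq_reset() later writes back. A small demo of the encoding.]

#include <stdio.h>

/* Split a 14-bit pitch-bend value into MIDI LSB/MSB (7 bits each). */
static void
bend_encode(unsigned w14, unsigned char *lsb, unsigned char *msb)
{
        *lsb = w14 & 0x7f;
        *msb = (w14 >> 7) & 0x7f;
}

int
main(void)
{
        unsigned char lsb, msb;

        bend_encode(1u << 13, &lsb, &msb);      /* center: no bend */
        printf("center 8192 -> lsb %u msb %u\n", lsb, msb);     /* 0 64 */
        return (0);
}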
- */ - key = ((int)chn << 8); - for (i = 0; i < md->synth.alloc.max_voice; i++) - if ((md->synth.alloc.map[i] & 0xff00) == key) { - mtx_unlock(&md->synth.vc_mtx); - mtx_unlock(&scp->seq_lock); - if (md->synth.bender(md, i, w14) == EAGAIN) { - mtx_lock(&scp->seq_lock); - return (QUEUEFULL); - } - mtx_lock(&scp->seq_lock); - } - } else { - mtx_unlock(&md->synth.vc_mtx); - mtx_unlock(&scp->seq_lock); - if (md->synth.bender(md, chn, w14) == EAGAIN) { - mtx_lock(&scp->seq_lock); - return (QUEUEFULL); - } - mtx_lock(&scp->seq_lock); - } -#endif - } else - SYNTH_BENDER(md, chn, w14); - break; - default: - ret = 1; - SEQ_DEBUG(2, - printf("seq_chncommon event type %d not handled.\n", - event[1])); - break; - } - mtx_lock(&scp->seq_lock); - return ret; -} - -static int -seq_timing(struct seq_softc *scp, u_char *event) -{ - int param; - int ret; - - ret = 0; - param = event[4] + (event[5] << 8) + - (event[6] << 16) + (event[7] << 24); - - SEQ_DEBUG(5, printf("seq_timing: unit %d, cmd %d, param %d.\n", - scp->unit, event[1], param)); - switch (event[1]) { - case TMR_WAIT_REL: - timer_wait(scp, param, 0); - break; - case TMR_WAIT_ABS: - timer_wait(scp, param, 1); - break; - case TMR_START: - timer_start(scp); - cv_broadcast(&scp->reset_cv); - break; - case TMR_STOP: - timer_stop(scp); - /* - * The following cv_broadcast isn't needed since we only - * wait for 0->1 transitions. It probably won't hurt - */ - cv_broadcast(&scp->reset_cv); - break; - case TMR_CONTINUE: - timer_continue(scp); - cv_broadcast(&scp->reset_cv); - break; - case TMR_TEMPO: - if (param < 8) - param = 8; - if (param > 360) - param = 360; - SEQ_DEBUG(4, printf("Timer set tempo %d\n", param)); - timer_setvals(scp, param, scp->timerbase); - break; - case TMR_TIMERBASE: - if (param < 1) - param = 1; - if (param > 1000) - param = 1000; - SEQ_DEBUG(4, printf("Timer set timerbase %d\n", param)); - timer_setvals(scp, scp->tempo, param); - break; - case TMR_ECHO: - /* - * TODO: Consider making 4-byte events for /dev/sequencer - * PRO: Maybe needed by legacy apps - * CON: soundcard.h has been warning for a while many years - * to expect 8 byte events. 
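[The 32-bit timing parameter travels in bytes 4..7 of the event, least significant byte first: the SNDCTL_TMR_TEMPO ioctl above packs it that way and seq_timing() decodes it identically. A round-trip demo mirroring both sides.]

#include <stdio.h>

static void
param_pack(unsigned char ev[8], int param)
{
        ev[4] = param & 0xFF;
        ev[5] = (param >> 8) & 0xFF;
        ev[6] = (param >> 16) & 0xFF;
        ev[7] = (param >> 24) & 0xFF;
}

static int
param_unpack(const unsigned char ev[8])
{
        return (ev[4] + (ev[5] << 8) + (ev[6] << 16) + (ev[7] << 24));
}

int
main(void)
{
        unsigned char ev[8] = { 0 };

        param_pack(ev, 120);                    /* e.g. a tempo of 120 BPM */
        printf("%d\n", param_unpack(ev));       /* 120 */
        return (0);
}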
- */ -#if 0 - if (scp->music) - seq_copytoinput(scp, event, 8); - else { - param = (param << 8 | SEQ_ECHO); - seq_copytoinput(scp, (u_char *)¶m, 4); - } -#else - seq_copytoinput(scp, event, 8); -#endif - break; - default: - SEQ_DEBUG(2, printf("seq_timing event type %d not handled.\n", - event[1])); - ret = 1; - break; - } - return ret; -} - -static int -seq_local(struct seq_softc *scp, u_char *event) -{ - int ret; - - ret = 0; - mtx_assert(&scp->seq_lock, MA_OWNED); - - SEQ_DEBUG(5, printf("seq_local: unit %d, cmd %d\n", scp->unit, - event[1])); - switch (event[1]) { - default: - SEQ_DEBUG(1, printf("seq_local event type %d not handled\n", - event[1])); - ret = 1; - break; - } - return ret; -} - -static int -seq_sysex(struct seq_softc *scp, kobj_t md, u_char *event) -{ - int i, l; - - mtx_assert(&scp->seq_lock, MA_OWNED); - SEQ_DEBUG(5, printf("seq_sysex: unit %d device %d\n", scp->unit, - event[1])); - l = 0; - for (i = 0; i < 6 && event[i + 2] != 0xff; i++) - l = i + 1; - if (l > 0) { - mtx_unlock(&scp->seq_lock); - if (SYNTH_SENDSYSEX(md, &event[2], l) == EAGAIN) { - mtx_lock(&scp->seq_lock); - return 1; - } - mtx_lock(&scp->seq_lock); - } - return 0; -} - -/* - * Reset no longer closes the raw devices nor seq_sync's - * Callers are IOCTL and seq_close - */ -static void -seq_reset(struct seq_softc *scp) -{ - int chn, i; - kobj_t m; - - mtx_assert(&scp->seq_lock, MA_OWNED); - - SEQ_DEBUG(5, printf("seq_reset: unit %d.\n", scp->unit)); - - /* - * Stop reading and writing. - */ - - /* scp->recording = 0; */ - scp->playing = 0; - cv_broadcast(&scp->state_cv); - cv_broadcast(&scp->out_cv); - cv_broadcast(&scp->reset_cv); - - /* - * For now, don't reset the timers. - */ - MIDIQ_CLEAR(scp->in_q); - MIDIQ_CLEAR(scp->out_q); - - for (i = 0; i < scp->midi_number; i++) { - m = scp->midis[i]; - mtx_unlock(&scp->seq_lock); - SYNTH_RESET(m); - for (chn = 0; chn < 16; chn++) { - SYNTH_CONTROLLER(m, chn, 123, 0); - SYNTH_CONTROLLER(m, chn, 121, 0); - SYNTH_BENDER(m, chn, 1 << 13); - } - mtx_lock(&scp->seq_lock); - } -} - -/* - * seq_sync - * *really* flush the output queue - * flush the event queue, then flush the synthsisers. - * Callers are IOCTL and close - */ - -#define SEQ_SYNC_TIMEOUT 8 -static int -seq_sync(struct seq_softc *scp) -{ - int i, rl, sync[16], done; - - mtx_assert(&scp->seq_lock, MA_OWNED); - - SEQ_DEBUG(4, printf("seq_sync: unit %d.\n", scp->unit)); - - /* - * Wait until output queue is empty. Check every so often to see if - * the queue is moving along. If it isn't just abort. - */ - while (!MIDIQ_EMPTY(scp->out_q)) { - if (!scp->playing) { - scp->playing = 1; - cv_broadcast(&scp->state_cv); - cv_broadcast(&scp->out_cv); - } - rl = MIDIQ_LEN(scp->out_q); - - i = cv_timedwait_sig(&scp->out_cv, - &scp->seq_lock, SEQ_SYNC_TIMEOUT * hz); - - if (i == EINTR || i == ERESTART) { - if (i == EINTR) { - /* - * XXX: I don't know why we stop playing - */ - scp->playing = 0; - cv_broadcast(&scp->out_cv); - } - return i; - } - if (i == EWOULDBLOCK && rl == MIDIQ_LEN(scp->out_q) && - scp->waiting == 0) { - /* - * A queue seems to be stuck up. Give up and clear - * queues. - */ - MIDIQ_CLEAR(scp->out_q); - scp->playing = 0; - cv_broadcast(&scp->state_cv); - cv_broadcast(&scp->out_cv); - cv_broadcast(&scp->reset_cv); - - /* - * TODO: Consider if the raw devices need to be flushed - */ - - SEQ_DEBUG(1, printf("seq_sync queue stuck, aborting\n")); - - return i; - } - } - - scp->playing = 0; - /* - * Since syncing a midi device might block, unlock scp->seq_lock. 
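[seq_reset() above quiesces every channel with two standard controller messages, All Notes Off (CC 123) and Reset All Controllers (CC 121), plus a recentered pitch bend. A sketch of the equivalent raw MIDI byte stream, assuming a simple byte-oriented sink.]

#include <stdio.h>

/* Emit the per-channel quiesce sequence used by seq_reset(). */
static void
channel_quiesce(unsigned char chn, void (*put)(unsigned char))
{
        put(0xB0 | chn); put(123); put(0);      /* All Notes Off */
        put(0xB0 | chn); put(121); put(0);      /* Reset All Controllers */
        put(0xE0 | chn); put(0); put(64);       /* pitch bend center (8192) */
}

static void
hexput(unsigned char b)
{
        printf("%02X ", b);
}

int
main(void)
{
        for (unsigned char chn = 0; chn < 16; chn++)
                channel_quiesce(chn, hexput);
        printf("\n");
        return (0);
}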
- */ - - mtx_unlock(&scp->seq_lock); - for (i = 0; i < scp->midi_number; i++) - sync[i] = 1; - - do { - done = 1; - for (i = 0; i < scp->midi_number; i++) - if (sync[i]) { - if (SYNTH_INSYNC(scp->midis[i]) == 0) - sync[i] = 0; - else - done = 0; - } - if (!done) - DELAY(5000); - - } while (!done); - - mtx_lock(&scp->seq_lock); - return 0; -} - -char * -midi_cmdname(int cmd, midi_cmdtab *tab) -{ - while (tab->name != NULL) { - if (cmd == tab->cmd) - return (tab->name); - tab++; - } - - return ("unknown"); -} diff --git a/sys/dev/sound/midi/synth_if.m b/sys/dev/sound/midi/synth_if.m deleted file mode 100644 index a763b3422bc6..000000000000 --- a/sys/dev/sound/midi/synth_if.m +++ /dev/null @@ -1,312 +0,0 @@ -#- -# Copyright (c) 2003 Mathew Kanner -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS -# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF -# SUCH DAMAGE. 
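[midi_cmdname() above walks a sentinel-terminated table and falls back to "unknown", which keeps debug printing total without bounds bookkeeping. A stand-alone sketch of the same lookup; the table values are illustrative, not the driver's cmdtab contents.]

#include <stdio.h>

typedef struct {
        int cmd;
        const char *name;
} cmdtab_entry;

static const cmdtab_entry cmdtab[] = {
        { 0x80, "EV_SEQ_LOCAL" },       /* illustrative values */
        { 0x81, "EV_TIMING" },
        { -1, NULL },                   /* sentinel */
};

/* Linear scan until the NULL-name sentinel, as in midi_cmdname(). */
static const char *
cmdname(int cmd, const cmdtab_entry *tab)
{
        for (; tab->name != NULL; tab++)
                if (tab->cmd == cmd)
                        return (tab->name);
        return ("unknown");
}

int
main(void)
{
        printf("%s %s\n", cmdname(0x81, cmdtab), cmdname(7, cmdtab));
        return (0);
}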
-# -# - -INTERFACE synth; - -#include <sys/systm.h> - -CODE { - -synth_killnote_t nokillnote; -synth_startnote_t nostartnote; -synth_setinstr_t nosetinstr; -synth_hwcontrol_t nohwcontrol; -synth_aftertouch_t noaftertouch; -synth_panning_t nopanning; -synth_controller_t nocontroller; -synth_volumemethod_t novolumemethod; -synth_bender_t nobender; -synth_setupvoice_t nosetupvoice; -synth_sendsysex_t nosendsysex; -synth_allocvoice_t noallocvoice; -synth_writeraw_t nowriteraw; -synth_reset_t noreset; -synth_shortname_t noshortname; -synth_open_t noopen; -synth_close_t noclose; -synth_query_t noquery; -synth_insync_t noinsync; -synth_alloc_t noalloc; - - int - nokillnote(void *_kobj, uint8_t _chn, uint8_t _note, uint8_t _vel) - { - printf("nokillnote\n"); - return 0; - } - - int - noopen(void *_kobj, void *_arg, int mode) - { - printf("noopen\n"); - return 0; - } - - int - noquery(void *_kboj) - { - printf("noquery\n"); - return 0; - } - - int - nostartnote(void *_kb, uint8_t _voice, uint8_t _note, uint8_t _parm) - { - printf("nostartnote\n"); - return 0; - } - - int - nosetinstr(void *_kb, uint8_t _chn, uint16_t _patchno) - { - printf("nosetinstr\n"); - return 0; - } - - int - nohwcontrol(void *_kb, uint8_t *_event) - { - printf("nohwcontrol\n"); - return 0; - } - - int - noaftertouch ( void /* X */ * _kobj, uint8_t _x1, uint8_t _x2) - { - printf("noaftertouch\n"); - return 0; - } - - int - nopanning ( void /* X */ * _kobj, uint8_t _x1, uint8_t _x2) - { - printf("nopanning\n"); - return 0; - } - - int - nocontroller ( void /* X */ * _kobj, uint8_t _x1, uint8_t _x2, uint16_t _x3) - { - printf("nocontroller\n"); - return 0; - } - - int - novolumemethod ( - void /* X */ * _kobj, - uint8_t _x1) - { - printf("novolumemethod\n"); - return 0; - } - - int - nobender ( void /* X */ * _kobj, uint8_t _voice, uint16_t _bend) - { - printf("nobender\n"); - return 0; - } - - int - nosetupvoice ( void /* X */ * _kobj, uint8_t _voice, uint8_t _chn) - { - - printf("nosetupvoice\n"); - return 0; - } - - int - nosendsysex ( void /* X */ * _kobj, void * _buf, size_t _len) - { - printf("nosendsysex\n"); - return 0; - } - - int - noallocvoice ( void /* X */ * _kobj, uint8_t _chn, uint8_t _note, void *_x) - { - printf("noallocvoice\n"); - return 0; - } - - int - nowriteraw ( void /* X */ * _kobjt, uint8_t * _buf, size_t _len) - { - printf("nowriteraw\n"); - return 1; - } - - int - noreset ( void /* X */ * _kobjt) - { - - printf("noreset\n"); - return 0; - } - - char * - noshortname (void /* X */ * _kobjt) - { - printf("noshortname\n"); - return "noshortname"; - } - - int - noclose ( void /* X */ * _kobjt) - { - - printf("noclose\n"); - return 0; - } - - int - noinsync (void /* X */ * _kobjt) - { - - printf("noinsync\n"); - return 0; - } - - int - noalloc ( void /* x */ * _kbojt, uint8_t _chn, uint8_t _note) - { - printf("noalloc\n"); - return 0; - } -} - -METHOD int killnote { - void /* X */ *_kobj; - uint8_t _chan; - uint8_t _note; - uint8_t _vel; -} DEFAULT nokillnote; - -METHOD int startnote { - void /* X */ *_kobj; - uint8_t _voice; - uint8_t _note; - uint8_t _parm; -} DEFAULT nostartnote; - -METHOD int setinstr { - void /* X */ *_kobj; - uint8_t _chn; - uint16_t _patchno; -} DEFAULT nosetinstr; - -METHOD int hwcontrol { - void /* X */ *_kobj; - uint8_t *_event; -} DEFAULT nohwcontrol; - -METHOD int aftertouch { - void /* X */ *_kobj; - uint8_t _x1; - uint8_t _x2; -} DEFAULT noaftertouch; - -METHOD int panning { - void /* X */ *_kobj; - uint8_t _x1; - uint8_t _x2; -} DEFAULT nopanning; - -METHOD int controller { 
- void /* X */ *_kobj; - uint8_t _x1; - uint8_t _x2; - uint16_t _x3; -} DEFAULT nocontroller; - -METHOD int volumemethod { - void /* X */ *_kobj; - uint8_t _x1; -} DEFAULT novolumemethod; - -METHOD int bender { - void /* X */ *_kobj; - uint8_t _voice; - uint16_t _bend; -} DEFAULT nobender; - -METHOD int setupvoice { - void /* X */ *_kobj; - uint8_t _voice; - uint8_t _chn; -} DEFAULT nosetupvoice; - -METHOD int sendsysex { - void /* X */ *_kobj; - void *_buf; - size_t _len; -} DEFAULT nosendsysex; - -METHOD int allocvoice { - void /* X */ *_kobj; - uint8_t _chn; - uint8_t _note; - void *_x; -} DEFAULT noallocvoice; - -METHOD int writeraw { - void /* X */ *_kobjt; - uint8_t *_buf; - size_t _len; -} DEFAULT nowriteraw; - -METHOD int reset { - void /* X */ *_kobjt; -} DEFAULT noreset; - -METHOD char * shortname { - void /* X */ *_kobjt; -} DEFAULT noshortname; - -METHOD int open { - void /* X */ *_kobjt; - void *_sythn; - int _mode; -} DEFAULT noopen; - -METHOD int close { - void /* X */ *_kobjt; -} DEFAULT noclose; - -METHOD int query { - void /* X */ *_kobjt; -} DEFAULT noquery; - -METHOD int insync { - void /* X */ *_kobjt; -} DEFAULT noinsync; - -METHOD int alloc { - void /* x */ *_kbojt; - uint8_t _chn; - uint8_t _note; -} DEFAULT noalloc; diff --git a/sys/dev/sound/pcm/mixer.c b/sys/dev/sound/pcm/mixer.c index 092af3298f0e..f281dff36248 100644 --- a/sys/dev/sound/pcm/mixer.c +++ b/sys/dev/sound/pcm/mixer.c @@ -750,8 +750,8 @@ mixer_init(device_t dev, kobj_class_t cls, void *devinfo) mixer_setrecsrc(m, 0); /* Set default input. */ - pdev = make_dev(&mixer_cdevsw, SND_DEV_CTL, UID_ROOT, GID_WHEEL, 0666, - "mixer%d", unit); + pdev = make_dev(&mixer_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "mixer%d", + unit); pdev->si_drv1 = m; snddev->mixer_dev = pdev; diff --git a/sys/dev/sound/pcm/sndstat.c b/sys/dev/sound/pcm/sndstat.c index 509a35c5a038..51d0fb3bb686 100644 --- a/sys/dev/sound/pcm/sndstat.c +++ b/sys/dev/sound/pcm/sndstat.c @@ -52,7 +52,6 @@ #define SS_TYPE_PCM 1 #define SS_TYPE_MIDI 2 -#define SS_TYPE_SEQUENCER 3 static d_open_t sndstat_open; static void sndstat_close(void *); @@ -1165,8 +1164,6 @@ sndstat_register(device_t dev, char *str) type = SS_TYPE_PCM; else if (!strcmp(devtype, "midi")) type = SS_TYPE_MIDI; - else if (!strcmp(devtype, "sequencer")) - type = SS_TYPE_SEQUENCER; else return (EINVAL); @@ -1441,8 +1438,8 @@ static void sndstat_sysinit(void *p) { sx_init(&sndstat_lock, "sndstat lock"); - sndstat_dev = make_dev(&sndstat_cdevsw, SND_DEV_STATUS, - UID_ROOT, GID_WHEEL, 0644, "sndstat"); + sndstat_dev = make_dev(&sndstat_cdevsw, 0, UID_ROOT, GID_WHEEL, 0644, + "sndstat"); } SYSINIT(sndstat_sysinit, SI_SUB_DRIVERS, SI_ORDER_FIRST, sndstat_sysinit, NULL); diff --git a/sys/dev/sound/pcm/sound.h b/sys/dev/sound/pcm/sound.h index 315452e294d1..6bd435d0ea25 100644 --- a/sys/dev/sound/pcm/sound.h +++ b/sys/dev/sound/pcm/sound.h @@ -148,14 +148,6 @@ struct snd_mixer; #define RANGE(var, low, high) (var) = \ (((var)<(low))? (low) : ((var)>(high))? 
(high) : (var)) -enum { - SND_DEV_CTL = 0, /* Control port /dev/mixer */ - SND_DEV_SEQ, /* Sequencer /dev/sequencer */ - SND_DEV_MIDIN, /* Raw midi access */ - SND_DEV_DSP, /* Digitized voice /dev/dsp */ - SND_DEV_STATUS, /* /dev/sndstat */ -}; - #define DSP_DEFAULT_SPEED 8000 extern int snd_unit; diff --git a/sys/dev/ufshci/ufshci_private.h b/sys/dev/ufshci/ufshci_private.h index cac743884ee6..ac58d44102a0 100644 --- a/sys/dev/ufshci/ufshci_private.h +++ b/sys/dev/ufshci/ufshci_private.h @@ -149,6 +149,8 @@ struct ufshci_hw_queue { bus_dmamap_t queuemem_map; bus_addr_t req_queue_addr; + bus_addr_t *ucd_bus_addr; + uint32_t num_entries; uint32_t num_trackers; @@ -198,8 +200,6 @@ struct ufshci_req_queue { bus_dma_tag_t dma_tag_payload; bus_dmamap_t ucdmem_map; - - bus_addr_t ucd_addr; }; struct ufshci_device { diff --git a/sys/dev/ufshci/ufshci_req_sdb.c b/sys/dev/ufshci/ufshci_req_sdb.c index 4670281d367a..b1f303afaef5 100644 --- a/sys/dev/ufshci/ufshci_req_sdb.c +++ b/sys/dev/ufshci/ufshci_req_sdb.c @@ -48,6 +48,29 @@ ufshci_req_sdb_cmd_desc_destroy(struct ufshci_req_queue *req_queue) } } +static void +ufshci_ucd_map(void *arg, bus_dma_segment_t *seg, int nseg, int error) +{ + struct ufshci_hw_queue *hwq = arg; + int i; + + if (error != 0) { + printf("ufshci: Failed to map UCD, error = %d\n", error); + return; + } + + if (hwq->num_trackers != nseg) { + printf( + "ufshci: Failed to map UCD, num_trackers = %d, nseg = %d\n", + hwq->num_trackers, nseg); + return; + } + + for (i = 0; i < nseg; i++) { + hwq->ucd_bus_addr[i] = seg[i].ds_addr; + } +} + static int ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue, uint32_t num_entries, struct ufshci_controller *ctrlr) @@ -55,7 +78,6 @@ ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue, struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q]; struct ufshci_tracker *tr; size_t ucd_allocsz, payload_allocsz; - uint64_t ucdmem_phys; uint8_t *ucdmem; int i, error; @@ -71,10 +93,11 @@ ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue, * Allocate physical memory for UTP Command Descriptor (UCD) * Note: UFSHCI UCD format is restricted to 128-byte alignment. 
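[ufshci_ucd_map() above records one bus address per tracker; the retuned DMA tag just below makes that work by capping every segment at exactly sizeof(struct ufshci_utp_cmd_desc), so bus_dmamap_load() reports num_trackers segments. The segment-count arithmetic, stand-alone, with illustrative sizes.]

#include <stdio.h>

/* Round-up division, as the howmany() macro in <sys/param.h>. */
#define howmany(x, y)   (((x) + ((y) - 1)) / (y))

int
main(void)
{
        size_t desc_sz = 1024;  /* stand-in for sizeof(struct ufshci_utp_cmd_desc) */
        size_t trackers = 32;
        size_t allocsz = trackers * desc_sz;

        /* With maxsegsz == desc_sz, the load yields one segment per tracker. */
        printf("nsegments = %zu\n", howmany(allocsz, desc_sz)); /* 32 */
        return (0);
}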
*/ - error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 128, - ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, - ucd_allocsz, howmany(ucd_allocsz, ctrlr->page_size), - ctrlr->page_size, 0, NULL, NULL, &req_queue->dma_tag_ucd); + error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 128, 0, + BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, ucd_allocsz, + howmany(ucd_allocsz, sizeof(struct ufshci_utp_cmd_desc)), + sizeof(struct ufshci_utp_cmd_desc), 0, NULL, NULL, + &req_queue->dma_tag_ucd); if (error != 0) { ufshci_printf(ctrlr, "request cmd desc tag create failed %d\n", error); @@ -88,7 +111,7 @@ ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue, } if (bus_dmamap_load(req_queue->dma_tag_ucd, req_queue->ucdmem_map, - ucdmem, ucd_allocsz, ufshci_single_map, &ucdmem_phys, 0) != 0) { + ucdmem, ucd_allocsz, ufshci_ucd_map, hwq, 0) != 0) { ufshci_printf(ctrlr, "failed to load cmd desc memory\n"); bus_dmamem_free(req_queue->dma_tag_ucd, req_queue->ucd, req_queue->ucdmem_map); @@ -96,7 +119,6 @@ ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue, } req_queue->ucd = (struct ufshci_utp_cmd_desc *)ucdmem; - req_queue->ucd_addr = ucdmem_phys; /* * Allocate physical memory for PRDT @@ -128,10 +150,9 @@ ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue, tr->slot_state = UFSHCI_SLOT_STATE_FREE; tr->ucd = (struct ufshci_utp_cmd_desc *)ucdmem; - tr->ucd_bus_addr = ucdmem_phys; + tr->ucd_bus_addr = hwq->ucd_bus_addr[i]; ucdmem += sizeof(struct ufshci_utp_cmd_desc); - ucdmem_phys += sizeof(struct ufshci_utp_cmd_desc); hwq->act_tr[i] = tr; } @@ -175,6 +196,11 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr, req_queue->hwq = malloc(sizeof(struct ufshci_hw_queue), M_UFSHCI, M_ZERO | M_NOWAIT); hwq = &req_queue->hwq[UFSHCI_SDB_Q]; + hwq->num_entries = req_queue->num_entries; + hwq->num_trackers = req_queue->num_trackers; + req_queue->hwq->ucd_bus_addr = malloc(sizeof(bus_addr_t) * + req_queue->num_trackers, + M_UFSHCI, M_ZERO | M_NOWAIT); mtx_init(&hwq->qlock, "ufshci req_queue lock", NULL, MTX_DEF); @@ -277,6 +303,7 @@ ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr, if (mtx_initialized(&hwq->qlock)) mtx_destroy(&hwq->qlock); + free(req_queue->hwq->ucd_bus_addr, M_UFSHCI); free(req_queue->hwq, M_UFSHCI); } diff --git a/sys/dev/usb/controller/xhci_pci.c b/sys/dev/usb/controller/xhci_pci.c index b50e33ea36ce..d5cfd228a429 100644 --- a/sys/dev/usb/controller/xhci_pci.c +++ b/sys/dev/usb/controller/xhci_pci.c @@ -99,6 +99,11 @@ xhci_pci_match(device_t self) return ("AMD Starship USB 3.0 controller"); case 0x149c1022: return ("AMD Matisse USB 3.0 controller"); + case 0x15b61022: + case 0x15b71022: + return ("AMD Raphael/Granite Ridge USB 3.1 controller"); + case 0x15b81022: + return ("AMD Raphael/Granite Ridge USB 2.0 controller"); case 0x15e01022: case 0x15e11022: return ("AMD Raven USB 3.1 controller"); @@ -109,6 +114,8 @@ xhci_pci_match(device_t self) return ("AMD 300 Series USB 3.1 controller"); case 0x43d51022: return ("AMD 400 Series USB 3.1 controller"); + case 0x43f71022: + return ("AMD 600 Series USB 3.2 controller"); case 0x78121022: case 0x78141022: case 0x79141022: diff --git a/sys/dev/vmm/vmm_dev.c b/sys/dev/vmm/vmm_dev.c index 819debadd1ac..9f2b009d02ec 100644 --- a/sys/dev/vmm/vmm_dev.c +++ b/sys/dev/vmm/vmm_dev.c @@ -30,7 +30,8 @@ #include <dev/vmm/vmm_mem.h> #include <dev/vmm/vmm_stat.h> -#if defined(__amd64__) && defined(COMPAT_FREEBSD12) +#ifdef __amd64__ +#ifdef COMPAT_FREEBSD12 struct 
vm_memseg_12 { int segid; size_t len; @@ -42,7 +43,22 @@ _Static_assert(sizeof(struct vm_memseg_12) == 80, "COMPAT_FREEBSD12 ABI"); _IOW('v', IOCNUM_ALLOC_MEMSEG, struct vm_memseg_12) #define VM_GET_MEMSEG_12 \ _IOWR('v', IOCNUM_GET_MEMSEG, struct vm_memseg_12) -#endif +#endif /* COMPAT_FREEBSD12 */ +#ifdef COMPAT_FREEBSD14 +struct vm_memseg_14 { + int segid; + size_t len; + char name[VM_MAX_SUFFIXLEN + 1]; +}; +_Static_assert(sizeof(struct vm_memseg_14) == (VM_MAX_SUFFIXLEN + 1 + 16), + "COMPAT_FREEBSD14 ABI"); + +#define VM_ALLOC_MEMSEG_14 \ + _IOW('v', IOCNUM_ALLOC_MEMSEG, struct vm_memseg_14) +#define VM_GET_MEMSEG_14 \ + _IOWR('v', IOCNUM_GET_MEMSEG, struct vm_memseg_14) +#endif /* COMPAT_FREEBSD14 */ +#endif /* __amd64__ */ struct devmem_softc { int segid; @@ -257,7 +273,8 @@ get_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg, size_t len) } static int -alloc_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg, size_t len) +alloc_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg, size_t len, + struct domainset *domainset) { char *name; int error; @@ -278,8 +295,7 @@ alloc_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg, size_t len) if (error) goto done; } - - error = vm_alloc_memseg(sc->vm, mseg->segid, mseg->len, sysmem); + error = vm_alloc_memseg(sc->vm, mseg->segid, mseg->len, sysmem, domainset); if (error) goto done; @@ -295,6 +311,20 @@ done: return (error); } +#if defined(__amd64__) && \ + (defined(COMPAT_FREEBSD14) || defined(COMPAT_FREEBSD12)) +/* + * Translate pre-15.0 memory segment identifiers into their 15.0 counterparts. + */ +static void +adjust_segid(struct vm_memseg *mseg) +{ + if (mseg->segid != VM_SYSMEM) { + mseg->segid += (VM_BOOTROM - 1); + } +} +#endif + static int vm_get_register_set(struct vcpu *vcpu, unsigned int count, int *regnum, uint64_t *regval) @@ -353,10 +383,16 @@ static const struct vmmdev_ioctl vmmdev_ioctls[] = { VMMDEV_IOCTL(VM_STATS, VMMDEV_IOCTL_LOCK_ONE_VCPU), VMMDEV_IOCTL(VM_STAT_DESC, 0), -#if defined(__amd64__) && defined(COMPAT_FREEBSD12) +#ifdef __amd64__ +#ifdef COMPAT_FREEBSD12 VMMDEV_IOCTL(VM_ALLOC_MEMSEG_12, VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS), #endif +#ifdef COMPAT_FREEBSD14 + VMMDEV_IOCTL(VM_ALLOC_MEMSEG_14, + VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS), +#endif +#endif /* __amd64__ */ VMMDEV_IOCTL(VM_ALLOC_MEMSEG, VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS), VMMDEV_IOCTL(VM_MMAP_MEMSEG, @@ -366,9 +402,14 @@ static const struct vmmdev_ioctl vmmdev_ioctls[] = { VMMDEV_IOCTL(VM_REINIT, VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS), -#if defined(__amd64__) && defined(COMPAT_FREEBSD12) +#ifdef __amd64__ +#if defined(COMPAT_FREEBSD12) VMMDEV_IOCTL(VM_GET_MEMSEG_12, VMMDEV_IOCTL_SLOCK_MEMSEGS), #endif +#ifdef COMPAT_FREEBSD14 + VMMDEV_IOCTL(VM_GET_MEMSEG_14, VMMDEV_IOCTL_SLOCK_MEMSEGS), +#endif +#endif /* __amd64__ */ VMMDEV_IOCTL(VM_GET_MEMSEG, VMMDEV_IOCTL_SLOCK_MEMSEGS), VMMDEV_IOCTL(VM_MMAP_GETNEXT, VMMDEV_IOCTL_SLOCK_MEMSEGS), @@ -388,6 +429,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag, struct vmmdev_softc *sc; struct vcpu *vcpu; const struct vmmdev_ioctl *ioctl; + struct vm_memseg *mseg; int error, vcpuid; sc = vmmdev_lookup2(cdev); @@ -499,20 +541,77 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag, error = vm_munmap_memseg(sc->vm, mu->gpa, mu->len); break; } -#if defined(__amd64__) && defined(COMPAT_FREEBSD12) +#ifdef __amd64__ +#ifdef COMPAT_FREEBSD12 case VM_ALLOC_MEMSEG_12: - error = 
alloc_memseg(sc, (struct vm_memseg *)data, - sizeof(((struct vm_memseg_12 *)0)->name)); + mseg = (struct vm_memseg *)data; + + adjust_segid(mseg); + error = alloc_memseg(sc, mseg, + sizeof(((struct vm_memseg_12 *)0)->name), NULL); break; case VM_GET_MEMSEG_12: - error = get_memseg(sc, (struct vm_memseg *)data, + mseg = (struct vm_memseg *)data; + + adjust_segid(mseg); + error = get_memseg(sc, mseg, sizeof(((struct vm_memseg_12 *)0)->name)); break; -#endif - case VM_ALLOC_MEMSEG: - error = alloc_memseg(sc, (struct vm_memseg *)data, - sizeof(((struct vm_memseg *)0)->name)); +#endif /* COMPAT_FREEBSD12 */ +#ifdef COMPAT_FREEBSD14 + case VM_ALLOC_MEMSEG_14: + mseg = (struct vm_memseg *)data; + + adjust_segid(mseg); + error = alloc_memseg(sc, mseg, + sizeof(((struct vm_memseg_14 *)0)->name), NULL); + break; + case VM_GET_MEMSEG_14: + mseg = (struct vm_memseg *)data; + + adjust_segid(mseg); + error = get_memseg(sc, mseg, + sizeof(((struct vm_memseg_14 *)0)->name)); + break; +#endif /* COMPAT_FREEBSD14 */ +#endif /* __amd64__ */ + case VM_ALLOC_MEMSEG: { + domainset_t *mask; + struct domainset *domainset, domain; + + domainset = NULL; + mseg = (struct vm_memseg *)data; + if (mseg->ds_policy != DOMAINSET_POLICY_INVALID && mseg->ds_mask != NULL) { + if (mseg->ds_mask_size < sizeof(domainset_t) || + mseg->ds_mask_size > DOMAINSET_MAXSIZE / NBBY) { + error = ERANGE; + break; + } + memset(&domain, 0, sizeof(domain)); + mask = malloc(mseg->ds_mask_size, M_VMMDEV, M_WAITOK); + error = copyin(mseg->ds_mask, mask, mseg->ds_mask_size); + if (error) { + free(mask, M_VMMDEV); + break; + } + error = domainset_populate(&domain, mask, mseg->ds_policy, + mseg->ds_mask_size); + if (error) { + free(mask, M_VMMDEV); + break; + } + domainset = domainset_create(&domain); + if (domainset == NULL) { + error = EINVAL; + free(mask, M_VMMDEV); + break; + } + free(mask, M_VMMDEV); + } + error = alloc_memseg(sc, mseg, sizeof(mseg->name), domainset); + break; + } case VM_GET_MEMSEG: error = get_memseg(sc, (struct vm_memseg *)data, sizeof(((struct vm_memseg *)0)->name)); @@ -820,7 +919,6 @@ sysctl_vmm_destroy(SYSCTL_HANDLER_ARGS) buflen = VM_MAX_NAMELEN + 1; buf = malloc(buflen, M_VMMDEV, M_WAITOK | M_ZERO); - strlcpy(buf, "beavis", buflen); error = sysctl_handle_string(oidp, buf, buflen, req); if (error == 0 && req->newptr != NULL) error = vmmdev_lookup_and_destroy(buf, req->td->td_ucred); @@ -830,7 +928,7 @@ sysctl_vmm_destroy(SYSCTL_HANDLER_ARGS) SYSCTL_PROC(_hw_vmm, OID_AUTO, destroy, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_MPSAFE, NULL, 0, sysctl_vmm_destroy, "A", - NULL); + "Destroy a vmm(4) instance (legacy interface)"); static struct cdevsw vmmdevsw = { .d_name = "vmmdev", @@ -909,7 +1007,6 @@ sysctl_vmm_create(SYSCTL_HANDLER_ARGS) buflen = VM_MAX_NAMELEN + 1; buf = malloc(buflen, M_VMMDEV, M_WAITOK | M_ZERO); - strlcpy(buf, "beavis", buflen); error = sysctl_handle_string(oidp, buf, buflen, req); if (error == 0 && req->newptr != NULL) error = vmmdev_create(buf, req->td->td_ucred); @@ -919,7 +1016,7 @@ sysctl_vmm_create(SYSCTL_HANDLER_ARGS) SYSCTL_PROC(_hw_vmm, OID_AUTO, create, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_MPSAFE, NULL, 0, sysctl_vmm_create, "A", - NULL); + "Create a vmm(4) instance (legacy interface)"); static int vmmctl_open(struct cdev *cdev, int flags, int fmt, struct thread *td) diff --git a/sys/dev/vmm/vmm_mem.c b/sys/dev/vmm/vmm_mem.c index c61ae2d44b96..be59e37de33d 100644 --- a/sys/dev/vmm/vmm_mem.c +++ b/sys/dev/vmm/vmm_mem.c @@ -7,6 +7,7 @@ #include <sys/types.h> 
#include <sys/lock.h> +#include <sys/malloc.h> #include <sys/sx.h> #include <sys/systm.h> @@ -156,10 +157,11 @@ vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa) } int -vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem) +vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem, + struct domainset *obj_domainset) { - struct vm_mem *mem; struct vm_mem_seg *seg; + struct vm_mem *mem; vm_object_t obj; mem = vm_mem(vm); @@ -179,13 +181,22 @@ vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem) return (EINVAL); } + /* + * When given an impossible policy, signal an + * error to the user. + */ + if (obj_domainset != NULL && domainset_empty_vm(obj_domainset)) + return (EINVAL); obj = vm_object_allocate(OBJT_SWAP, len >> PAGE_SHIFT); if (obj == NULL) return (ENOMEM); seg->len = len; seg->object = obj; + if (obj_domainset != NULL) + seg->object->domain.dr_policy = obj_domainset; seg->sysmem = sysmem; + return (0); } diff --git a/sys/dev/vmm/vmm_mem.h b/sys/dev/vmm/vmm_mem.h index a4be4c1c57aa..856470cf2590 100644 --- a/sys/dev/vmm/vmm_mem.h +++ b/sys/dev/vmm/vmm_mem.h @@ -8,6 +8,27 @@ #ifndef _DEV_VMM_MEM_H_ #define _DEV_VMM_MEM_H_ +/* Maximum number of NUMA domains in a guest. */ +#define VM_MAXMEMDOM 8 +#define VM_MAXSYSMEM VM_MAXMEMDOM + +/* + * Identifiers for memory segments. + * Each guest NUMA domain is represented by a single system + * memory segment from [VM_SYSMEM, VM_MAXSYSMEM). + * The remaining identifiers can be used to create devmem segments. + */ +enum { + VM_SYSMEM = 0, + VM_BOOTROM = VM_MAXSYSMEM, + VM_FRAMEBUFFER, + VM_PCIROM, + VM_MEMSEG_END +}; + +#define VM_MAX_MEMSEGS VM_MEMSEG_END +#define VM_MAX_MEMMAPS (VM_MAX_MEMSEGS * 2) + #ifdef _KERNEL #include <sys/types.h> @@ -31,9 +52,6 @@ struct vm_mem_map { int flags; }; -#define VM_MAX_MEMSEGS 4 -#define VM_MAX_MEMMAPS 8 - struct vm_mem { struct vm_mem_map mem_maps[VM_MAX_MEMMAPS]; struct vm_mem_seg mem_segs[VM_MAX_MEMSEGS]; @@ -55,7 +73,8 @@ void vm_assert_memseg_xlocked(struct vm *vm); int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off, size_t len, int prot, int flags); int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len); -int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem); +int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem, + struct domainset *obj_domainset); void vm_free_memseg(struct vm *vm, int ident); /* diff --git a/sys/dev/vt/hw/vga/vt_vga.c b/sys/dev/vt/hw/vga/vt_vga.c index 64039575c0ad..675c0573bd7e 100644 --- a/sys/dev/vt/hw/vga/vt_vga.c +++ b/sys/dev/vt/hw/vga/vt_vga.c @@ -1347,7 +1347,7 @@ vga_postswitch(struct vt_device *vd) /* Reinit VGA mode, to restore view after app which change mode. */ vga_initialize(vd, (vd->vd_flags & VDF_TEXTMODE)); - /* Ask vt(9) to update chars on visible area. */ + /* Ask vt(4) to update chars on visible area. 
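[The new identifier layout above reserves [VM_SYSMEM, VM_MAXSYSMEM) for per-domain system memory, which is why adjust_segid() in vmm_dev.c shifts every pre-15.0 devmem identifier up by VM_BOOTROM - 1. A stand-alone check of that mapping against the new enum values.]

#include <stdio.h>

/* New identifier layout from vmm_mem.h. */
#define VM_MAXMEMDOM    8
#define VM_MAXSYSMEM    VM_MAXMEMDOM
enum { VM_SYSMEM = 0, VM_BOOTROM = VM_MAXSYSMEM,
       VM_FRAMEBUFFER, VM_PCIROM, VM_MEMSEG_END };

/* Pre-15.0 layout had one sysmem segment at 0 and devmem from 1. */
static int
adjust_segid(int segid)
{
        return (segid == VM_SYSMEM ? segid : segid + (VM_BOOTROM - 1));
}

int
main(void)
{
        /* Old bootrom (1) lands on the new VM_BOOTROM (8), and so on. */
        printf("%d %d %d\n", adjust_segid(0), adjust_segid(1),
            adjust_segid(2));   /* 0 8 9 */
        return (0);
}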
*/ vd->vd_flags |= VDF_INVALID; } diff --git a/sys/dev/vt/vt_core.c b/sys/dev/vt/vt_core.c index b0f58b38a6f1..b51ef6766de4 100644 --- a/sys/dev/vt/vt_core.c +++ b/sys/dev/vt/vt_core.c @@ -125,10 +125,10 @@ static const struct terminal_class vt_termclass = { (vw)->vw_number) static SYSCTL_NODE(_kern, OID_AUTO, vt, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, - "vt(9) parameters"); + "vt(4) parameters"); static VT_SYSCTL_INT(enable_altgr, 1, "Enable AltGr key (Do not assume R.Alt as Alt)"); static VT_SYSCTL_INT(enable_bell, 0, "Enable bell"); -static VT_SYSCTL_INT(debug, 0, "vt(9) debug level"); +static VT_SYSCTL_INT(debug, 0, "vt(4) debug level"); static VT_SYSCTL_INT(deadtimer, 15, "Time to wait busy process in VT_PROCESS mode"); static VT_SYSCTL_INT(suspendswitch, 1, "Switch to VT0 before suspend"); |
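[The VM_ALLOC_MEMSEG handler earlier in this change only honors a NUMA policy when the user-supplied mask size is sane: at least sizeof(domainset_t) and at most DOMAINSET_MAXSIZE / NBBY bytes, rejected with ERANGE otherwise. A user-space sketch of that gate; both bounds are illustrative stand-ins for the kernel constants.]

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's sizing constants. */
#define DOMAINSET_T_SIZE        8       /* sizeof(domainset_t) stand-in */
#define DOMAINSET_MAX_BYTES     32      /* DOMAINSET_MAXSIZE / NBBY stand-in */

/* Accept only mask sizes the kernel could copy in and populate. */
static int
mask_size_check(size_t len)
{
        if (len < DOMAINSET_T_SIZE || len > DOMAINSET_MAX_BYTES)
                return (ERANGE);
        return (0);
}

int
main(void)
{
        printf("%d %d %d\n", mask_size_check(4), mask_size_check(16),
            mask_size_check(64));       /* ERANGE, 0, ERANGE */
        return (0);
}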