Diffstat (limited to 'sys')
50 files changed, 548 insertions, 313 deletions
diff --git a/sys/arm/allwinner/aw_sid.c b/sys/arm/allwinner/aw_sid.c index ba5faca33c5e..932c2f189e51 100644 --- a/sys/arm/allwinner/aw_sid.c +++ b/sys/arm/allwinner/aw_sid.c @@ -297,7 +297,7 @@ aw_sid_attach(device_t dev) /* Register ourself so device can resolve who we are */ OF_device_register_xref(OF_xref_from_node(node), dev); - for (i = 0; i < sc->sid_conf->nfuses ;i++) {\ + for (i = 0; i < sc->sid_conf->nfuses; i++) { SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, sc->sid_conf->efuses[i].name, diff --git a/sys/arm64/arm64/cpu_errata.c b/sys/arm64/arm64/cpu_errata.c index 989924bc0567..b876703a2a15 100644 --- a/sys/arm64/arm64/cpu_errata.c +++ b/sys/arm64/arm64/cpu_errata.c @@ -52,56 +52,11 @@ struct cpu_quirks { u_int flags; }; -static enum { - SSBD_FORCE_ON, - SSBD_FORCE_OFF, - SSBD_KERNEL, -} ssbd_method = SSBD_KERNEL; - -static cpu_quirk_install install_psci_bp_hardening; -static cpu_quirk_install install_ssbd_workaround; static cpu_quirk_install install_thunderx_bcast_tlbi_workaround; static struct cpu_quirks cpu_quirks[] = { { .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK, - .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A57,0,0), - .quirk_install = install_psci_bp_hardening, - .flags = CPU_QUIRK_POST_DEVICE, - }, - { - .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK, - .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A72,0,0), - .quirk_install = install_psci_bp_hardening, - .flags = CPU_QUIRK_POST_DEVICE, - }, - { - .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK, - .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A73,0,0), - .quirk_install = install_psci_bp_hardening, - .flags = CPU_QUIRK_POST_DEVICE, - }, - { - .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK, - .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A75,0,0), - .quirk_install = install_psci_bp_hardening, - .flags = CPU_QUIRK_POST_DEVICE, - }, - { - .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK, - .midr_value = - CPU_ID_RAW(CPU_IMPL_CAVIUM, CPU_PART_THUNDERX2, 0,0), - .quirk_install = install_psci_bp_hardening, - .flags = CPU_QUIRK_POST_DEVICE, - }, - { - .midr_mask = 0, - .midr_value = 0, - .quirk_install = install_ssbd_workaround, - .flags = CPU_QUIRK_POST_DEVICE, - }, - { - .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK, .midr_value = CPU_ID_RAW(CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, 0), .quirk_install = install_thunderx_bcast_tlbi_workaround, @@ -114,57 +69,6 @@ static struct cpu_quirks cpu_quirks[] = { }, }; -static void -install_psci_bp_hardening(void) -{ - /* SMCCC depends on PSCI. If PSCI is missing so is SMCCC */ - if (!psci_present) - return; - - if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_1) != SMCCC_RET_SUCCESS) - return; - - PCPU_SET(bp_harden, smccc_arch_workaround_1); -} - -static void -install_ssbd_workaround(void) -{ - char *env; - - if (PCPU_GET(cpuid) == 0) { - env = kern_getenv("kern.cfg.ssbd"); - if (env != NULL) { - if (strcmp(env, "force-on") == 0) { - ssbd_method = SSBD_FORCE_ON; - } else if (strcmp(env, "force-off") == 0) { - ssbd_method = SSBD_FORCE_OFF; - } - } - } - - /* SMCCC depends on PSCI. 
If PSCI is missing so is SMCCC */ - if (!psci_present) - return; - - /* Enable the workaround on this CPU if it's enabled in the firmware */ - if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_2) != SMCCC_RET_SUCCESS) - return; - - switch(ssbd_method) { - case SSBD_FORCE_ON: - smccc_arch_workaround_2(1); - break; - case SSBD_FORCE_OFF: - smccc_arch_workaround_2(0); - break; - case SSBD_KERNEL: - default: - PCPU_SET(ssbd, smccc_arch_workaround_2); - break; - } -} - /* * Workaround Cavium erratum 27456. * diff --git a/sys/arm64/arm64/spec_workaround.c b/sys/arm64/arm64/spec_workaround.c new file mode 100644 index 000000000000..7f4f86cdb48c --- /dev/null +++ b/sys/arm64/arm64/spec_workaround.c @@ -0,0 +1,166 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2025 Arm Ltd + * Copyright (c) 2018 Andrew Turner + * + * This software was developed by SRI International and the University of + * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237 + * ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include <sys/param.h> +#include <sys/kernel.h> +#include <sys/pcpu.h> +#include <sys/systm.h> + +#include <machine/cpu.h> +#include <machine/cpu_feat.h> + +#include <dev/psci/psci.h> +#include <dev/psci/smccc.h> + +static enum { + SSBD_FORCE_ON, + SSBD_FORCE_OFF, + SSBD_KERNEL, +} ssbd_method = SSBD_KERNEL; + +struct psci_bp_hardening_impl { + u_int midr_mask; + u_int midr_value; +}; + +static struct psci_bp_hardening_impl psci_bp_hardening_impl[] = { + { + .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK, + .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A57,0,0), + }, + { + .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK, + .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A72,0,0), + }, + { + .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK, + .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A73,0,0), + }, + { + .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK, + .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A75,0,0), + }, + { + .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK, + .midr_value = + CPU_ID_RAW(CPU_IMPL_CAVIUM, CPU_PART_THUNDERX2, 0,0), + } +}; + +static cpu_feat_en +psci_bp_hardening_check(const struct cpu_feat *feat __unused, u_int midr) +{ + size_t i; + + for (i = 0; i < nitems(psci_bp_hardening_impl); i++) { + if ((midr & psci_bp_hardening_impl[i].midr_mask) == + psci_bp_hardening_impl[i].midr_value) { + /* SMCCC depends on PSCI. If PSCI is missing so is SMCCC */ + if (!psci_present) + return (FEAT_ALWAYS_DISABLE); + + if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_1) != + SMCCC_RET_SUCCESS) + return (FEAT_ALWAYS_DISABLE); + + return (FEAT_DEFAULT_ENABLE); + } + } + + return (FEAT_ALWAYS_DISABLE); +} + +static bool +psci_bp_hardening_enable(const struct cpu_feat *feat __unused, + cpu_feat_errata errata_status __unused, u_int *errata_list __unused, + u_int errata_count __unused) +{ + PCPU_SET(bp_harden, smccc_arch_workaround_1); + + return (true); +} + +CPU_FEAT(feat_csv2_missing, "Branch Predictor Hardening", + psci_bp_hardening_check, NULL, psci_bp_hardening_enable, NULL, + CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU); + +static cpu_feat_en +ssbd_workaround_check(const struct cpu_feat *feat __unused, u_int midr __unused) +{ + char *env; + + if (PCPU_GET(cpuid) == 0) { + env = kern_getenv("kern.cfg.ssbd"); + if (env != NULL) { + if (strcmp(env, "force-on") == 0) { + ssbd_method = SSBD_FORCE_ON; + } else if (strcmp(env, "force-off") == 0) { + ssbd_method = SSBD_FORCE_OFF; + } + } + } + + /* SMCCC depends on PSCI. 
If PSCI is missing so is SMCCC */ + if (!psci_present) + return (FEAT_ALWAYS_DISABLE); + + /* Enable the workaround on this CPU if it's enabled in the firmware */ + if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_2) != SMCCC_RET_SUCCESS) + return (FEAT_ALWAYS_DISABLE); + + return (FEAT_DEFAULT_ENABLE); +} + +static bool +ssbd_workaround_enable(const struct cpu_feat *feat __unused, + cpu_feat_errata errata_status __unused, u_int *errata_list __unused, + u_int errata_count __unused) +{ + switch(ssbd_method) { + case SSBD_FORCE_ON: + smccc_arch_workaround_2(1); + break; + case SSBD_FORCE_OFF: + smccc_arch_workaround_2(0); + break; + case SSBD_KERNEL: + default: + PCPU_SET(ssbd, smccc_arch_workaround_2); + break; + } + + return (true); +} + +CPU_FEAT(feat_ssbs_missing, "Speculator Store Bypass Disable Workaround", + ssbd_workaround_check, NULL, ssbd_workaround_enable, NULL, + CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU); diff --git a/sys/cam/scsi/scsi_all.c b/sys/cam/scsi/scsi_all.c index 9c097f52d136..fd128e69f1f1 100644 --- a/sys/cam/scsi/scsi_all.c +++ b/sys/cam/scsi/scsi_all.c @@ -686,7 +686,7 @@ scsi_op_desc(uint16_t opcode, struct scsi_inquiry_data *inq_data) opmask = 1 << pd_type; for (j = 0; j < num_tables; j++) { - for (i = 0;i < num_ops[j] && table[j][i].opcode <= opcode; i++){ + for (i = 0; i < num_ops[j] && table[j][i].opcode <= opcode; i++) { if ((table[j][i].opcode == opcode) && ((table[j][i].opmask & opmask) != 0)) return(table[j][i].desc); diff --git a/sys/cam/scsi/scsi_enc_ses.c b/sys/cam/scsi/scsi_enc_ses.c index 435874a9874a..3a362eaf11a4 100644 --- a/sys/cam/scsi/scsi_enc_ses.c +++ b/sys/cam/scsi/scsi_enc_ses.c @@ -2302,7 +2302,7 @@ ses_print_addl_data_sas_type0(char *sesname, struct sbuf *sbp, sbuf_putc(sbp, '\n'); if (addl->proto_data.sasdev_phys == NULL) return; - for (i = 0;i < addl->proto_hdr.sas->base_hdr.num_phys;i++) { + for (i = 0; i < addl->proto_hdr.sas->base_hdr.num_phys; i++) { phy = &addl->proto_data.sasdev_phys[i]; sbuf_printf(sbp, "%s: phy %d:", sesname, i); if (ses_elm_sas_dev_phy_sata_dev(phy)) @@ -2349,7 +2349,7 @@ ses_print_addl_data_sas_type1(char *sesname, struct sbuf *sbp, sbuf_printf(sbp, "Expander: %d phys", num_phys); if (addl->proto_data.sasexp_phys == NULL) return; - for (i = 0;i < num_phys;i++) { + for (i = 0; i < num_phys; i++) { exp_phy = &addl->proto_data.sasexp_phys[i]; sbuf_printf(sbp, "%s: phy %d: connector %d other %d\n", sesname, i, exp_phy->connector_index, @@ -2360,7 +2360,7 @@ ses_print_addl_data_sas_type1(char *sesname, struct sbuf *sbp, sbuf_printf(sbp, "Port: %d phys", num_phys); if (addl->proto_data.sasport_phys == NULL) return; - for (i = 0;i < num_phys;i++) { + for (i = 0; i < num_phys; i++) { port_phy = &addl->proto_data.sasport_phys[i]; sbuf_printf(sbp, "%s: phy %d: id %d connector %d other %d\n", diff --git a/sys/compat/linux/linux.c b/sys/compat/linux/linux.c index 61b207070963..a40f110634f7 100644 --- a/sys/compat/linux/linux.c +++ b/sys/compat/linux/linux.c @@ -578,8 +578,13 @@ bsd_to_linux_sockaddr(const struct sockaddr *sa, struct l_sockaddr **lsa, return (0); } +/* + * If sap is NULL, then osa points at already copied in linux sockaddr that + * should be edited in place. Otherwise memory is allocated, sockaddr + * copied in and returned in *sap. 
+ */ int -linux_to_bsd_sockaddr(const struct l_sockaddr *osa, struct sockaddr **sap, +linux_to_bsd_sockaddr(struct l_sockaddr *osa, struct sockaddr **sap, socklen_t *len) { struct sockaddr *sa; @@ -609,10 +614,12 @@ linux_to_bsd_sockaddr(const struct l_sockaddr *osa, struct sockaddr **sap, } #endif - kosa = malloc(salen, M_SONAME, M_WAITOK); - - if ((error = copyin(osa, kosa, *len))) - goto out; + if (sap != NULL) { + kosa = malloc(salen, M_SONAME, M_WAITOK); + if ((error = copyin(osa, kosa, *len))) + goto out; + } else + kosa = osa; bdom = linux_to_bsd_domain(kosa->sa_family); if (bdom == AF_UNKNOWN) { @@ -686,12 +693,15 @@ linux_to_bsd_sockaddr(const struct l_sockaddr *osa, struct sockaddr **sap, sa->sa_family = bdom; sa->sa_len = salen; - *sap = sa; - *len = salen; + if (sap != NULL) { + *sap = sa; + *len = salen; + } return (0); out: - free(kosa, M_SONAME); + if (sap != NULL) + free(kosa, M_SONAME); return (error); } diff --git a/sys/compat/linux/linux_common.h b/sys/compat/linux/linux_common.h index 97f5a259f300..814c183b338a 100644 --- a/sys/compat/linux/linux_common.h +++ b/sys/compat/linux/linux_common.h @@ -43,7 +43,7 @@ sa_family_t bsd_to_linux_domain(sa_family_t domain); #define AF_UNKNOWN UINT8_MAX int bsd_to_linux_sockaddr(const struct sockaddr *sa, struct l_sockaddr **lsa, socklen_t len); -int linux_to_bsd_sockaddr(const struct l_sockaddr *lsa, +int linux_to_bsd_sockaddr(struct l_sockaddr *lsa, struct sockaddr **sap, socklen_t *len); void linux_to_bsd_poll_events(struct thread *td, int fd, short lev, short *bev); diff --git a/sys/compat/linux/linux_socket.c b/sys/compat/linux/linux_socket.c index 0e07b0a60ced..b1a483ce611c 100644 --- a/sys/compat/linux/linux_socket.c +++ b/sys/compat/linux/linux_socket.c @@ -2146,7 +2146,8 @@ linux_setsockopt(struct thread *td, struct linux_setsockopt_args *args) return (ENOPROTOOPT); } - if (name == IPV6_NEXTHOP) { + switch (name) { + case IPV6_NEXTHOP: { len = args->optlen; error = linux_to_bsd_sockaddr(PTRIN(args->optval), &sa, &len); if (error != 0) @@ -2155,7 +2156,34 @@ linux_setsockopt(struct thread *td, struct linux_setsockopt_args *args) error = kern_setsockopt(td, args->s, level, name, sa, UIO_SYSSPACE, len); free(sa, M_SONAME); - } else { + break; + } + case MCAST_JOIN_GROUP: + case MCAST_LEAVE_GROUP: + case MCAST_JOIN_SOURCE_GROUP: + case MCAST_LEAVE_SOURCE_GROUP: { + struct group_source_req req; + size_t size; + + size = (name == MCAST_JOIN_SOURCE_GROUP || + name == MCAST_LEAVE_SOURCE_GROUP) ? 
+ sizeof(struct group_source_req) : sizeof(struct group_req); + + if ((error = copyin(PTRIN(args->optval), &req, size))) + return (error); + len = sizeof(struct sockaddr_storage); + if ((error = linux_to_bsd_sockaddr( + (struct l_sockaddr *)&req.gsr_group, NULL, &len))) + return (error); + if (size == sizeof(struct group_source_req) && + (error = linux_to_bsd_sockaddr( + (struct l_sockaddr *)&req.gsr_source, NULL, &len))) + return (error); + error = kern_setsockopt(td, args->s, level, name, &req, + UIO_SYSSPACE, size); + break; + } + default: error = kern_setsockopt(td, args->s, level, name, PTRIN(args->optval), UIO_USERSPACE, args->optlen); } diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64 index 856ea3af1372..2f412fa3cb1b 100644 --- a/sys/conf/files.arm64 +++ b/sys/conf/files.arm64 @@ -73,6 +73,7 @@ arm64/arm64/pmap.c standard arm64/arm64/ptrace_machdep.c standard arm64/arm64/sdt_machdep.c optional kdtrace_hooks arm64/arm64/sigtramp.S standard +arm64/arm64/spec_workaround.c standard arm64/arm64/stack_machdep.c optional ddb | stack arm64/arm64/strcmp.S standard arm64/arm64/strncmp.S standard diff --git a/sys/contrib/libnv/bsd_nvpair.c b/sys/contrib/libnv/bsd_nvpair.c index c73bc2189121..b884dd260b84 100644 --- a/sys/contrib/libnv/bsd_nvpair.c +++ b/sys/contrib/libnv/bsd_nvpair.c @@ -985,13 +985,13 @@ nvpair_unpack_string_array(bool isbe __unused, nvpair_t *nvp, size = nvp->nvp_datasize; tmp = (const char *)ptr; for (ii = 0; ii < nvp->nvp_nitems; ii++) { - len = strnlen(tmp, size - 1) + 1; - size -= len; - if (tmp[len - 1] != '\0') { + if (size <= 0) { ERRNO_SET(EINVAL); return (NULL); } - if (size < 0) { + len = strnlen(tmp, size - 1) + 1; + size -= len; + if (tmp[len - 1] != '\0') { ERRNO_SET(EINVAL); return (NULL); } diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c index ace2360c032d..ebc2c0eeb6d2 100644 --- a/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c +++ b/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c @@ -163,6 +163,13 @@ param_set_arc_int(SYSCTL_HANDLER_ARGS) return (0); } +static void +warn_deprecated_sysctl(const char *old, const char *new) +{ + printf("WARNING: sysctl vfs.zfs.%s is deprecated. 
Use vfs.zfs.%s instead.\n", + old, new); +} + int param_set_arc_max(SYSCTL_HANDLER_ARGS) { @@ -185,12 +192,15 @@ param_set_arc_max(SYSCTL_HANDLER_ARGS) if (val != 0) zfs_arc_max = arc_c_max; + if (arg2 != 0) + warn_deprecated_sysctl("arc_max", "arc.max"); + return (0); } SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_max, CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, - NULL, 0, param_set_arc_max, "LU", + NULL, 1, param_set_arc_max, "LU", "Maximum ARC size in bytes (LEGACY)"); int @@ -214,12 +224,15 @@ param_set_arc_min(SYSCTL_HANDLER_ARGS) if (val != 0) zfs_arc_min = arc_c_min; + if (arg2 != 0) + warn_deprecated_sysctl("arc_min", "arc.min"); + return (0); } SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_min, CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, - NULL, 0, param_set_arc_min, "LU", + NULL, 1, param_set_arc_min, "LU", "Minimum ARC size in bytes (LEGACY)"); extern uint_t zfs_arc_free_target; @@ -242,6 +255,9 @@ param_set_arc_free_target(SYSCTL_HANDLER_ARGS) zfs_arc_free_target = val; + if (arg2 != 0) + warn_deprecated_sysctl("arc_free_target", "arc.free_target"); + return (0); } @@ -251,7 +267,7 @@ param_set_arc_free_target(SYSCTL_HANDLER_ARGS) */ SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target, CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, - NULL, 0, param_set_arc_free_target, "IU", + NULL, 1, param_set_arc_free_target, "IU", "Desired number of free pages below which ARC triggers reclaim" " (LEGACY)"); @@ -270,12 +286,15 @@ param_set_arc_no_grow_shift(SYSCTL_HANDLER_ARGS) arc_no_grow_shift = val; + if (arg2 != 0) + warn_deprecated_sysctl("arc_no_grow_shift", "arc.no_grow_shift"); + return (0); } SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_no_grow_shift, CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, - NULL, 0, param_set_arc_no_grow_shift, "I", + NULL, 1, param_set_arc_no_grow_shift, "I", "log2(fraction of ARC which must be free to allow growing) (LEGACY)"); extern uint64_t l2arc_write_max; @@ -746,12 +765,15 @@ param_set_min_auto_ashift(SYSCTL_HANDLER_ARGS) zfs_vdev_min_auto_ashift = val; + if (arg2 != 0) + warn_deprecated_sysctl("min_auto_ashift", + "vdev.min_auto_ashift"); + return (0); } SYSCTL_PROC(_vfs_zfs, OID_AUTO, min_auto_ashift, - CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, - &zfs_vdev_min_auto_ashift, sizeof (zfs_vdev_min_auto_ashift), + CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL, 1, param_set_min_auto_ashift, "IU", "Min ashift used when creating new top-level vdev. (LEGACY)"); @@ -771,12 +793,15 @@ param_set_max_auto_ashift(SYSCTL_HANDLER_ARGS) zfs_vdev_max_auto_ashift = val; + if (arg2 != 0) + warn_deprecated_sysctl("max_auto_ashift", + "vdev.max_auto_ashift"); + return (0); } SYSCTL_PROC(_vfs_zfs, OID_AUTO, max_auto_ashift, - CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, - &zfs_vdev_max_auto_ashift, sizeof (zfs_vdev_max_auto_ashift), + CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL, 1, param_set_max_auto_ashift, "IU", "Max ashift used when optimizing for logical -> physical sector size on" " new top-level vdevs. 
(LEGACY)"); diff --git a/sys/crypto/chacha20/chacha.c b/sys/crypto/chacha20/chacha.c index 52f7e18c651c..cb06003b0ecf 100644 --- a/sys/crypto/chacha20/chacha.c +++ b/sys/crypto/chacha20/chacha.c @@ -138,7 +138,7 @@ chacha_encrypt_bytes(chacha_ctx *x,const u8 *m,u8 *c,u32 bytes) for (;;) { if (bytes < 64) { #ifndef KEYSTREAM_ONLY - for (i = 0;i < bytes;++i) tmp[i] = m[i]; + for (i = 0; i < bytes; ++i) tmp[i] = m[i]; m = tmp; #endif ctarget = c; @@ -160,7 +160,7 @@ chacha_encrypt_bytes(chacha_ctx *x,const u8 *m,u8 *c,u32 bytes) x13 = j13; x14 = j14; x15 = j15; - for (i = 20;i > 0;i -= 2) { + for (i = 20; i > 0; i -= 2) { QUARTERROUND( x0, x4, x8,x12) QUARTERROUND( x1, x5, x9,x13) QUARTERROUND( x2, x6,x10,x14) @@ -240,7 +240,7 @@ chacha_encrypt_bytes(chacha_ctx *x,const u8 *m,u8 *c,u32 bytes) if (bytes <= 64) { if (bytes < 64) { - for (i = 0;i < bytes;++i) ctarget[i] = c[i]; + for (i = 0; i < bytes; ++i) ctarget[i] = c[i]; } x->input[12] = j12; x->input[13] = j13; diff --git a/sys/crypto/openssl/ossl_sha256.c b/sys/crypto/openssl/ossl_sha256.c index 4613a9409b44..50cb9739d114 100644 --- a/sys/crypto/openssl/ossl_sha256.c +++ b/sys/crypto/openssl/ossl_sha256.c @@ -74,11 +74,11 @@ ossl_sha256_init(void *c_) unsigned int nn; \ switch ((c)->md_len) \ { case SHA224_DIGEST_LENGTH: \ - for (nn=0;nn<SHA224_DIGEST_LENGTH/4;nn++) \ + for (nn=0; nn < SHA224_DIGEST_LENGTH / 4; nn++) \ { ll=(c)->h[nn]; (void)HOST_l2c(ll,(s)); } \ break; \ case SHA256_DIGEST_LENGTH: \ - for (nn=0;nn<SHA256_DIGEST_LENGTH/4;nn++) \ + for (nn=0; nn < SHA256_DIGEST_LENGTH / 4; nn++) \ { ll=(c)->h[nn]; (void)HOST_l2c(ll,(s)); } \ break; \ default: \ diff --git a/sys/dev/aic7xxx/aic79xx.c b/sys/dev/aic7xxx/aic79xx.c index 2b5015b20e41..cee45fa5cc8a 100644 --- a/sys/dev/aic7xxx/aic79xx.c +++ b/sys/dev/aic7xxx/aic79xx.c @@ -7788,8 +7788,8 @@ ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel, } if (role != ROLE_TARGET) { - for (;i < maxtarget; i++) { - for (j = minlun;j < maxlun; j++) { + for (; i < maxtarget; i++) { + for (j = minlun; j < maxlun; j++) { u_int scbid; u_int tcl; diff --git a/sys/dev/aic7xxx/aic7xxx.c b/sys/dev/aic7xxx/aic7xxx.c index c09876e9f589..18f68b806948 100644 --- a/sys/dev/aic7xxx/aic7xxx.c +++ b/sys/dev/aic7xxx/aic7xxx.c @@ -5903,8 +5903,8 @@ ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, } if (role != ROLE_TARGET) { - for (;i < maxtarget; i++) { - for (j = minlun;j < maxlun; j++) { + for (; i < maxtarget; i++) { + for (j = minlun; j < maxlun; j++) { u_int scbid; u_int tcl; diff --git a/sys/dev/enetc/if_enetc.c b/sys/dev/enetc/if_enetc.c index 808397b229a7..53002f9d73ce 100644 --- a/sys/dev/enetc/if_enetc.c +++ b/sys/dev/enetc/if_enetc.c @@ -848,7 +848,7 @@ enetc_hash_vid(uint16_t vid) bool bit; int i; - for (i = 0;i < 6;i++) { + for (i = 0; i < 6; i++) { bit = vid & BIT(i); bit ^= !!(vid & BIT(i + 6)); hash |= bit << i; @@ -1020,7 +1020,7 @@ enetc_msix_intr_assign(if_ctx_t ctx, int msix) ENETC_RBICR0_ICEN | ENETC_RBICR0_SET_ICPT(ENETC_RX_INTR_PKT_THR)); } vector = 0; - for (i = 0;i < sc->tx_num_queues; i++, vector++) { + for (i = 0; i < sc->tx_num_queues; i++, vector++) { tx_queue = &sc->tx_queues[i]; snprintf(irq_name, sizeof(irq_name), "txq%d", i); iflib_softirq_alloc_generic(ctx, &tx_queue->irq, @@ -1130,7 +1130,7 @@ enetc_isc_txd_encap(void *data, if_pkt_info_t ipi) } /* Now add remaining descriptors. 
*/ - for (;i < ipi->ipi_nsegs; i++) { + for (; i < ipi->ipi_nsegs; i++) { desc = &queue->ring[pidx]; bzero(desc, sizeof(*desc)); desc->addr = segs[i].ds_addr; diff --git a/sys/dev/hptmv/entry.c b/sys/dev/hptmv/entry.c index 5c4718bf582f..f3d58f285b39 100644 --- a/sys/dev/hptmv/entry.c +++ b/sys/dev/hptmv/entry.c @@ -430,7 +430,7 @@ static void device_change(IAL_ADAPTER_T *pAdapter , MV_U8 channelIndex, int plug if(pVDev->pParent) { int iMember; - for(iMember = 0; iMember < pVDev->pParent->u.array.bArnMember; iMember++) + for (iMember = 0; iMember < pVDev->pParent->u.array.bArnMember; iMember++) if((PVDevice)pVDev->pParent->u.array.pMember[iMember] == pVDev) pVDev->pParent->u.array.pMember[iMember] = NULL; pVDev->pParent = NULL; @@ -984,7 +984,7 @@ fRegisterVdevice(IAL_ADAPTER_T *pAdapter) PVBus pVBus; int i,j; - for(i=0;i<MV_SATA_CHANNELS_NUM;i++) { + for (i = 0; i < MV_SATA_CHANNELS_NUM; i++) { pPhysical = &(pAdapter->VDevices[i]); pLogical = pPhysical; while (pLogical->pParent) pLogical = pLogical->pParent; @@ -1027,8 +1027,7 @@ GetSpareDisk(_VBUS_ARG PVDevice pArray) PVDevice pVDevice, pFind = NULL; int i; - for(i=0;i<MV_SATA_CHANNELS_NUM;i++) - { + for (i=0; i < MV_SATA_CHANNELS_NUM; i++) { pVDevice = &pAdapter->VDevices[i]; if(!pVDevice) continue; @@ -1356,7 +1355,7 @@ unregister: goto unregister; } - for (i=0; i<MAX_COMMAND_BLOCKS_FOR_EACH_VBUS; i++) { + for (i = 0; i < MAX_COMMAND_BLOCKS_FOR_EACH_VBUS; i++) { FreeCommand(_VBUS_P &(pAdapter->pCommandBlocks[i])); } @@ -1370,7 +1369,7 @@ unregister: memset((void *)pAdapter->pbus_dmamap, 0, sizeof(struct _BUS_DMAMAP) * MAX_QUEUE_COMM); pAdapter->pbus_dmamap_list = 0; - for (i=0; i < MAX_QUEUE_COMM; i++) { + for (i = 0; i < MAX_QUEUE_COMM; i++) { PBUS_DMAMAP pmap = &(pAdapter->pbus_dmamap[i]); pmap->pAdapter = pAdapter; dmamap_put(pmap); @@ -1398,7 +1397,7 @@ unregister: pAdapter->prdTableAlignedAddr = (PUCHAR)(((ULONG_PTR)pAdapter->prdTableAddr + 0x1f) & ~(ULONG_PTR)0x1fL); { PUCHAR PRDTable = pAdapter->prdTableAlignedAddr; - for (i=0; i<PRD_TABLES_FOR_VBUS; i++) + for (i = 0; i < PRD_TABLES_FOR_VBUS; i++) { /* KdPrint(("i=%d,pAdapter->pFreePRDLink=%p\n",i,pAdapter->pFreePRDLink)); */ FreePRDTable(pAdapter, PRDTable); @@ -1447,7 +1446,7 @@ unregister: } #ifdef SUPPORT_ARRAY - for(i = MAX_ARRAY_DEVICE - 1; i >= 0; i--) { + for (i = MAX_ARRAY_DEVICE - 1; i >= 0; i--) { pVDev = ArrayTables(i); mArFreeArrayTable(pVDev); } @@ -1467,7 +1466,7 @@ unregister: _vbus_p->nInstances = 1; fRegisterVdevice(pAdapter); - for (channel=0;channel<MV_SATA_CHANNELS_NUM;channel++) { + for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) { pVDev = _vbus_p->pVDevice[channel]; if (pVDev && pVDev->vf_online) fCheckBootable(pVDev); @@ -1567,7 +1566,7 @@ fResetActiveCommands(PVBus _vbus_p) { MV_SATA_ADAPTER *pMvSataAdapter = &((IAL_ADAPTER_T *)_vbus_p->OsExt)->mvSataAdapter; MV_U8 channel; - for (channel=0;channel< MV_SATA_CHANNELS_NUM;channel++) { + for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) { if (pMvSataAdapter->sataChannel[channel] && pMvSataAdapter->sataChannel[channel]->outstandingCommands) MvSataResetChannel(pMvSataAdapter,channel); } @@ -1590,7 +1589,7 @@ check_cmds: dataxfer_poll(); xor_poll(); #endif - for (channel=0;channel< MV_SATA_CHANNELS_NUM;channel++) { + for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) { pMvSataChannel = pMvSataAdapter->sataChannel[channel]; if (pMvSataChannel && pMvSataChannel->outstandingCommands) { @@ -1716,7 +1715,7 @@ fDeviceSendCommand(_VBUS_ARG PCommand pCmd) MV_BOOLEAN is48bit; MV_U8 
channel; - int i=0; + int i = 0; DECLARE_BUFFER(FPSCAT_GATH, tmpSg); @@ -2141,7 +2140,7 @@ FlushAdapter(IAL_ADAPTER_T *pAdapter) hpt_printk(("flush all devices\n")); /* flush all devices */ - for (i=0; i<MAX_VDEVICE_PER_VBUS; i++) { + for (i = 0; i < MAX_VDEVICE_PER_VBUS; i++) { PVDevice pVDev = pAdapter->VBus.pVDevice[i]; if(pVDev) fFlushVDev(pVDev); } @@ -2174,7 +2173,7 @@ Check_Idle_Call(IAL_ADAPTER_T *pAdapter) { int i; PVDevice pArray; - for(i = 0; i < MAX_ARRAY_PER_VBUS; i++){ + for (i = 0; i < MAX_ARRAY_PER_VBUS; i++) { if ((pArray=ArrayTables(i))->u.array.dArStamp==0) continue; else if (pArray->u.array.rf_auto_rebuild) { @@ -2378,7 +2377,7 @@ hpt_free_ccb(union ccb **ccb_Q, union ccb *ccb) static void hpt_worker_thread(void) { - for(;;) { + for (;;) { mtx_lock(&DpcQueue_Lock); while (DpcQueue_First!=DpcQueue_Last) { ST_HPT_DPC p; @@ -2418,7 +2417,7 @@ static void hpt_worker_thread(void) mtx_lock(&pAdapter->lock); _vbus_p = &pAdapter->VBus; - for (i=0;i<MAX_ARRAY_PER_VBUS;i++) + for (i = 0; i < MAX_ARRAY_PER_VBUS; i++) { if ((pArray=ArrayTables(i))->u.array.dArStamp==0) continue; @@ -2472,7 +2471,7 @@ launch_worker_thread(void) int i; PVDevice pVDev; - for(i = 0; i < MAX_ARRAY_PER_VBUS; i++) + for (i = 0; i < MAX_ARRAY_PER_VBUS; i++) if ((pVDev=ArrayTables(i))->u.array.dArStamp==0) continue; else{ diff --git a/sys/dev/hptmv/gui_lib.c b/sys/dev/hptmv/gui_lib.c index d78fdcca69d2..f11044db733a 100644 --- a/sys/dev/hptmv/gui_lib.c +++ b/sys/dev/hptmv/gui_lib.c @@ -86,8 +86,7 @@ check_VDevice_valid(PVDevice p) while(pAdapter != NULL) { _vbus_p = &pAdapter->VBus; - for (i=0;i<MAX_ARRAY_PER_VBUS;i++) - { + for (i = 0; i<MAX_ARRAY_PER_VBUS; i++) { pVDevice=ArrayTables(i); if ((pVDevice->u.array.dArStamp != 0) && (pVDevice == p)) return 0; @@ -244,9 +243,9 @@ static void get_array_info(PVDevice pVDevice, PHPT_ARRAY_INFO pArrayInfo) if(pVDevice->u.array.pMember[i] != NULL) pArrayInfo->Members[pArrayInfo->nDisk++] = VDEV_TO_ID(pVDevice->u.array.pMember[i]); - for(i=pArrayInfo->nDisk; i<MAX_ARRAY_MEMBERS; i++) + for (i = pArrayInfo->nDisk; i < MAX_ARRAY_MEMBERS; i++) pArrayInfo->Members[i] = INVALID_DEVICEID; - } +} static void get_array_info_v2(PVDevice pVDevice, PHPT_ARRAY_INFO_V2 pArrayInfo) { @@ -266,7 +265,7 @@ static void get_array_info_v2(PVDevice pVDevice, PHPT_ARRAY_INFO_V2 pArrayInfo) if(pVDevice->u.array.pMember[i] != NULL) pArrayInfo->Members[pArrayInfo->nDisk++] = VDEV_TO_ID(pVDevice->u.array.pMember[i]); - for(i=pArrayInfo->nDisk; i<MAX_ARRAY_MEMBERS_V2; i++) + for (i = pArrayInfo->nDisk; i < MAX_ARRAY_MEMBERS_V2; i++) pArrayInfo->Members[i] = INVALID_DEVICEID; } #endif @@ -461,8 +460,7 @@ found: pInfo->IoPort = 0; pInfo->ControlPort = 0; - for (i=0; i<2 ;i++) - { + for (i = 0; i < 2; i++) { pInfo->Devices[i] = (DEVICEID)INVALID_DEVICEID; } diff --git a/sys/dev/hptmv/hptproc.c b/sys/dev/hptmv/hptproc.c index 38fe61ee7e04..328750d9034c 100644 --- a/sys/dev/hptmv/hptproc.c +++ b/sys/dev/hptmv/hptproc.c @@ -107,7 +107,7 @@ hpt_set_asc_info(IAL_ADAPTER_T *pAdapter, char *buffer,int length) return -EINVAL; } - for (i=0;i<MV_SATA_CHANNELS_NUM;i++) + for (i = 0; i < MV_SATA_CHANNELS_NUM; i++) if(i == ichan) goto rebuild; diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c index 261f76055901..bfaf6cd69e58 100644 --- a/sys/dev/ixl/if_ixl.c +++ b/sys/dev/ixl/if_ixl.c @@ -1480,17 +1480,33 @@ ixl_if_multi_set(if_ctx_t ctx) struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = vsi->hw; + enum i40e_status_code status; int mcnt; + if_t ifp = 
iflib_get_ifp(ctx); IOCTL_DEBUGOUT("ixl_if_multi_set: begin"); /* Delete filters for removed multicast addresses */ ixl_del_multi(vsi, false); - mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR); + mcnt = min(if_llmaddr_count(ifp), MAX_MULTICAST_ADDR); if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) { - i40e_aq_set_vsi_multicast_promiscuous(hw, + /* Check if promisc mode is already enabled, if yes return */ + if (vsi->flags & IXL_FLAGS_MC_PROMISC) + return; + + status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL); + if (status != I40E_SUCCESS) + if_printf(ifp, "Failed to enable multicast promiscuous " + "mode, status: %s\n", i40e_stat_str(hw, status)); + else { + if_printf(ifp, "Enabled multicast promiscuous mode\n"); + + /* Set the flag to track promiscuous mode */ + vsi->flags |= IXL_FLAGS_MC_PROMISC; + } + /* Delete all existing MC filters */ ixl_del_multi(vsi, true); return; } @@ -1693,6 +1709,13 @@ ixl_if_promisc_set(if_ctx_t ctx, int flags) return (err); err = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, multi, NULL); + + /* Update the multicast promiscuous flag based on the new state */ + if (multi) + vsi->flags |= IXL_FLAGS_MC_PROMISC; + else + vsi->flags &= ~IXL_FLAGS_MC_PROMISC; + return (err); } diff --git a/sys/dev/ixl/ixl.h b/sys/dev/ixl/ixl.h index 95379448b570..ab0f38307d90 100644 --- a/sys/dev/ixl/ixl.h +++ b/sys/dev/ixl/ixl.h @@ -202,6 +202,7 @@ #define IXL_FLAGS_KEEP_TSO6 (1 << 1) #define IXL_FLAGS_USES_MSIX (1 << 2) #define IXL_FLAGS_IS_VF (1 << 3) +#define IXL_FLAGS_MC_PROMISC (1 << 4) #define IXL_VSI_IS_PF(v) ((v->flags & IXL_FLAGS_IS_VF) == 0) #define IXL_VSI_IS_VF(v) ((v->flags & IXL_FLAGS_IS_VF) != 0) diff --git a/sys/dev/ixl/ixl_pf_main.c b/sys/dev/ixl/ixl_pf_main.c index 1752efc02fff..b62619ced5cb 100644 --- a/sys/dev/ixl/ixl_pf_main.c +++ b/sys/dev/ixl/ixl_pf_main.c @@ -593,24 +593,29 @@ ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) * Routines for multicast and vlan filter management. * *********************************************************************/ + +/** + * ixl_add_multi - Add multicast filters to the hardware + * @vsi: The VSI structure + * + * In case number of multicast filters in the IFP exceeds 127 entries, + * multicast promiscuous mode will be enabled and the filters will be removed + * from the hardware + */ void ixl_add_multi(struct ixl_vsi *vsi) { if_t ifp = vsi->ifp; - struct i40e_hw *hw = vsi->hw; int mcnt = 0; struct ixl_add_maddr_arg cb_arg; IOCTL_DEBUGOUT("ixl_add_multi: begin"); - mcnt = if_llmaddr_count(ifp); - if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { - i40e_aq_set_vsi_multicast_promiscuous(hw, - vsi->seid, TRUE, NULL); - /* delete all existing MC filters */ - ixl_del_multi(vsi, true); - return; - } + /* + * There is no need to check if the number of multicast addresses + * exceeds the MAX_MULTICAST_ADDR threshold and set promiscuous mode + * here, as all callers already handle this case. + */ cb_arg.vsi = vsi; LIST_INIT(&cb_arg.to_add); @@ -633,30 +638,103 @@ ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) return (0); } +/** + * ixl_dis_multi_promisc - Disable multicast promiscuous mode + * @vsi: The VSI structure + * @vsi_mcnt: Number of multicast filters in the VSI + * + * Disable multicast promiscuous mode based on number of entries in the IFP + * and the VSI, then re-add multicast filters. 
+ * + */ +static void +ixl_dis_multi_promisc(struct ixl_vsi *vsi, int vsi_mcnt) +{ + struct ifnet *ifp = vsi->ifp; + struct i40e_hw *hw = vsi->hw; + int ifp_mcnt = 0; + enum i40e_status_code status; + + /* + * Check if multicast promiscuous mode was actually enabled. + * If promiscuous mode was not enabled, don't attempt to disable it. + * Also, don't disable if IFF_PROMISC or IFF_ALLMULTI is set. + */ + if (!(vsi->flags & IXL_FLAGS_MC_PROMISC) || + (if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI))) + return; + + ifp_mcnt = if_llmaddr_count(ifp); + /* + * Equal lists or empty ifp list mean the list has not been changed + * and in such case avoid disabling multicast promiscuous mode as it + * was not previously enabled. Case where multicast promiscuous mode has + * been enabled is when vsi_mcnt == 0 && ifp_mcnt > 0. + */ + if (ifp_mcnt == vsi_mcnt || ifp_mcnt == 0 || + ifp_mcnt >= MAX_MULTICAST_ADDR) + return; + + status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, + FALSE, NULL); + if (status != I40E_SUCCESS) { + if_printf(ifp, "Failed to disable multicast promiscuous " + "mode, status: %s\n", i40e_stat_str(hw, status)); + + return; + } + + /* Clear the flag since promiscuous mode is now disabled */ + vsi->flags &= ~IXL_FLAGS_MC_PROMISC; + if_printf(ifp, "Disabled multicast promiscuous mode\n"); + + ixl_add_multi(vsi); +} + +/** + * ixl_del_multi - Delete multicast filters from the hardware + * @vsi: The VSI structure + * @all: Bool to determine if all the multicast filters should be removed + * + * In case number of multicast filters in the IFP drops to 127 entries, + * multicast promiscuous mode will be disabled and the filters will be reapplied + * to the hardware. + */ void ixl_del_multi(struct ixl_vsi *vsi, bool all) { - struct ixl_ftl_head to_del; + int to_del_cnt = 0, vsi_mcnt = 0; if_t ifp = vsi->ifp; struct ixl_mac_filter *f, *fn; - int mcnt = 0; + struct ixl_ftl_head to_del; IOCTL_DEBUGOUT("ixl_del_multi: begin"); LIST_INIT(&to_del); /* Search for removed multicast addresses */ LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) { - if ((f->flags & IXL_FILTER_MC) == 0 || - (!all && (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0))) + if ((f->flags & IXL_FILTER_MC) == 0) + continue; + + /* Count all the multicast filters in the VSI for comparison */ + vsi_mcnt++; + + if (!all && if_foreach_llmaddr(ifp, ixl_match_maddr, f) != 0) continue; LIST_REMOVE(f, ftle); LIST_INSERT_HEAD(&to_del, f, ftle); - mcnt++; + to_del_cnt++; } - if (mcnt > 0) - ixl_del_hw_filters(vsi, &to_del, mcnt); + if (to_del_cnt > 0) { + ixl_del_hw_filters(vsi, &to_del, to_del_cnt); + return; + } + + ixl_dis_multi_promisc(vsi, vsi_mcnt); + + IOCTL_DEBUGOUT("ixl_del_multi: end"); } void diff --git a/sys/dev/mps/mps_sas.c b/sys/dev/mps/mps_sas.c index d69c8ea5fded..fa0f817ed67b 100644 --- a/sys/dev/mps/mps_sas.c +++ b/sys/dev/mps/mps_sas.c @@ -858,7 +858,7 @@ mps_detach_sas(struct mps_softc *sc) if (sassc->devq != NULL) cam_simq_free(sassc->devq); - for(i=0; i< sassc->maxtargets ;i++) { + for (i = 0; i < sassc->maxtargets; i++) { targ = &sassc->targets[i]; SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) { free(lun, M_MPT2); @@ -3396,7 +3396,7 @@ mpssas_realloc_targets(struct mps_softc *sc, int maxtargets) * the allocated LUNs for each target and then the target buffer * itself. 
*/ - for (i=0; i< maxtargets; i++) { + for (i = 0; i < maxtargets; i++) { targ = &sassc->targets[i]; SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) { free(lun, M_MPT2); diff --git a/sys/dev/mpt/mpt_raid.c b/sys/dev/mpt/mpt_raid.c index 5ff08ffcf2b3..2b868f6ef070 100644 --- a/sys/dev/mpt/mpt_raid.c +++ b/sys/dev/mpt/mpt_raid.c @@ -830,7 +830,7 @@ mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt) } ioc_vol = mpt->ioc_page2->RaidVolume; ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes; - for (;ioc_vol != ioc_last_vol; ioc_vol++) { + for (; ioc_vol != ioc_last_vol; ioc_vol++) { if (ioc_vol->VolumeID == tgt) { return (1); } @@ -1406,7 +1406,7 @@ mpt_refresh_raid_data(struct mpt_softc *mpt) ioc_vol = mpt->ioc_page2->RaidVolume; ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes; - for (;ioc_vol != ioc_last_vol; ioc_vol++) { + for (; ioc_vol != ioc_last_vol; ioc_vol++) { struct mpt_raid_volume *mpt_vol; mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber; diff --git a/sys/dev/nfe/if_nfe.c b/sys/dev/nfe/if_nfe.c index 4625c2616562..265181ef7ad0 100644 --- a/sys/dev/nfe/if_nfe.c +++ b/sys/dev/nfe/if_nfe.c @@ -2078,7 +2078,7 @@ nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp) bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, BUS_DMASYNC_POSTREAD); - for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) { + for (prog = 0; ; NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) { if (count <= 0) break; count--; @@ -2192,7 +2192,7 @@ nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp) bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map, BUS_DMASYNC_POSTREAD); - for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT), + for (prog = 0; ; NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT), vtag = 0) { if (count <= 0) break; diff --git a/sys/dev/ocs_fc/ocs_mgmt.c b/sys/dev/ocs_fc/ocs_mgmt.c index 726b499f28ba..5b7f6557c017 100644 --- a/sys/dev/ocs_fc/ocs_mgmt.c +++ b/sys/dev/ocs_fc/ocs_mgmt.c @@ -226,7 +226,7 @@ ocs_mgmt_get_list(ocs_t *ocs, ocs_textbuf_t *textbuf) ocs_mgmt_start_unnumbered_section(textbuf, "ocs"); - for (i=0;i<ARRAY_SIZE(mgmt_table);i++) { + for (i = 0; i < ARRAY_SIZE(mgmt_table); i++) { access = 0; if (mgmt_table[i].get_handler) { access |= MGMT_MODE_RD; @@ -305,7 +305,7 @@ ocs_mgmt_get(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) { char *unqualified_name = name + strlen(qualifier) + 1; - for (i=0;i<ARRAY_SIZE(mgmt_table);i++) { + for (i = 0; i < ARRAY_SIZE(mgmt_table); i++) { if (ocs_strcmp(unqualified_name, mgmt_table[i].name) == 0) { if (mgmt_table[i].get_handler) { mgmt_table[i].get_handler(ocs, name, textbuf); @@ -387,7 +387,7 @@ ocs_mgmt_set(ocs_t *ocs, char *name, char *value) char *unqualified_name = name + strlen(qualifier) +1; /* See if it's a value I can set */ - for (i=0;i<ARRAY_SIZE(mgmt_table);i++) { + for (i = 0; i < ARRAY_SIZE(mgmt_table); i++) { if (ocs_strcmp(unqualified_name, mgmt_table[i].name) == 0) { if (mgmt_table[i].set_handler) { return mgmt_table[i].set_handler(ocs, name, value); @@ -469,7 +469,7 @@ ocs_mgmt_exec(ocs_t *ocs, char *action, void *arg_in, char *unqualified_name = action + strlen(qualifier) +1; /* See if it's an action I can perform */ - for (i=0;i<ARRAY_SIZE(mgmt_table); i++) { + for (i = 0; i < ARRAY_SIZE(mgmt_table); i++) { if (ocs_strcmp(unqualified_name, mgmt_table[i].name) == 0) { if (mgmt_table[i].action_handler) { return mgmt_table[i].action_handler(ocs, action, arg_in, arg_in_length, @@ -527,7 
+527,7 @@ ocs_mgmt_get_all(ocs_t *ocs, ocs_textbuf_t *textbuf) ocs_mgmt_start_unnumbered_section(textbuf, "ocs"); - for (i=0;i<ARRAY_SIZE(mgmt_table);i++) { + for (i = 0; i < ARRAY_SIZE(mgmt_table); i++) { if (mgmt_table[i].get_handler) { mgmt_table[i].get_handler(ocs, mgmt_table[i].name, textbuf); } else if (mgmt_table[i].action_handler) { @@ -1212,7 +1212,7 @@ get_sfp_a2(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) int buffer_remaining = (SFP_PAGE_SIZE * 3) + 1; int bytes_added; - for (i=0; i < bytes_read; i++) { + for (i = 0; i < bytes_read; i++) { bytes_added = ocs_snprintf(d, buffer_remaining, "%02x ", *s); ++s; d += bytes_added; @@ -2040,7 +2040,7 @@ get_profile_list(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) result_buf = ocs_malloc(ocs, BUFFER_SIZE, OCS_M_ZERO); bytes_left = BUFFER_SIZE; - for (i=0; i<result.list->num_descriptors; i++) { + for (i = 0; i < result.list->num_descriptors; i++) { sprintf(result_line, "0x%02x:%s\n", result.list->descriptors[i].profile_id, result.list->descriptors[i].profile_description); if (strlen(result_line) < bytes_left) { diff --git a/sys/dev/ppc/ppc.c b/sys/dev/ppc/ppc.c index 9870379e2eba..de75f4747709 100644 --- a/sys/dev/ppc/ppc.c +++ b/sys/dev/ppc/ppc.c @@ -1389,7 +1389,7 @@ ppc_exec_microseq(device_t dev, struct ppb_microseq **p_msq) /* let's suppose the next instr. is the same */ prefetch: - for (;mi->opcode == MS_OP_RASSERT; INCR_PC) + for (; mi->opcode == MS_OP_RASSERT; INCR_PC) w_reg(mi->arg[0].i, ppc, (char)mi->arg[1].i); if (mi->opcode == MS_OP_DELAY) { diff --git a/sys/dev/smartpqi/smartpqi_event.c b/sys/dev/smartpqi/smartpqi_event.c index f000d9ce9db3..88dcf45dd08a 100644 --- a/sys/dev/smartpqi/smartpqi_event.c +++ b/sys/dev/smartpqi/smartpqi_event.c @@ -115,7 +115,7 @@ pqisrc_ack_all_events(void *arg1) pending_event = &softs->pending_events[0]; - for (i=0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { + for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { if (pending_event->pending == true) { pending_event->pending = false; pqisrc_acknowledge_event(softs, pending_event); @@ -417,7 +417,7 @@ pqisrc_report_event_config(pqisrc_softstate_t *softs) softs->event_config.num_event_descriptors = MIN(event_config_p->num_event_descriptors, PQI_MAX_EVENT_DESCRIPTORS) ; - for (i=0; i < softs->event_config.num_event_descriptors ;i++){ + for (i = 0; i < softs->event_config.num_event_descriptors; i++) { softs->event_config.descriptors[i].event_type = event_config_p->descriptors[i].event_type; } @@ -477,7 +477,7 @@ pqisrc_set_event_config(pqisrc_softstate_t *softs) event_config_p->num_event_descriptors = softs->event_config.num_event_descriptors; - for (i=0; i < softs->event_config.num_event_descriptors ; i++){ + for (i = 0; i < softs->event_config.num_event_descriptors; i++) { event_config_p->descriptors[i].event_type = softs->event_config.descriptors[i].event_type; if( pqisrc_event_type_to_event_index(event_config_p->descriptors[i].event_type) != -1) diff --git a/sys/dev/smartpqi/smartpqi_queue.c b/sys/dev/smartpqi/smartpqi_queue.c index 2e80b01b5436..f05c951cd4f9 100644 --- a/sys/dev/smartpqi/smartpqi_queue.c +++ b/sys/dev/smartpqi/smartpqi_queue.c @@ -700,7 +700,7 @@ pqisrc_create_op_obq(pqisrc_softstate_t *softs, } else { int i = 0; DBG_WARN("Error Status Descriptors\n"); - for(i = 0; i < 4;i++) + for (i = 0; i < 4; i++) DBG_WARN(" %x ",admin_resp.resp_type.create_op_oq.status_desc[i]); } @@ -743,7 +743,7 @@ pqisrc_create_op_ibq(pqisrc_softstate_t *softs, } else { int i = 0; DBG_WARN("Error Status Decsriptors\n"); - for(i = 0; i < 4;i++) + for (i = 0; 
i < 4; i++) DBG_WARN(" %x ",admin_resp.resp_type.create_op_iq.status_desc[i]); } diff --git a/sys/dev/sym/sym_hipd.c b/sys/dev/sym/sym_hipd.c index fa65d544e17d..b4e5c1075fb4 100644 --- a/sys/dev/sym/sym_hipd.c +++ b/sys/dev/sym/sym_hipd.c @@ -3266,7 +3266,7 @@ static void sym_init (hcb_p np, int reason) * Reinitialize usrwide. * Prepare sync negotiation according to actual SCSI bus mode. */ - for (i=0;i<SYM_CONF_MAX_TARGET;i++) { + for (i = 0; i < SYM_CONF_MAX_TARGET; i++) { tcb_p tp = &np->target[i]; tp->to_reset = 0; @@ -3715,7 +3715,7 @@ static void sym_log_hard_error(hcb_p np, u_short sist, u_char dstat) } printf ("%s: regdump:", sym_name(np)); - for (i=0; i<24;i++) + for (i = 0; i < 24; i++) printf (" %02x", (unsigned)INB_OFF(i)); printf (".\n"); @@ -5527,8 +5527,8 @@ static int sym_show_msg (u_char * msg) u_char i; printf ("%x",*msg); if (*msg==M_EXTENDED) { - for (i=1;i<8;i++) { - if (i-1>msg[1]) break; + for (i = 1; i < 8; i++) { + if (i - 1 > msg[1]) break; printf ("-%x",msg[i]); } return (i+1); @@ -6744,10 +6744,10 @@ restart_test: /* * Wait 'til done (with timeout) */ - for (i=0; i<SYM_SNOOP_TIMEOUT; i++) + for (i = 0; i < SYM_SNOOP_TIMEOUT; i++) if (INB(nc_istat) & (INTF|SIP|DIP)) break; - if (i>=SYM_SNOOP_TIMEOUT) { + if (i >= SYM_SNOOP_TIMEOUT) { printf ("CACHE TEST FAILED: timeout.\n"); return (0x20); } diff --git a/sys/dev/tws/tws.c b/sys/dev/tws/tws.c index af151c8c4f06..fccd6689a6aa 100644 --- a/sys/dev/tws/tws.c +++ b/sys/dev/tws/tws.c @@ -311,7 +311,7 @@ attach_fail_4: if (sc->cmd_tag) bus_dma_tag_destroy(sc->cmd_tag); attach_fail_3: - for(i=0;i<sc->irqs;i++) { + for (i = 0; i < sc->irqs; i++) { if ( sc->irq_res[i] ){ if (bus_release_resource(sc->tws_dev, SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i])) @@ -369,7 +369,7 @@ tws_detach(device_t dev) tws_teardown_intr(sc); /* Release irq resource */ - for(i=0;i<sc->irqs;i++) { + for (i = 0; i < sc->irqs; i++) { if ( sc->irq_res[i] ){ if (bus_release_resource(sc->tws_dev, SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i])) @@ -402,7 +402,7 @@ tws_detach(device_t dev) TWS_TRACE(sc, "bus release mem resource", 0, sc->reg_res_id); } - for ( i=0; i< tws_queue_depth; i++) { + for (i = 0; i < tws_queue_depth; i++) { if (sc->reqs[i].dma_map) bus_dmamap_destroy(sc->data_tag, sc->reqs[i].dma_map); callout_drain(&sc->reqs[i].timeout); @@ -432,7 +432,7 @@ tws_setup_intr(struct tws_softc *sc, int irqs) { int i, error; - for(i=0;i<irqs;i++) { + for (i = 0; i < irqs; i++) { if (!(sc->intr_handle[i])) { if ((error = bus_setup_intr(sc->tws_dev, sc->irq_res[i], INTR_TYPE_CAM | INTR_MPSAFE, @@ -452,7 +452,7 @@ tws_teardown_intr(struct tws_softc *sc) { int i; - for(i=0;i<sc->irqs;i++) { + for (i = 0; i < sc->irqs; i++) { if (sc->intr_handle[i]) { bus_teardown_intr(sc->tws_dev, sc->irq_res[i], sc->intr_handle[i]); @@ -669,8 +669,7 @@ tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size) bzero(cmd_buf, dma_mem_size); TWS_TRACE_DEBUG(sc, "phy cmd", sc->dma_mem_phys, 0); mtx_lock(&sc->q_lock); - for ( i=0; i< tws_queue_depth; i++) - { + for (i = 0; i < tws_queue_depth; i++) { if (bus_dmamap_create(sc->data_tag, 0, &sc->reqs[i].dma_map)) { /* log a ENOMEM failure msg here */ mtx_unlock(&sc->q_lock); diff --git a/sys/dev/tws/tws_services.c b/sys/dev/tws/tws_services.c index da8bbacc39f7..e5c3d45c533f 100644 --- a/sys/dev/tws/tws_services.c +++ b/sys/dev/tws/tws_services.c @@ -200,7 +200,7 @@ tws_init_qs(struct tws_softc *sc) { mtx_lock(&sc->q_lock); - for(int i=0;i<TWS_MAX_QS;i++) { + for (int i = 0; i < TWS_MAX_QS; i++) { sc->q_head[i] = NULL; 
sc->q_tail[i] = NULL; } diff --git a/sys/fs/devfs/devfs_dir.c b/sys/fs/devfs/devfs_dir.c index 3dc87538017d..aad87606e738 100644 --- a/sys/fs/devfs/devfs_dir.c +++ b/sys/fs/devfs/devfs_dir.c @@ -162,7 +162,7 @@ int devfs_pathpath(const char *p1, const char *p2) { - for (;;p1++, p2++) { + for (;; p1++, p2++) { if (*p1 != *p2) { if (*p1 == '/' && *p2 == '\0') return (1); diff --git a/sys/fs/udf/osta.c b/sys/fs/udf/osta.c index f79b86993367..1a083d8c26b1 100644 --- a/sys/fs/udf/osta.c +++ b/sys/fs/udf/osta.c @@ -383,7 +383,7 @@ int UDFTransName( int maxFilenameLen; /* Translate extension, and store it in ext. */ for(index = 0; index<EXT_SIZE && - extIndex + index +1 < udfLen; index++ ) { + extIndex + index +1 < udfLen; index++) { current = udfName[extIndex + index + 1]; if (IsIllegal(current) || !UnicodeIsPrint(current)) { @@ -432,7 +432,7 @@ int UDFTransName( /* Place a translated extension at end, if found. */ if (hasExt) { newName[newIndex++] = PERIOD; - for (index = 0;index < localExtIndex ;index++ ) { + for (index = 0; index < localExtIndex; index++) { newName[newIndex++] = ext[index]; } } diff --git a/sys/fs/unionfs/union_vnops.c b/sys/fs/unionfs/union_vnops.c index 26fa14603c85..66fee97a07d5 100644 --- a/sys/fs/unionfs/union_vnops.c +++ b/sys/fs/unionfs/union_vnops.c @@ -2208,7 +2208,6 @@ unionfs_lock_restart: vholdnz(tvp); VI_UNLOCK(vp); error = VOP_LOCK(tvp, flags); - vdrop(tvp); if (error == 0 && (lvp_locked || VTOUNIONFS(vp) == NULL)) { /* * After dropping the interlock above, there exists a window @@ -2234,6 +2233,7 @@ unionfs_lock_restart: unp = VTOUNIONFS(vp); if (unp == NULL || unp->un_uppervp != NULL) { VOP_UNLOCK(tvp); + vdrop(tvp); /* * If we previously held the lock, the upgrade may * have temporarily dropped the lock, in which case @@ -2249,6 +2249,7 @@ unionfs_lock_restart: goto unionfs_lock_restart; } } + vdrop(tvp); return (error); } @@ -2259,7 +2260,6 @@ unionfs_unlock(struct vop_unlock_args *ap) struct vnode *vp; struct vnode *tvp; struct unionfs_node *unp; - int error; KASSERT_UNIONFS_VNODE(ap->a_vp); @@ -2271,11 +2271,7 @@ unionfs_unlock(struct vop_unlock_args *ap) tvp = (unp->un_uppervp != NULL ? 
unp->un_uppervp : unp->un_lowervp); - vholdnz(tvp); - error = VOP_UNLOCK(tvp); - vdrop(tvp); - - return (error); + return (VOP_UNLOCK(tvp)); } static int diff --git a/sys/i386/i386/in_cksum_machdep.c b/sys/i386/i386/in_cksum_machdep.c index 27ab09d82da0..b658d85bc892 100644 --- a/sys/i386/i386/in_cksum_machdep.c +++ b/sys/i386/i386/in_cksum_machdep.c @@ -84,7 +84,7 @@ in_cksum_skip(struct mbuf *m, int len, int skip) } } - for (;m && len; m = m->m_next) { + for (; m && len; m = m->m_next) { if (m->m_len == 0) continue; w = mtod(m, u_short *); diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c index c53707a1286c..a1fabbc86f27 100644 --- a/sys/kern/imgact_elf.c +++ b/sys/kern/imgact_elf.c @@ -2831,7 +2831,7 @@ __elfN(parse_notes)(const struct image_params *imgp, const Elf_Note *checknote, } if ((const char *)note_end - (const char *)note < sizeof(Elf_Note)) { - uprintf("ELF note to short\n"); + uprintf("ELF note too short\n"); goto retf; } if (note->n_namesz != checknote->n_namesz || @@ -2839,9 +2839,9 @@ __elfN(parse_notes)(const struct image_params *imgp, const Elf_Note *checknote, note->n_type != checknote->n_type) goto nextnote; note_name = (const char *)(note + 1); - if (note_name + checknote->n_namesz >= - (const char *)note_end || strncmp(note_vendor, - note_name, checknote->n_namesz) != 0) + if (note_name + roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE) + + note->n_descsz >= (const char *)note_end || + strncmp(note_vendor, note_name, checknote->n_namesz) != 0) goto nextnote; if (cb(note, cb_arg, &res)) diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c index ab8ed32ad189..c4b1c8201ff2 100644 --- a/sys/kern/kern_exit.c +++ b/sys/kern/kern_exit.c @@ -807,7 +807,7 @@ kern_abort2(struct thread *td, const char *why, int nargs, void **uargs) } if (nargs > 0) { sbuf_putc(sb, '('); - for (i = 0;i < nargs; i++) + for (i = 0; i < nargs; i++) sbuf_printf(sb, "%s%p", i == 0 ? 
"" : ", ", uargs[i]); sbuf_putc(sb, ')'); } diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c index e919b15543b2..fcbfbe64f854 100644 --- a/sys/kern/kern_malloc.c +++ b/sys/kern/kern_malloc.c @@ -1302,7 +1302,7 @@ mallocinit(void *dummy) #endif align, UMA_ZONE_MALLOC); } - for (;i <= size; i+= KMEM_ZBASE) + for (; i <= size; i+= KMEM_ZBASE) kmemsize[i >> KMEM_ZSHIFT] = indx; } } diff --git a/sys/kern/subr_devstat.c b/sys/kern/subr_devstat.c index 07a9cc0f57be..c4d0223d484f 100644 --- a/sys/kern/subr_devstat.c +++ b/sys/kern/subr_devstat.c @@ -415,7 +415,7 @@ sysctl_devstat(SYSCTL_HANDLER_ARGS) if (error != 0) return (error); - for (;nds != NULL;) { + while (nds != NULL) { error = SYSCTL_OUT(req, nds, sizeof(struct devstat)); if (error != 0) return (error); diff --git a/sys/kern/subr_prf.c b/sys/kern/subr_prf.c index db0ceb17b9f0..e2070ae3f865 100644 --- a/sys/kern/subr_prf.c +++ b/sys/kern/subr_prf.c @@ -766,7 +766,7 @@ reswitch: switch (ch = (u_char)*fmt++) { PCHAR(hex2ascii(*up & 0x0f)); up++; if (width) - for (q=p;*q;q++) + for (q = p; *q; q++) PCHAR(*q); } break; diff --git a/sys/kern/uipc_usrreq.c b/sys/kern/uipc_usrreq.c index c5fc1e84ce3f..90489e99491a 100644 --- a/sys/kern/uipc_usrreq.c +++ b/sys/kern/uipc_usrreq.c @@ -1559,15 +1559,19 @@ restart: mc_init_m(&cmc, control); SOCK_RECVBUF_LOCK(so); - MPASS(!(sb->sb_state & SBS_CANTRCVMORE)); - - if (__predict_false(cmc.mc_len + sb->sb_ccc + - sb->sb_ctl > sb->sb_hiwat)) { + if (__predict_false( + (sb->sb_state & SBS_CANTRCVMORE) || + cmc.mc_len + sb->sb_ccc + sb->sb_ctl > + sb->sb_hiwat)) { /* - * Too bad, while unp_externalize() was - * failing, the other side had filled - * the buffer and we can't prepend data - * back. Losing data! + * While the lock was dropped and we + * were failing in unp_externalize(), + * the peer could has a) disconnected, + * b) filled the buffer so that we + * can't prepend data back. + * These are two edge conditions that + * we just can't handle, so lose the + * data and return the error. */ SOCK_RECVBUF_UNLOCK(so); SOCK_IO_RECV_UNLOCK(so); diff --git a/sys/netinet/in_mcast.c b/sys/netinet/in_mcast.c index f5b20c49ffd2..ba112afbf002 100644 --- a/sys/netinet/in_mcast.c +++ b/sys/netinet/in_mcast.c @@ -159,9 +159,6 @@ static struct ip_moptions * static int inp_get_source_filters(struct inpcb *, struct sockopt *); static int inp_join_group(struct inpcb *, struct sockopt *); static int inp_leave_group(struct inpcb *, struct sockopt *); -static struct ifnet * - inp_lookup_mcast_ifp(const struct inpcb *, - const struct sockaddr_in *, const struct in_addr); static int inp_block_unblock_source(struct inpcb *, struct sockopt *); static int inp_set_multicast_if(struct inpcb *, struct sockopt *); static int inp_set_source_filters(struct inpcb *, struct sockopt *); @@ -1832,69 +1829,55 @@ inp_getmoptions(struct inpcb *inp, struct sockopt *sopt) } /* - * Look up the ifnet to use for a multicast group membership, - * given the IPv4 address of an interface, and the IPv4 group address. - * - * This routine exists to support legacy multicast applications - * which do not understand that multicast memberships are scoped to - * specific physical links in the networking stack, or which need - * to join link-scope groups before IPv4 addresses are configured. - * - * Use this socket's current FIB number for any required FIB lookup. - * If ina is INADDR_ANY, look up the group address in the unicast FIB, - * and use its ifp; usually, this points to the default next-hop. 
- * - * If the FIB lookup fails, attempt to use the first non-loopback - * interface with multicast capability in the system as a - * last resort. The legacy IPv4 ASM API requires that we do - * this in order to allow groups to be joined when the routing - * table has not yet been populated during boot. - * - * Returns NULL if no ifp could be found, otherwise return referenced ifp. + * Look up the ifnet to join a multicast group membership via legacy + * IP_ADD_MEMBERSHIP or via more modern MCAST_JOIN_GROUP. * - * FUTURE: Implement IPv4 source-address selection. + * If the interface index was specified explicitly, just use it. If the + * address was specified (legacy), try to find matching interface. Else + * (index == 0 && no address) do a route lookup. If that fails for a modern + * MCAST_JOIN_GROUP return failure, for legacy IP_ADD_MEMBERSHIP find first + * multicast capable interface. */ static struct ifnet * -inp_lookup_mcast_ifp(const struct inpcb *inp, - const struct sockaddr_in *gsin, const struct in_addr ina) +inp_lookup_mcast_ifp(const struct inpcb *inp, const struct in_addr maddr, +const struct in_addr *ina, const u_int index) { struct ifnet *ifp; struct nhop_object *nh; NET_EPOCH_ASSERT(); - KASSERT(inp != NULL, ("%s: inp must not be NULL", __func__)); - KASSERT(gsin->sin_family == AF_INET, ("%s: not AF_INET", __func__)); - KASSERT(IN_MULTICAST(ntohl(gsin->sin_addr.s_addr)), - ("%s: not multicast", __func__)); - ifp = NULL; - if (!in_nullhost(ina)) { - INADDR_TO_IFP(ina, ifp); + if (index != 0) + return (ifnet_byindex_ref(index)); + + if (ina != NULL && !in_nullhost(*ina)) { + INADDR_TO_IFP(*ina, ifp); if (ifp != NULL) if_ref(ifp); - } else { - nh = fib4_lookup(inp->inp_inc.inc_fibnum, gsin->sin_addr, 0, NHR_NONE, 0); - if (nh != NULL) { - ifp = nh->nh_ifp; - if_ref(ifp); - } else { - struct in_ifaddr *ia; - struct ifnet *mifp; - - mifp = NULL; - CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) { - mifp = ia->ia_ifp; - if (!(mifp->if_flags & IFF_LOOPBACK) && - (mifp->if_flags & IFF_MULTICAST)) { - ifp = mifp; - if_ref(ifp); - break; - } + return (ifp); + } + + nh = fib4_lookup(inp->inp_inc.inc_fibnum, maddr, 0, NHR_NONE, 0); + if (nh != NULL) { + ifp = nh->nh_ifp; + if_ref(ifp); + return (ifp); + } + + if (ina != NULL) { + struct in_ifaddr *ia; + + CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) { + if (!(ia->ia_ifp->if_flags & IFF_LOOPBACK) && + (ia->ia_ifp->if_flags & IFF_MULTICAST)) { + ifp = ia->ia_ifp; + if_ref(ifp); + return (ifp); } } } - return (ifp); + return (NULL); } /* @@ -1926,13 +1909,13 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt) switch (sopt->sopt_name) { case IP_ADD_MEMBERSHIP: { struct ip_mreqn mreqn; + bool mreq; - if (sopt->sopt_valsize == sizeof(struct ip_mreqn)) - error = sooptcopyin(sopt, &mreqn, - sizeof(struct ip_mreqn), sizeof(struct ip_mreqn)); - else - error = sooptcopyin(sopt, &mreqn, - sizeof(struct ip_mreq), sizeof(struct ip_mreq)); + mreq = (sopt->sopt_valsize != sizeof(struct ip_mreqn)); + + error = sooptcopyin(sopt, &mreqn, + mreq ? sizeof(struct ip_mreq) : sizeof(struct ip_mreqn), + mreq ? 
sizeof(struct ip_mreq) : sizeof(struct ip_mreqn)); if (error) return (error); @@ -1943,12 +1926,9 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt) return (EINVAL); NET_EPOCH_ENTER(et); - if (sopt->sopt_valsize == sizeof(struct ip_mreqn) && - mreqn.imr_ifindex != 0) - ifp = ifnet_byindex_ref(mreqn.imr_ifindex); - else - ifp = inp_lookup_mcast_ifp(inp, &gsa->sin, - mreqn.imr_address); + ifp = inp_lookup_mcast_ifp(inp, mreqn.imr_multiaddr, + mreq ? &mreqn.imr_address : NULL, + mreq ? 0 : mreqn.imr_ifindex); NET_EPOCH_EXIT(et); break; } @@ -1971,8 +1951,8 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt) ssa->sin.sin_addr = mreqs.imr_sourceaddr; NET_EPOCH_ENTER(et); - ifp = inp_lookup_mcast_ifp(inp, &gsa->sin, - mreqs.imr_interface); + ifp = inp_lookup_mcast_ifp(inp, mreqs.imr_multiaddr, + &mreqs.imr_interface, 0); NET_EPOCH_EXIT(et); CTR3(KTR_IGMPV3, "%s: imr_interface = 0x%08x, ifp = %p", __func__, ntohl(mreqs.imr_interface.s_addr), ifp); @@ -2013,7 +1993,8 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt) return (EINVAL); NET_EPOCH_ENTER(et); - ifp = ifnet_byindex_ref(gsr.gsr_interface); + ifp = inp_lookup_mcast_ifp(inp, gsa->sin.sin_addr, NULL, + gsr.gsr_interface); NET_EPOCH_EXIT(et); if (ifp == NULL) return (EADDRNOTAVAIL); diff --git a/sys/netinet/siftr.c b/sys/netinet/siftr.c index 374b5595fcbc..5b89ca026e85 100644 --- a/sys/netinet/siftr.c +++ b/sys/netinet/siftr.c @@ -519,7 +519,7 @@ siftr_pkt_manager_thread(void *arg) if (log_buf != NULL) { alq_post_flags(siftr_alq, log_buf, 0); } - for (;cnt > 0; cnt--) { + for (; cnt > 0; cnt--) { pkt_node = STAILQ_FIRST(&tmp_pkt_queue); STAILQ_REMOVE_HEAD(&tmp_pkt_queue, nodes); free(pkt_node, M_SIFTR_PKTNODE); diff --git a/sys/netinet/tcp_hpts_test.c b/sys/netinet/tcp_hpts_test.c index bab5827e0572..c5dc9cb5b03b 100644 --- a/sys/netinet/tcp_hpts_test.c +++ b/sys/netinet/tcp_hpts_test.c @@ -27,6 +27,7 @@ #include <tests/ktest.h> #include <sys/cdefs.h> +#include "opt_inet.h" #include <sys/param.h> #include <sys/bus.h> #include <sys/interrupt.h> @@ -119,6 +120,8 @@ SYSCTL_INT(_net_inet_tcp_hpts_test, OID_AUTO, exit_on_failure, CTLFLAG_RW, } \ } while (0) +#ifdef TCP_HPTS_KTEST + static void dump_hpts_entry(struct ktest_test_context *ctx, struct tcp_hpts_entry *hpts) { @@ -1658,5 +1661,22 @@ static const struct ktest_test_info tests[] = { KTEST_INFO(generation_count_validation), }; +#else /* TCP_HPTS_KTEST */ + +/* + * Stub to indicate that the TCP HPTS ktest is not enabled. 
+ */ +KTEST_FUNC(module_load_without_tests) +{ + KTEST_LOG(ctx, "Warning: TCP HPTS ktest is not enabled"); + return (0); +} + +static const struct ktest_test_info tests[] = { + KTEST_INFO(module_load_without_tests), +}; + +#endif + KTEST_MODULE_DECLARE(ktest_tcphpts, tests); KTEST_MODULE_DEPEND(ktest_tcphpts, tcphpts); diff --git a/sys/netinet/tcp_input.c b/sys/netinet/tcp_input.c index dd27ec77c1af..2146b0cac48f 100644 --- a/sys/netinet/tcp_input.c +++ b/sys/netinet/tcp_input.c @@ -219,7 +219,7 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_do_autorcvbuf), 0, "Enable automatic receive buffer sizing"); -VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024; +VNET_DEFINE(int, tcp_autorcvbuf_max) = 8*1024*1024; SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_autorcvbuf_max), 0, "Max size of automatic receive buffer"); diff --git a/sys/netinet/tcp_output.c b/sys/netinet/tcp_output.c index 2dfb7faf56e3..208f72c4661c 100644 --- a/sys/netinet/tcp_output.c +++ b/sys/netinet/tcp_output.c @@ -123,7 +123,7 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_inc, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_autosndbuf_inc), 0, "Incrementor step size of automatic send buffer"); -VNET_DEFINE(int, tcp_autosndbuf_max) = 2*1024*1024; +VNET_DEFINE(int, tcp_autosndbuf_max) = 8*1024*1024; SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_max, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_autosndbuf_max), 0, "Max size of automatic send buffer"); diff --git a/sys/netpfil/ipfw/ip_dummynet.c b/sys/netpfil/ipfw/ip_dummynet.c index b3f52322425f..d522f9da0fbe 100644 --- a/sys/netpfil/ipfw/ip_dummynet.c +++ b/sys/netpfil/ipfw/ip_dummynet.c @@ -1150,7 +1150,7 @@ copy_data_helper(void *_o, void *_arg) return 0; /* not a pipe */ /* see if the object is within one of our ranges */ - for (;r < lim; r += 2) { + for (; r < lim; r += 2) { if (n < r[0] || n > r[1]) continue; /* Found a valid entry, copy and we are done */ @@ -1183,7 +1183,7 @@ copy_data_helper(void *_o, void *_arg) if (n >= DN_MAX_ID) return 0; /* see if the object is within one of our ranges */ - for (;r < lim; r += 2) { + for (; r < lim; r += 2) { if (n < r[0] || n > r[1]) continue; if (copy_flowset(a, fs, 0)) diff --git a/sys/sys/sockbuf.h b/sys/sys/sockbuf.h index b4593f38f592..739723754b7d 100644 --- a/sys/sys/sockbuf.h +++ b/sys/sys/sockbuf.h @@ -62,7 +62,7 @@ #include <sys/_sx.h> #include <sys/_task.h> -#define SB_MAX (2*1024*1024) /* default for max chars in sockbuf */ +#define SB_MAX (8*1024*1024) /* default for max chars in sockbuf */ struct ktls_session; struct mbuf; diff --git a/sys/sys/sockopt.h b/sys/sys/sockopt.h index bfe12d8510d7..d2b0ff5ed2c8 100644 --- a/sys/sys/sockopt.h +++ b/sys/sys/sockopt.h @@ -57,8 +57,10 @@ struct sockopt { int sosetopt(struct socket *so, struct sockopt *sopt); int sogetopt(struct socket *so, struct sockopt *sopt); -int sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen); -int sooptcopyout(struct sockopt *sopt, const void *buf, size_t len); +int __result_use_check sooptcopyin(struct sockopt *sopt, void *buf, size_t len, + size_t minlen); +int __result_use_check sooptcopyout(struct sockopt *sopt, const void *buf, + size_t len); int soopt_getm(struct sockopt *sopt, struct mbuf **mp); int soopt_mcopyin(struct sockopt *sopt, struct mbuf *m); int soopt_mcopyout(struct sockopt *sopt, struct mbuf *m); |
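The sys/sys/sockopt.h hunk above marks sooptcopyin() and sooptcopyout() as __result_use_check, so callers can no longer silently ignore their return values. A minimal sketch of the calling pattern this enforces, for a hypothetical integer option handler (the handler name and option storage below are illustrative only, not part of the commit):

#include <sys/param.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockopt.h>

/*
 * Hypothetical handler for a single integer option: both the copyin and
 * the copyout result are propagated to the caller, as the new annotation
 * requires.
 */
static int
example_sockopt_handler(struct sockopt *sopt, int *optvalp)
{
	int error, val;

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		error = sooptcopyin(sopt, &val, sizeof(val), sizeof(val));
		if (error != 0)
			return (error);
		if (val < 0)
			return (EINVAL);
		*optvalp = val;
		return (0);
	case SOPT_GET:
		return (sooptcopyout(sopt, optvalp, sizeof(*optvalp)));
	default:
		return (EINVAL);
	}
}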