Diffstat (limited to 'sys/netinet')
-rw-r--r--  sys/netinet/in_mcast.c           | 113
-rw-r--r--  sys/netinet/in_pcb.c             |  13
-rw-r--r--  sys/netinet/libalias/alias_db.c  |   2
-rw-r--r--  sys/netinet/siftr.c              |   2
-rw-r--r--  sys/netinet/tcp_hpts_test.c      |  20
-rw-r--r--  sys/netinet/tcp_input.c          |   2
-rw-r--r--  sys/netinet/tcp_output.c         |   2
-rw-r--r--  sys/netinet/tcp_stacks/bbr.c     |   8
-rw-r--r--  sys/netinet/tcp_stacks/rack.c    |  83
-rw-r--r--  sys/netinet/tcp_syncache.c       |  35
-rw-r--r--  sys/netinet/udp_usrreq.c         |  15
11 files changed, 147 insertions(+), 148 deletions(-)
diff --git a/sys/netinet/in_mcast.c b/sys/netinet/in_mcast.c
index f5b20c49ffd2..ba112afbf002 100644
--- a/sys/netinet/in_mcast.c
+++ b/sys/netinet/in_mcast.c
@@ -159,9 +159,6 @@ static struct ip_moptions *
static int inp_get_source_filters(struct inpcb *, struct sockopt *);
static int inp_join_group(struct inpcb *, struct sockopt *);
static int inp_leave_group(struct inpcb *, struct sockopt *);
-static struct ifnet *
- inp_lookup_mcast_ifp(const struct inpcb *,
- const struct sockaddr_in *, const struct in_addr);
static int inp_block_unblock_source(struct inpcb *, struct sockopt *);
static int inp_set_multicast_if(struct inpcb *, struct sockopt *);
static int inp_set_source_filters(struct inpcb *, struct sockopt *);
@@ -1832,69 +1829,55 @@ inp_getmoptions(struct inpcb *inp, struct sockopt *sopt)
}
/*
- * Look up the ifnet to use for a multicast group membership,
- * given the IPv4 address of an interface, and the IPv4 group address.
- *
- * This routine exists to support legacy multicast applications
- * which do not understand that multicast memberships are scoped to
- * specific physical links in the networking stack, or which need
- * to join link-scope groups before IPv4 addresses are configured.
- *
- * Use this socket's current FIB number for any required FIB lookup.
- * If ina is INADDR_ANY, look up the group address in the unicast FIB,
- * and use its ifp; usually, this points to the default next-hop.
- *
- * If the FIB lookup fails, attempt to use the first non-loopback
- * interface with multicast capability in the system as a
- * last resort. The legacy IPv4 ASM API requires that we do
- * this in order to allow groups to be joined when the routing
- * table has not yet been populated during boot.
- *
- * Returns NULL if no ifp could be found, otherwise return referenced ifp.
+ * Look up the ifnet to use when joining a multicast group, via the legacy
+ * IP_ADD_MEMBERSHIP or the more modern MCAST_JOIN_GROUP socket option.
*
- * FUTURE: Implement IPv4 source-address selection.
+ * If the interface index was specified explicitly, just use it. If the
+ * address was specified (legacy), try to find a matching interface. Else
+ * (index == 0 && no address) do a route lookup. If that fails for a modern
+ * MCAST_JOIN_GROUP, return failure; for legacy IP_ADD_MEMBERSHIP, find the
+ * first multicast-capable interface.
*/
static struct ifnet *
-inp_lookup_mcast_ifp(const struct inpcb *inp,
- const struct sockaddr_in *gsin, const struct in_addr ina)
+inp_lookup_mcast_ifp(const struct inpcb *inp, const struct in_addr maddr,
+const struct in_addr *ina, const u_int index)
{
struct ifnet *ifp;
struct nhop_object *nh;
NET_EPOCH_ASSERT();
- KASSERT(inp != NULL, ("%s: inp must not be NULL", __func__));
- KASSERT(gsin->sin_family == AF_INET, ("%s: not AF_INET", __func__));
- KASSERT(IN_MULTICAST(ntohl(gsin->sin_addr.s_addr)),
- ("%s: not multicast", __func__));
- ifp = NULL;
- if (!in_nullhost(ina)) {
- INADDR_TO_IFP(ina, ifp);
+ if (index != 0)
+ return (ifnet_byindex_ref(index));
+
+ if (ina != NULL && !in_nullhost(*ina)) {
+ INADDR_TO_IFP(*ina, ifp);
if (ifp != NULL)
if_ref(ifp);
- } else {
- nh = fib4_lookup(inp->inp_inc.inc_fibnum, gsin->sin_addr, 0, NHR_NONE, 0);
- if (nh != NULL) {
- ifp = nh->nh_ifp;
- if_ref(ifp);
- } else {
- struct in_ifaddr *ia;
- struct ifnet *mifp;
-
- mifp = NULL;
- CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
- mifp = ia->ia_ifp;
- if (!(mifp->if_flags & IFF_LOOPBACK) &&
- (mifp->if_flags & IFF_MULTICAST)) {
- ifp = mifp;
- if_ref(ifp);
- break;
- }
+ return (ifp);
+ }
+
+ nh = fib4_lookup(inp->inp_inc.inc_fibnum, maddr, 0, NHR_NONE, 0);
+ if (nh != NULL) {
+ ifp = nh->nh_ifp;
+ if_ref(ifp);
+ return (ifp);
+ }
+
+ if (ina != NULL) {
+ struct in_ifaddr *ia;
+
+ CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
+ if (!(ia->ia_ifp->if_flags & IFF_LOOPBACK) &&
+ (ia->ia_ifp->if_flags & IFF_MULTICAST)) {
+ ifp = ia->ia_ifp;
+ if_ref(ifp);
+ return (ifp);
}
}
}
- return (ifp);
+ return (NULL);
}
/*
@@ -1926,13 +1909,13 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
switch (sopt->sopt_name) {
case IP_ADD_MEMBERSHIP: {
struct ip_mreqn mreqn;
+ bool mreq;
- if (sopt->sopt_valsize == sizeof(struct ip_mreqn))
- error = sooptcopyin(sopt, &mreqn,
- sizeof(struct ip_mreqn), sizeof(struct ip_mreqn));
- else
- error = sooptcopyin(sopt, &mreqn,
- sizeof(struct ip_mreq), sizeof(struct ip_mreq));
+ mreq = (sopt->sopt_valsize != sizeof(struct ip_mreqn));
+
+ error = sooptcopyin(sopt, &mreqn,
+ mreq ? sizeof(struct ip_mreq) : sizeof(struct ip_mreqn),
+ mreq ? sizeof(struct ip_mreq) : sizeof(struct ip_mreqn));
if (error)
return (error);
@@ -1943,12 +1926,9 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
return (EINVAL);
NET_EPOCH_ENTER(et);
- if (sopt->sopt_valsize == sizeof(struct ip_mreqn) &&
- mreqn.imr_ifindex != 0)
- ifp = ifnet_byindex_ref(mreqn.imr_ifindex);
- else
- ifp = inp_lookup_mcast_ifp(inp, &gsa->sin,
- mreqn.imr_address);
+ ifp = inp_lookup_mcast_ifp(inp, mreqn.imr_multiaddr,
+ mreq ? &mreqn.imr_address : NULL,
+ mreq ? 0 : mreqn.imr_ifindex);
NET_EPOCH_EXIT(et);
break;
}
@@ -1971,8 +1951,8 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
ssa->sin.sin_addr = mreqs.imr_sourceaddr;
NET_EPOCH_ENTER(et);
- ifp = inp_lookup_mcast_ifp(inp, &gsa->sin,
- mreqs.imr_interface);
+ ifp = inp_lookup_mcast_ifp(inp, mreqs.imr_multiaddr,
+ &mreqs.imr_interface, 0);
NET_EPOCH_EXIT(et);
CTR3(KTR_IGMPV3, "%s: imr_interface = 0x%08x, ifp = %p",
__func__, ntohl(mreqs.imr_interface.s_addr), ifp);
@@ -2013,7 +1993,8 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
return (EINVAL);
NET_EPOCH_ENTER(et);
- ifp = ifnet_byindex_ref(gsr.gsr_interface);
+ ifp = inp_lookup_mcast_ifp(inp, gsa->sin.sin_addr, NULL,
+ gsr.gsr_interface);
NET_EPOCH_EXIT(et);
if (ifp == NULL)
return (EADDRNOTAVAIL);
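For reference, a hypothetical userspace sketch (not part of this diff) of the three join flavors whose interface selection the reworked inp_lookup_mcast_ifp() now handles in one place: legacy ip_mreq (interface chosen by IPv4 address), ip_mreqn (interface chosen by index), and MCAST_JOIN_GROUP with gr_interface == 0 (route lookup, with no first-interface fallback). The group addresses and the "em0" interface name are illustrative only.

/*
 * Hypothetical sketch: three ways to name the interface when joining an
 * IPv4 multicast group.  Addresses and "em0" are illustrative.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <string.h>

static int
join_three_ways(int s)
{
	struct ip_mreq mreq;		/* legacy: interface by IPv4 address */
	struct ip_mreqn mreqn;		/* interface by index */
	struct group_req gr;		/* modern: index, or 0 for route lookup */
	struct sockaddr_in *gsin;

	memset(&mreq, 0, sizeof(mreq));
	inet_pton(AF_INET, "239.1.1.1", &mreq.imr_multiaddr);
	inet_pton(AF_INET, "192.0.2.10", &mreq.imr_interface);
	if (setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) == -1)
		return (-1);

	memset(&mreqn, 0, sizeof(mreqn));
	inet_pton(AF_INET, "239.1.1.2", &mreqn.imr_multiaddr);
	mreqn.imr_ifindex = if_nametoindex("em0");
	if (setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreqn, sizeof(mreqn)) == -1)
		return (-1);

	memset(&gr, 0, sizeof(gr));
	gsin = (struct sockaddr_in *)&gr.gr_group;
	gsin->sin_family = AF_INET;
	gsin->sin_len = sizeof(*gsin);
	inet_pton(AF_INET, "239.1.1.3", &gsin->sin_addr);
	gr.gr_interface = 0;	/* index 0: kernel does a route lookup, no fallback */
	return (setsockopt(s, IPPROTO_IP, MCAST_JOIN_GROUP, &gr, sizeof(gr)));
}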
diff --git a/sys/netinet/in_pcb.c b/sys/netinet/in_pcb.c
index dbe48242381d..712ff28768dc 100644
--- a/sys/netinet/in_pcb.c
+++ b/sys/netinet/in_pcb.c
@@ -2665,10 +2665,13 @@ in_pcbinshash(struct inpcb *inp)
INP_PCBPORTHASH(inp->inp_lport, pcbinfo->ipi_porthashmask)];
/*
- * Add entry to load balance group.
- * Only do this if SO_REUSEPORT_LB is set.
+ * Ignore SO_REUSEPORT_LB if the socket is connected. Really this case
+ * should be an error, but for UDP sockets it is not, and some
+ * applications erroneously set it on connected UDP sockets, so we can't
+ * change this without breaking compatibility.
*/
- if ((inp->inp_socket->so_options & SO_REUSEPORT_LB) != 0) {
+ if (!connected &&
+ (inp->inp_socket->so_options & SO_REUSEPORT_LB) != 0) {
int error = in_pcbinslbgrouphash(inp, M_NODOM);
if (error != 0)
return (error);
@@ -2770,6 +2773,10 @@ in_pcbrehash(struct inpcb *inp)
connected = !in_nullhost(inp->inp_faddr);
}
+ /* See the comment in in_pcbinshash(). */
+ if (connected && (inp->inp_flags & INP_INLBGROUP) != 0)
+ in_pcbremlbgrouphash(inp);
+
/*
* When rehashing, the caller must ensure that either the new or the old
* foreign address was unspecified.
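For context, a hedged userspace sketch of the pattern the new comment tolerates: SO_REUSEPORT_LB set on a UDP socket that is later connected. With this change the connected PCB is kept out of (or removed from) the load-balance group rather than the combination being treated as an error. The port number and loopback peer are illustrative.

/*
 * Hedged sketch: SO_REUSEPORT_LB on a UDP socket that later connects.
 * Port and peer are illustrative.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>

static int
udp_lb_then_connect(void)
{
	struct sockaddr_in local, peer;
	int s, one = 1;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
		return (-1);
	if (setsockopt(s, SOL_SOCKET, SO_REUSEPORT_LB, &one, sizeof(one)) == -1)
		return (-1);

	memset(&local, 0, sizeof(local));
	local.sin_family = AF_INET;
	local.sin_len = sizeof(local);
	local.sin_port = htons(5353);
	if (bind(s, (struct sockaddr *)&local, sizeof(local)) == -1)
		return (-1);

	/* Connecting pins the 4-tuple; the PCB now stays out of the LB group. */
	memset(&peer, 0, sizeof(peer));
	peer.sin_family = AF_INET;
	peer.sin_len = sizeof(peer);
	peer.sin_port = htons(5353);
	peer.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	return (connect(s, (struct sockaddr *)&peer, sizeof(peer)));
}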
diff --git a/sys/netinet/libalias/alias_db.c b/sys/netinet/libalias/alias_db.c
index c143d74a2f45..41f0a328daec 100644
--- a/sys/netinet/libalias/alias_db.c
+++ b/sys/netinet/libalias/alias_db.c
@@ -2181,7 +2181,7 @@ LibAliasInit(struct libalias *la)
#undef malloc /* XXX: ugly */
la = malloc(sizeof *la, M_ALIAS, M_WAITOK | M_ZERO);
#else
- la = calloc(sizeof *la, 1);
+ la = calloc(1, sizeof *la);
if (la == NULL)
return (la);
#endif
diff --git a/sys/netinet/siftr.c b/sys/netinet/siftr.c
index 374b5595fcbc..5b89ca026e85 100644
--- a/sys/netinet/siftr.c
+++ b/sys/netinet/siftr.c
@@ -519,7 +519,7 @@ siftr_pkt_manager_thread(void *arg)
if (log_buf != NULL) {
alq_post_flags(siftr_alq, log_buf, 0);
}
- for (;cnt > 0; cnt--) {
+ for (; cnt > 0; cnt--) {
pkt_node = STAILQ_FIRST(&tmp_pkt_queue);
STAILQ_REMOVE_HEAD(&tmp_pkt_queue, nodes);
free(pkt_node, M_SIFTR_PKTNODE);
diff --git a/sys/netinet/tcp_hpts_test.c b/sys/netinet/tcp_hpts_test.c
index bab5827e0572..c5dc9cb5b03b 100644
--- a/sys/netinet/tcp_hpts_test.c
+++ b/sys/netinet/tcp_hpts_test.c
@@ -27,6 +27,7 @@
#include <tests/ktest.h>
#include <sys/cdefs.h>
+#include "opt_inet.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
@@ -119,6 +120,8 @@ SYSCTL_INT(_net_inet_tcp_hpts_test, OID_AUTO, exit_on_failure, CTLFLAG_RW,
} \
} while (0)
+#ifdef TCP_HPTS_KTEST
+
static void
dump_hpts_entry(struct ktest_test_context *ctx, struct tcp_hpts_entry *hpts)
{
@@ -1658,5 +1661,22 @@ static const struct ktest_test_info tests[] = {
KTEST_INFO(generation_count_validation),
};
+#else /* TCP_HPTS_KTEST */
+
+/*
+ * Stub to indicate that the TCP HPTS ktest is not enabled.
+ */
+KTEST_FUNC(module_load_without_tests)
+{
+ KTEST_LOG(ctx, "Warning: TCP HPTS ktest is not enabled");
+ return (0);
+}
+
+static const struct ktest_test_info tests[] = {
+ KTEST_INFO(module_load_without_tests),
+};
+
+#endif
+
KTEST_MODULE_DECLARE(ktest_tcphpts, tests);
KTEST_MODULE_DEPEND(ktest_tcphpts, tcphpts);
diff --git a/sys/netinet/tcp_input.c b/sys/netinet/tcp_input.c
index dd27ec77c1af..2146b0cac48f 100644
--- a/sys/netinet/tcp_input.c
+++ b/sys/netinet/tcp_input.c
@@ -219,7 +219,7 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
&VNET_NAME(tcp_do_autorcvbuf), 0,
"Enable automatic receive buffer sizing");
-VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
+VNET_DEFINE(int, tcp_autorcvbuf_max) = 8*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
&VNET_NAME(tcp_autorcvbuf_max), 0,
"Max size of automatic receive buffer");
diff --git a/sys/netinet/tcp_output.c b/sys/netinet/tcp_output.c
index 2dfb7faf56e3..208f72c4661c 100644
--- a/sys/netinet/tcp_output.c
+++ b/sys/netinet/tcp_output.c
@@ -123,7 +123,7 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_inc, CTLFLAG_VNET | CTLFLAG_RW,
&VNET_NAME(tcp_autosndbuf_inc), 0,
"Incrementor step size of automatic send buffer");
-VNET_DEFINE(int, tcp_autosndbuf_max) = 2*1024*1024;
+VNET_DEFINE(int, tcp_autosndbuf_max) = 8*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
&VNET_NAME(tcp_autosndbuf_max), 0,
"Max size of automatic send buffer");
diff --git a/sys/netinet/tcp_stacks/bbr.c b/sys/netinet/tcp_stacks/bbr.c
index 66983edcdd73..10383bc0801e 100644
--- a/sys/netinet/tcp_stacks/bbr.c
+++ b/sys/netinet/tcp_stacks/bbr.c
@@ -477,7 +477,7 @@ bbr_log_rtt_shrinks(struct tcp_bbr *bbr, uint32_t cts, uint32_t applied,
uint16_t set);
static struct bbr_sendmap *
bbr_find_lowest_rsm(struct tcp_bbr *bbr);
-static __inline uint32_t
+static inline uint32_t
bbr_get_rtt(struct tcp_bbr *bbr, int32_t rtt_type);
static void
bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t pacing_delay,
@@ -1841,7 +1841,7 @@ bbr_counter_destroy(void)
}
-static __inline void
+static inline void
bbr_fill_in_logging_data(struct tcp_bbr *bbr, struct tcp_log_bbr *l, uint32_t cts)
{
memset(l, 0, sizeof(union tcp_log_stackspecific));
@@ -4206,7 +4206,7 @@ bbr_calc_thresh_tlp(struct tcpcb *tp, struct tcp_bbr *bbr,
/*
* Return one of three RTTs to use (in microseconds).
*/
-static __inline uint32_t
+static inline uint32_t
bbr_get_rtt(struct tcp_bbr *bbr, int32_t rtt_type)
{
uint32_t f_rtt;
@@ -4370,7 +4370,7 @@ bbr_timeout_rack(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts)
return (0);
}
-static __inline void
+static inline void
bbr_clone_rsm(struct tcp_bbr *bbr, struct bbr_sendmap *nrsm, struct bbr_sendmap *rsm, uint32_t start)
{
int idx;
diff --git a/sys/netinet/tcp_stacks/rack.c b/sys/netinet/tcp_stacks/rack.c
index c7962b57a69e..50077abdfd86 100644
--- a/sys/netinet/tcp_stacks/rack.c
+++ b/sys/netinet/tcp_stacks/rack.c
@@ -4730,7 +4730,7 @@ rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff
return (timely_says);
}
-static __inline int
+static inline int
rack_in_gp_window(struct tcpcb *tp, struct rack_sendmap *rsm)
{
if (SEQ_GEQ(rsm->r_start, tp->gput_seq) &&
@@ -4767,7 +4767,7 @@ rack_in_gp_window(struct tcpcb *tp, struct rack_sendmap *rsm)
return (0);
}
-static __inline void
+static inline void
rack_mark_in_gp_win(struct tcpcb *tp, struct rack_sendmap *rsm)
{
@@ -4784,7 +4784,7 @@ rack_mark_in_gp_win(struct tcpcb *tp, struct rack_sendmap *rsm)
rsm->r_flags &= ~RACK_IN_GP_WIN;
}
-static __inline void
+static inline void
rack_clear_gp_marks(struct tcpcb *tp, struct tcp_rack *rack)
{
/* A GP measurement is ending, clear all marks on the send map*/
@@ -4802,7 +4802,7 @@ rack_clear_gp_marks(struct tcpcb *tp, struct tcp_rack *rack)
}
-static __inline void
+static inline void
rack_tend_gp_marks(struct tcpcb *tp, struct tcp_rack *rack)
{
struct rack_sendmap *rsm = NULL;
@@ -6864,6 +6864,18 @@ rack_mark_lost(struct tcpcb *tp,
}
}
+static inline void
+rack_mark_nolonger_lost(struct tcp_rack *rack, struct rack_sendmap *rsm)
+{
+ KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)),
+ ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
+ rsm->r_flags &= ~RACK_WAS_LOST;
+ if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start))
+ rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start;
+ else
+ rack->r_ctl.rc_considered_lost = 0;
+}
+
/*
* RACK Timer, here we simply do logging and house keeping.
* the normal rack_output() function will call the
@@ -7005,7 +7017,7 @@ rack_setup_offset_for_rsm(struct tcp_rack *rack, struct rack_sendmap *src_rsm, s
rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
}
-static __inline void
+static inline void
rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm,
struct rack_sendmap *rsm, uint32_t start)
{
@@ -8130,13 +8142,7 @@ rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
* remove the lost designation and reduce the
* bytes considered lost.
*/
- rsm->r_flags &= ~RACK_WAS_LOST;
- KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)),
- ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
- if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start))
- rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start;
- else
- rack->r_ctl.rc_considered_lost = 0;
+ rack_mark_nolonger_lost(rack, rsm);
}
idx = rsm->r_rtr_cnt - 1;
rsm->r_tim_lastsent[idx] = ts;
@@ -9492,6 +9498,11 @@ do_rest_ofb:
if (rsm->r_flags & RACK_WAS_LOST) {
int my_chg;
+ /*
+ * Note here we do not use our rack_mark_nolonger_lost() function
+ * since we are moving our data pointer around and the
+ * ack'ed side is already not considered lost.
+ */
my_chg = (nrsm->r_end - nrsm->r_start);
KASSERT((rack->r_ctl.rc_considered_lost >= my_chg),
("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
@@ -9659,16 +9670,11 @@ do_rest_ofb:
changed += (rsm->r_end - rsm->r_start);
/* You get a count for acking a whole segment or more */
if (rsm->r_flags & RACK_WAS_LOST) {
- int my_chg;
-
- my_chg = (rsm->r_end - rsm->r_start);
- rsm->r_flags &= ~RACK_WAS_LOST;
- KASSERT((rack->r_ctl.rc_considered_lost >= my_chg),
- ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
- if (my_chg <= rack->r_ctl.rc_considered_lost)
- rack->r_ctl.rc_considered_lost -= my_chg;
- else
- rack->r_ctl.rc_considered_lost = 0;
+ /*
+ * Here we can use the inline function since
+ * the rsm is truly marked lost and now no longer lost.
+ */
+ rack_mark_nolonger_lost(rack, rsm);
}
rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
if (rsm->r_in_tmap) /* should be true */
@@ -9851,6 +9857,10 @@ do_rest_ofb:
if (rsm->r_flags & RACK_WAS_LOST) {
int my_chg;
+ /*
+ * Note here we are using hookery again so we can't
+ * use our rack_mark_nolonger_lost() function.
+ */
my_chg = (nrsm->r_end - nrsm->r_start);
KASSERT((rack->r_ctl.rc_considered_lost >= my_chg),
("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
@@ -9952,16 +9962,10 @@ do_rest_ofb:
rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
changed += (rsm->r_end - rsm->r_start);
if (rsm->r_flags & RACK_WAS_LOST) {
- int my_chg;
-
- my_chg = (rsm->r_end - rsm->r_start);
- rsm->r_flags &= ~RACK_WAS_LOST;
- KASSERT((rack->r_ctl.rc_considered_lost >= my_chg),
- ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
- if (my_chg <= rack->r_ctl.rc_considered_lost)
- rack->r_ctl.rc_considered_lost -= my_chg;
- else
- rack->r_ctl.rc_considered_lost = 0;
+ /*
+ * Here it is safe to use our function.
+ */
+ rack_mark_nolonger_lost(rack, rsm);
}
rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
@@ -10362,13 +10366,7 @@ more:
* and yet before retransmitting we get an ack
* which can happen due to reordering.
*/
- rsm->r_flags &= ~RACK_WAS_LOST;
- KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)),
- ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
- if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start))
- rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start;
- else
- rack->r_ctl.rc_considered_lost = 0;
+ rack_mark_nolonger_lost(rack, rsm);
}
rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__);
rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
@@ -10476,12 +10474,7 @@ more:
* which can happen due to reordering. In this
* case its only a partial ack of the send.
*/
- KASSERT((rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)),
- ("rsm:%p rack:%p rc_considered_lost goes negative th_ack:%u", rsm, rack, th_ack));
- if (rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start))
- rack->r_ctl.rc_considered_lost -= th_ack - rsm->r_start;
- else
- rack->r_ctl.rc_considered_lost = 0;
+ rack_mark_nolonger_lost(rack, rsm);
}
/*
* Clear the dup ack count for
diff --git a/sys/netinet/tcp_syncache.c b/sys/netinet/tcp_syncache.c
index f842a5678fa1..3cb538f7054d 100644
--- a/sys/netinet/tcp_syncache.c
+++ b/sys/netinet/tcp_syncache.c
@@ -1046,6 +1046,8 @@ abort:
*
* On syncache_socket() success the newly created socket
* has its underlying inp locked.
+ *
+ * *lsop is updated, if and only if 1 is returned.
*/
int
syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
@@ -1094,12 +1096,14 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
*/
SCH_UNLOCK(sch);
TCPSTAT_INC(tcps_sc_spurcookie);
- if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
+ if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
log(LOG_DEBUG, "%s; %s: Spurious ACK, "
"segment rejected "
"(syncookies disabled)\n",
s, __func__);
- goto failed;
+ free(s, M_TCPLOG);
+ }
+ return (0);
}
if (sch->sch_last_overflow <
time_uptime - SYNCOOKIE_LIFETIME) {
@@ -1109,12 +1113,14 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
*/
SCH_UNLOCK(sch);
TCPSTAT_INC(tcps_sc_spurcookie);
- if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
+ if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
log(LOG_DEBUG, "%s; %s: Spurious ACK, "
"segment rejected "
"(no syncache entry)\n",
s, __func__);
- goto failed;
+ free(s, M_TCPLOG);
+ }
+ return (0);
}
SCH_UNLOCK(sch);
}
@@ -1128,11 +1134,13 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
TCPSTAT_INC(tcps_sc_recvcookie);
} else {
TCPSTAT_INC(tcps_sc_failcookie);
- if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
+ if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
log(LOG_DEBUG, "%s; %s: Segment failed "
"SYNCOOKIE authentication, segment rejected "
"(probably spoofed)\n", s, __func__);
- goto failed;
+ free(s, M_TCPLOG);
+ }
+ return (0);
}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
/* If received ACK has MD5 signature, check it. */
@@ -1160,7 +1168,7 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
/*
* If listening socket requested TCP digests, check that
* received ACK has signature and it is correct.
- * If not, drop the ACK and leave sc entry in th cache,
+ * If not, drop the ACK and leave sc entry in the cache,
* because SYN was received with correct signature.
*/
if (sc->sc_flags & SCF_SIGNATURE) {
@@ -1206,9 +1214,9 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
"%s; %s: SEG.TSval %u < TS.Recent %u, "
"segment dropped\n", s, __func__,
to->to_tsval, sc->sc_tsreflect);
- free(s, M_TCPLOG);
}
SCH_UNLOCK(sch);
+ free(s, M_TCPLOG);
return (-1); /* Do not send RST */
}
@@ -1225,7 +1233,6 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
"expected, segment processed normally\n",
s, __func__);
free(s, M_TCPLOG);
- s = NULL;
}
}
@@ -1312,16 +1319,6 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
if (sc != &scs)
syncache_free(sc);
return (1);
-failed:
- if (sc != NULL) {
- TCPSTATES_DEC(TCPS_SYN_RECEIVED);
- if (sc != &scs)
- syncache_free(sc);
- }
- if (s != NULL)
- free(s, M_TCPLOG);
- *lsop = NULL;
- return (0);
}
static struct socket *
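A hypothetical caller fragment illustrating the contract the new comment pins down: *lsop is touched only on success, 0 means the segment was rejected (the caller may answer with an RST), and -1 means drop silently. The parameters past those visible in the hunk header (lsop, m, port) are assumed from context, and the handling shown is schematic rather than the actual tcp_input() code.

/*
 * Hypothetical caller fragment (schematic): trailing parameters are
 * assumed from context, not taken from this diff.
 */
static void
example_expand_caller(struct in_conninfo *inc, struct tcpopt *to,
    struct tcphdr *th, struct socket **lsop, struct mbuf *m, uint16_t port)
{
	switch (syncache_expand(inc, to, th, lsop, m, port)) {
	case 1:
		/* Expanded: *lsop points at the new socket, its inp locked. */
		break;
	case 0:
		/* Rejected: *lsop untouched; the caller may send an RST. */
		break;
	case -1:
		/* e.g. PAWS failure: drop silently, keep the sc entry. */
		break;
	}
}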
diff --git a/sys/netinet/udp_usrreq.c b/sys/netinet/udp_usrreq.c
index cea8a916679b..0a89d91dfc37 100644
--- a/sys/netinet/udp_usrreq.c
+++ b/sys/netinet/udp_usrreq.c
@@ -787,7 +787,8 @@ udplite_ctlinput(struct icmp *icmp)
static int
udp_pcblist(SYSCTL_HANDLER_ARGS)
{
- struct inpcb_iterator inpi = INP_ALL_ITERATOR(&V_udbinfo,
+ struct inpcbinfo *pcbinfo = udp_get_inpcbinfo(arg2);
+ struct inpcb_iterator inpi = INP_ALL_ITERATOR(pcbinfo,
INPLOOKUP_RLOCKPCB);
struct xinpgen xig;
struct inpcb *inp;
@@ -799,7 +800,7 @@ udp_pcblist(SYSCTL_HANDLER_ARGS)
if (req->oldptr == 0) {
int n;
- n = V_udbinfo.ipi_count;
+ n = pcbinfo->ipi_count;
n += imax(n / 8, 10);
req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
return (0);
@@ -810,8 +811,8 @@ udp_pcblist(SYSCTL_HANDLER_ARGS)
bzero(&xig, sizeof(xig));
xig.xig_len = sizeof xig;
- xig.xig_count = V_udbinfo.ipi_count;
- xig.xig_gen = V_udbinfo.ipi_gencnt;
+ xig.xig_count = pcbinfo->ipi_count;
+ xig.xig_gen = pcbinfo->ipi_gencnt;
xig.xig_sogen = so_gencnt;
error = SYSCTL_OUT(req, &xig, sizeof xig);
if (error)
@@ -838,9 +839,9 @@ udp_pcblist(SYSCTL_HANDLER_ARGS)
* that something happened while we were processing this
* request, and it might be necessary to retry.
*/
- xig.xig_gen = V_udbinfo.ipi_gencnt;
+ xig.xig_gen = pcbinfo->ipi_gencnt;
xig.xig_sogen = so_gencnt;
- xig.xig_count = V_udbinfo.ipi_count;
+ xig.xig_count = pcbinfo->ipi_count;
error = SYSCTL_OUT(req, &xig, sizeof xig);
}
@@ -848,7 +849,7 @@ udp_pcblist(SYSCTL_HANDLER_ARGS)
}
SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist,
- CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
+ CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, IPPROTO_UDP,
udp_pcblist, "S,xinpcb",
"List of active UDP sockets");