author    Randall Stewart <rrs@FreeBSD.org>  2020-02-12 13:31:36 +0000
committer Randall Stewart <rrs@FreeBSD.org>  2020-02-12 13:31:36 +0000
commit    481be5de9deed47eae529c7e5df890520ebc6837 (patch)
tree      ae00086429032445d9eb8f0f606d1f7d3f20561a
parent    df341f5986e63970ef51eb9d751681b13e62a12f (diff)
download  src-test2-481be5de9deed47eae529c7e5df890520ebc6837.tar.gz
          src-test2-481be5de9deed47eae529c7e5df890520ebc6837.zip
-rw-r--r--  sys/netinet/cc/cc_cdg.c        2
-rw-r--r--  sys/netinet/cc/cc_dctcp.c      4
-rw-r--r--  sys/netinet/cc/cc_htcp.c       2
-rw-r--r--  sys/netinet/icmp6.h            4
-rw-r--r--  sys/netinet/if_ether.c         4
-rw-r--r--  sys/netinet/igmp.c             4
-rw-r--r--  sys/netinet/in.c               4
-rw-r--r--  sys/netinet/in.h               6
-rw-r--r--  sys/netinet/in_mcast.c         6
-rw-r--r--  sys/netinet/in_pcb.c          18
-rw-r--r--  sys/netinet/in_pcb.h          26
-rw-r--r--  sys/netinet/in_proto.c         4
-rw-r--r--  sys/netinet/in_rmx.c           2
-rw-r--r--  sys/netinet/ip_divert.c        8
-rw-r--r--  sys/netinet/ip_dummynet.h      2
-rw-r--r--  sys/netinet/ip_fastfwd.c       2
-rw-r--r--  sys/netinet/ip_fw.h           22
-rw-r--r--  sys/netinet/ip_icmp.c          6
-rw-r--r--  sys/netinet/ip_id.c            2
-rw-r--r--  sys/netinet/ip_input.c        14
-rw-r--r--  sys/netinet/ip_mroute.c       10
-rw-r--r--  sys/netinet/ip_options.c       6
-rw-r--r--  sys/netinet/ip_reass.c         4
-rw-r--r--  sys/netinet/raw_ip.c           4
-rw-r--r--  sys/netinet/siftr.c            6
-rw-r--r--  sys/netinet/tcp.h              2
-rw-r--r--  sys/netinet/tcp_fastopen.c    22
-rw-r--r--  sys/netinet/tcp_fsm.h          2
-rw-r--r--  sys/netinet/tcp_input.c       22
-rw-r--r--  sys/netinet/tcp_log_buf.c     16
-rw-r--r--  sys/netinet/tcp_log_buf.h      2
-rw-r--r--  sys/netinet/tcp_lro.c         50
-rw-r--r--  sys/netinet/tcp_lro.h         12
-rw-r--r--  sys/netinet/tcp_output.c      24
-rw-r--r--  sys/netinet/tcp_ratelimit.c  344
-rw-r--r--  sys/netinet/tcp_ratelimit.h   15
-rw-r--r--  sys/netinet/tcp_reass.c       52
-rw-r--r--  sys/netinet/tcp_sack.c        14
-rw-r--r--  sys/netinet/tcp_subr.c        46
-rw-r--r--  sys/netinet/tcp_syncache.c    14
-rw-r--r--  sys/netinet/tcp_timer.c       20
-rw-r--r--  sys/netinet/tcp_timer.h        2
-rw-r--r--  sys/netinet/tcp_usrreq.c      18
-rw-r--r--  sys/netinet/tcp_var.h          4
-rw-r--r--  sys/netinet/udp.h              2
-rw-r--r--  sys/netinet/udp_usrreq.c       6
-rw-r--r--  sys/netinet/udp_var.h          2
-rw-r--r--  sys/netinet/udplite.h          2
48 files changed, 576 insertions, 289 deletions
diff --git a/sys/netinet/cc/cc_cdg.c b/sys/netinet/cc/cc_cdg.c
index 44c566f73dbf..deab55a24e94 100644
--- a/sys/netinet/cc/cc_cdg.c
+++ b/sys/netinet/cc/cc_cdg.c
@@ -607,7 +607,7 @@ cdg_ack_received(struct cc_var *ccv, uint16_t ack_type)
congestion = prob_backoff(qdiff_max);
else if (cdg_data->max_qtrend > 0)
congestion = prob_backoff(cdg_data->max_qtrend);
-
+
/* Update estimate of queue state. */
if (cdg_data->min_qtrend > 0 &&
cdg_data->max_qtrend <= 0) {
diff --git a/sys/netinet/cc/cc_dctcp.c b/sys/netinet/cc/cc_dctcp.c
index 72aa8f73c0d3..13267217485c 100644
--- a/sys/netinet/cc/cc_dctcp.c
+++ b/sys/netinet/cc/cc_dctcp.c
@@ -274,9 +274,9 @@ dctcp_cong_signal(struct cc_var *ccv, uint32_t type)
dctcp_data->bytes_total = 0;
dctcp_data->save_sndnxt = CCV(ccv, snd_nxt);
} else
- CCV(ccv, snd_ssthresh) =
+ CCV(ccv, snd_ssthresh) =
max((cwin - (((uint64_t)cwin *
- dctcp_data->alpha) >> (DCTCP_SHIFT+1))),
+ dctcp_data->alpha) >> (DCTCP_SHIFT+1))),
2 * mss);
CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh);
ENTER_CONGRECOVERY(CCV(ccv, t_flags));
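As a worked check of the ssthresh computation in the hunk above, here is a
minimal standalone sketch. It assumes (this hunk does not show it) that alpha
is scaled so that full congestion corresponds to 1 << DCTCP_SHIFT with
DCTCP_SHIFT == 10; all names below are local to the example.

	#include <stdio.h>
	#include <stdint.h>

	#define DCTCP_SHIFT 10	/* assumed scaling, matching the shift in the hunk */

	int
	main(void)
	{
		uint64_t cwin = 100000, alpha = 1024, mss = 1448;
		/* ssthresh = max(cwin - cwin * alpha / 2^(DCTCP_SHIFT+1), 2 * mss) */
		uint64_t ssthresh = cwin - ((cwin * alpha) >> (DCTCP_SHIFT + 1));
		if (ssthresh < 2 * mss)
			ssthresh = 2 * mss;
		printf("%ju\n", (uintmax_t)ssthresh);	/* 50000: cwnd halves at max alpha */
		return (0);
	}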
diff --git a/sys/netinet/cc/cc_htcp.c b/sys/netinet/cc/cc_htcp.c
index 1686a4e5553a..273ebf3b6a52 100644
--- a/sys/netinet/cc/cc_htcp.c
+++ b/sys/netinet/cc/cc_htcp.c
@@ -364,7 +364,7 @@ htcp_post_recovery(struct cc_var *ccv)
pipe = tcp_compute_pipe(ccv->ccvc.tcp);
else
pipe = CCV(ccv, snd_max) - ccv->curack;
-
+
if (pipe < CCV(ccv, snd_ssthresh))
/*
	 * Ensure that cwnd does not collapse to 1 MSS under
diff --git a/sys/netinet/icmp6.h b/sys/netinet/icmp6.h
index 53c8b57ee0c4..00fa21ec6840 100644
--- a/sys/netinet/icmp6.h
+++ b/sys/netinet/icmp6.h
@@ -344,7 +344,7 @@ struct nd_opt_mtu { /* MTU option */
#define ND_OPT_NONCE_LEN ((1 * 8) - 2)
#if ((ND_OPT_NONCE_LEN + 2) % 8) != 0
#error "(ND_OPT_NONCE_LEN + 2) must be a multiple of 8."
-#endif
+#endif
struct nd_opt_nonce { /* nonce option */
u_int8_t nd_opt_nonce_type;
u_int8_t nd_opt_nonce_len;
@@ -607,7 +607,7 @@ struct icmp6stat {
* for netinet6 code, it is already available in icp6s_outhist[].
*/
uint64_t icp6s_reflect;
- uint64_t icp6s_inhist[256];
+ uint64_t icp6s_inhist[256];
uint64_t icp6s_nd_toomanyopt; /* too many ND options */
struct icmp6errstat icp6s_outerrhist;
#define icp6s_odst_unreach_noroute \
diff --git a/sys/netinet/if_ether.c b/sys/netinet/if_ether.c
index cb3c335d0897..53c78c2a299c 100644
--- a/sys/netinet/if_ether.c
+++ b/sys/netinet/if_ether.c
@@ -211,7 +211,7 @@ arptimer(void *arg)
LLE_WLOCK(lle);
if (callout_pending(&lle->lle_timer)) {
/*
- * Here we are a bit odd here in the treatment of
+	 * We are a bit odd here in the treatment of
* active/pending. If the pending bit is set, it got
* rescheduled before I ran. The active
* bit we ignore, since if it was stopped
@@ -709,7 +709,7 @@ arpintr(struct mbuf *m)
layer = "ethernet";
break;
case ARPHRD_INFINIBAND:
- hlen = 20; /* RFC 4391, INFINIBAND_ALEN */
+ hlen = 20; /* RFC 4391, INFINIBAND_ALEN */
layer = "infiniband";
break;
case ARPHRD_IEEE1394:
diff --git a/sys/netinet/igmp.c b/sys/netinet/igmp.c
index 52aff9e69995..5ee99dbb91d3 100644
--- a/sys/netinet/igmp.c
+++ b/sys/netinet/igmp.c
@@ -877,7 +877,7 @@ out_locked:
* We may be updating the group for the first time since we switched
* to IGMPv3. If we are, then we must clear any recorded source lists,
* and transition to REPORTING state; the group timer is overloaded
- * for group and group-source query responses.
+ * for group and group-source query responses.
*
* Unlike IGMPv3, the delay per group should be jittered
* to avoid bursts of IGMPv2 reports.
@@ -2324,7 +2324,7 @@ igmp_initial_join(struct in_multi *inm, struct igmp_ifsoftc *igi)
struct ifnet *ifp;
struct mbufq *mq;
int error, retval, syncstates;
-
+
CTR4(KTR_IGMPV3, "%s: initial join 0x%08x on ifp %p(%s)", __func__,
ntohl(inm->inm_addr.s_addr), inm->inm_ifp, inm->inm_ifp->if_xname);
diff --git a/sys/netinet/in.c b/sys/netinet/in.c
index bf38e3c33500..fb44766fc61d 100644
--- a/sys/netinet/in.c
+++ b/sys/netinet/in.c
@@ -820,11 +820,11 @@ in_scrubprefix(struct in_ifaddr *target, u_int flags)
if ((target->ia_flags & IFA_ROUTE) == 0) {
int fibnum;
-
+
fibnum = V_rt_add_addr_allfibs ? RT_ALL_FIBS :
target->ia_ifp->if_fib;
rt_addrmsg(RTM_DELETE, &target->ia_ifa, fibnum);
-
+
/*
* Removing address from !IFF_UP interface or
* prefix which exists on other interface (along with route).
diff --git a/sys/netinet/in.h b/sys/netinet/in.h
index e3d7cf38eb91..84a209eef779 100644
--- a/sys/netinet/in.h
+++ b/sys/netinet/in.h
@@ -323,8 +323,8 @@ __END_DECLS
* Default local port range, used by IP_PORTRANGE_DEFAULT
*/
#define IPPORT_EPHEMERALFIRST 10000
-#define IPPORT_EPHEMERALLAST 65535
-
+#define IPPORT_EPHEMERALLAST 65535
+
/*
* Dynamic port range, used by IP_PORTRANGE_HIGH.
*/
@@ -381,7 +381,7 @@ __END_DECLS
(((in_addr_t)(i) & 0xffff0000) == 0xc0a80000))
#define IN_LOCAL_GROUP(i) (((in_addr_t)(i) & 0xffffff00) == 0xe0000000)
-
+
#define IN_ANY_LOCAL(i) (IN_LINKLOCAL(i) || IN_LOCAL_GROUP(i))
#define INADDR_LOOPBACK ((in_addr_t)0x7f000001)
diff --git a/sys/netinet/in_mcast.c b/sys/netinet/in_mcast.c
index 671dc675daed..b1b9923657e5 100644
--- a/sys/netinet/in_mcast.c
+++ b/sys/netinet/in_mcast.c
@@ -526,7 +526,7 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group,
IN_MULTI_LIST_UNLOCK();
if (inm != NULL)
return (0);
-
+
memset(&gsin, 0, sizeof(gsin));
gsin.sin_family = AF_INET;
gsin.sin_len = sizeof(struct sockaddr_in);
@@ -2207,7 +2207,7 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
goto out_inp_unlocked;
}
if (error) {
- CTR1(KTR_IGMPV3, "%s: in_joingroup_locked failed",
+ CTR1(KTR_IGMPV3, "%s: in_joingroup_locked failed",
__func__);
goto out_inp_locked;
}
@@ -2627,7 +2627,7 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
int i;
INP_WUNLOCK(inp);
-
+
CTR2(KTR_IGMPV3, "%s: loading %lu source list entries",
__func__, (unsigned long)msfr.msfr_nsrcs);
kss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs,
diff --git a/sys/netinet/in_pcb.c b/sys/netinet/in_pcb.c
index f519ee0144a2..be954c30ddef 100644
--- a/sys/netinet/in_pcb.c
+++ b/sys/netinet/in_pcb.c
@@ -1059,7 +1059,7 @@ in_pcbladdr(struct inpcb *inp, struct in_addr *faddr, struct in_addr *laddr,
/*
* If we found a route, use the address corresponding to
* the outgoing interface.
- *
+ *
* Otherwise assume faddr is reachable on a directly connected
* network and try to find a corresponding interface to take
* the source address from.
@@ -1454,13 +1454,13 @@ in_pcbrele_rlocked(struct inpcb *inp)
}
return (0);
}
-
+
KASSERT(inp->inp_socket == NULL, ("%s: inp_socket != NULL", __func__));
#ifdef TCPHPTS
if (inp->inp_in_hpts || inp->inp_in_input) {
struct tcp_hpts_entry *hpts;
/*
- * We should not be on the hpts at
+ * We should not be on the hpts at
* this point in any form. we must
* get the lock to be sure.
*/
@@ -1470,7 +1470,7 @@ in_pcbrele_rlocked(struct inpcb *inp)
hpts, inp);
mtx_unlock(&hpts->p_mtx);
hpts = tcp_input_lock(inp);
- if (inp->inp_in_input)
+ if (inp->inp_in_input)
panic("Hpts:%p inp:%p at free still on input hpts",
hpts, inp);
mtx_unlock(&hpts->p_mtx);
@@ -1508,7 +1508,7 @@ in_pcbrele_wlocked(struct inpcb *inp)
if (inp->inp_in_hpts || inp->inp_in_input) {
struct tcp_hpts_entry *hpts;
/*
- * We should not be on the hpts at
+ * We should not be on the hpts at
* this point in any form. we must
* get the lock to be sure.
*/
@@ -1518,7 +1518,7 @@ in_pcbrele_wlocked(struct inpcb *inp)
hpts, inp);
mtx_unlock(&hpts->p_mtx);
hpts = tcp_input_lock(inp);
- if (inp->inp_in_input)
+ if (inp->inp_in_input)
panic("Hpts:%p inp:%p at free still on input hpts",
hpts, inp);
mtx_unlock(&hpts->p_mtx);
@@ -1612,7 +1612,7 @@ in_pcbfree_deferred(epoch_context_t ctx)
#endif
#ifdef INET
inp_freemoptions(imo);
-#endif
+#endif
CURVNET_RESTORE();
}
@@ -2731,7 +2731,7 @@ ip_fini(void *xtp)
callout_stop(&ipport_tick_callout);
}
-/*
+/*
* The ipport_callout should start running at about the time we attach the
* inet or inet6 domains.
*/
@@ -2745,7 +2745,7 @@ ipport_tick_init(const void *unused __unused)
EVENTHANDLER_REGISTER(shutdown_pre_sync, ip_fini, NULL,
SHUTDOWN_PRI_DEFAULT);
}
-SYSINIT(ipport_tick_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE,
+SYSINIT(ipport_tick_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE,
ipport_tick_init, NULL);
void
diff --git a/sys/netinet/in_pcb.h b/sys/netinet/in_pcb.h
index cf4a613be53c..b874bc49c632 100644
--- a/sys/netinet/in_pcb.h
+++ b/sys/netinet/in_pcb.h
@@ -163,22 +163,22 @@ struct in_conninfo {
* (h) - Protected by the pcbhash lock for the inpcb
* (s) - Protected by another subsystem's locks
* (x) - Undefined locking
- *
+ *
* Notes on the tcp_hpts:
- *
+ *
* First Hpts lock order is
* 1) INP_WLOCK()
- * 2) HPTS_LOCK() i.e. hpts->pmtx
+ * 2) HPTS_LOCK() i.e. hpts->pmtx
*
- * To insert a TCB on the hpts you *must* be holding the INP_WLOCK().
- * You may check the inp->inp_in_hpts flag without the hpts lock.
- * The hpts is the only one that will clear this flag holding
+ * To insert a TCB on the hpts you *must* be holding the INP_WLOCK().
+ * You may check the inp->inp_in_hpts flag without the hpts lock.
+ * The hpts is the only one that will clear this flag holding
* only the hpts lock. This means that in your tcp_output()
- * routine when you test for the inp_in_hpts flag to be 1
- * it may be transitioning to 0 (by the hpts).
- * That's ok since that will just mean an extra call to tcp_output
+ * routine when you test for the inp_in_hpts flag to be 1
+ * it may be transitioning to 0 (by the hpts).
+ * That's ok since that will just mean an extra call to tcp_output
* that most likely will find the call you executed
- * (when the mis-match occured) will have put the TCB back
+ * (when the mis-match occurred) will have put the TCB back
* on the hpts and it will return. If your
* call did not add the inp back to the hpts then you will either
* over-send or the cwnd will block you from sending more.
@@ -189,7 +189,7 @@ struct in_conninfo {
* the INP_WLOCK() or from destroying your TCB where again
* you should already have the INP_WLOCK().
*
- * The inp_hpts_cpu, inp_hpts_cpu_set, inp_input_cpu and
+ * The inp_hpts_cpu, inp_hpts_cpu_set, inp_input_cpu and
* inp_input_cpu_set fields are controlled completely by
* the hpts. Do not ever set these. The inp_hpts_cpu_set
* and inp_input_cpu_set fields indicate if the hpts has
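A minimal sketch of the lock ordering described in the note above; the
INP_WLOCK()/hpts->p_mtx names follow the comment, while tcb_queue_on_hpts()
is a hypothetical caller used purely for illustration (kernel context assumed).

	/* Hypothetical caller obeying the documented order: INP first, then hpts. */
	static void
	tcb_queue_on_hpts(struct inpcb *inp, struct tcp_hpts_entry *hpts)
	{
		INP_WLOCK(inp);			/* 1) INP_WLOCK() */
		mtx_lock(&hpts->p_mtx);		/* 2) HPTS_LOCK(), i.e. hpts->p_mtx */
		/* ... place the TCB on the hpts wheel ... */
		mtx_unlock(&hpts->p_mtx);
		INP_WUNLOCK(inp);
	}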
@@ -243,14 +243,14 @@ struct inpcb {
* fits in the pacing window (i&b). */
/*
* Note the next fields are protected by a
- * different lock (hpts-lock). This means that
+ * different lock (hpts-lock). This means that
* they must correspond in size to the smallest
* protectable bit field (uint8_t on x86, and
 * other platforms potentially uint32_t?). Also
 * since CPU switches can occur at different times the two
 * fields can *not* be collapsed into a single bit field.
*/
-#if defined(__amd64__) || defined(__i386__)
+#if defined(__amd64__) || defined(__i386__)
volatile uint8_t inp_in_hpts; /* on output hpts (lock b) */
volatile uint8_t inp_in_input; /* on input hpts (lock b) */
#else
diff --git a/sys/netinet/in_proto.c b/sys/netinet/in_proto.c
index b719ff542c60..75e049122800 100644
--- a/sys/netinet/in_proto.c
+++ b/sys/netinet/in_proto.c
@@ -146,7 +146,7 @@ struct protosw inetsw[] = {
.pr_usrreqs = &tcp_usrreqs
},
#ifdef SCTP
-{
+{
.pr_type = SOCK_SEQPACKET,
.pr_domain = &inetdomain,
.pr_protocol = IPPROTO_SCTP,
@@ -158,7 +158,7 @@ struct protosw inetsw[] = {
.pr_drain = sctp_drain,
.pr_usrreqs = &sctp_usrreqs
},
-{
+{
.pr_type = SOCK_STREAM,
.pr_domain = &inetdomain,
.pr_protocol = IPPROTO_SCTP,
diff --git a/sys/netinet/in_rmx.c b/sys/netinet/in_rmx.c
index 0486752bb206..8ce8c2abddb7 100644
--- a/sys/netinet/in_rmx.c
+++ b/sys/netinet/in_rmx.c
@@ -187,7 +187,7 @@ in_ifadown(struct ifaddr *ifa, int delete)
}
/*
- * inet versions of rt functions. These have fib extensions and
+ * inet versions of rt functions. These have fib extensions and
* for now will just reference the _fib variants.
 * Eventually this order will be reversed.
*/
diff --git a/sys/netinet/ip_divert.c b/sys/netinet/ip_divert.c
index 80bfd86c1951..2611a8792be3 100644
--- a/sys/netinet/ip_divert.c
+++ b/sys/netinet/ip_divert.c
@@ -57,7 +57,7 @@ __FBSDID("$FreeBSD$");
#include <net/if.h>
#include <net/if_var.h>
-#include <net/netisr.h>
+#include <net/netisr.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
@@ -252,10 +252,10 @@ divert_packet(struct mbuf *m, bool incoming)
*/
if (m->m_pkthdr.rcvif) {
/*
- * Hide the actual interface name in there in the
+	 * Hide the actual interface name in the
* sin_zero array. XXX This needs to be moved to a
* different sockaddr type for divert, e.g.
- * sockaddr_div with multiple fields like
+ * sockaddr_div with multiple fields like
* sockaddr_dl. Presently we have only 7 bytes
* but that will do for now as most interfaces
* are 4 or less + 2 or less bytes for unit.
@@ -268,7 +268,7 @@ divert_packet(struct mbuf *m, bool incoming)
* and re-uses the sockaddr_in as suggested in the man pages,
* this iface name will come along for the ride.
* (see div_output for the other half of this.)
- */
+ */
strlcpy(divsrc.sin_zero, m->m_pkthdr.rcvif->if_xname,
sizeof(divsrc.sin_zero));
}
diff --git a/sys/netinet/ip_dummynet.h b/sys/netinet/ip_dummynet.h
index 9d64b3e9e8b5..173debdaccc0 100644
--- a/sys/netinet/ip_dummynet.h
+++ b/sys/netinet/ip_dummynet.h
@@ -277,7 +277,7 @@ the objects used by dummynet:
to delay and bandwidth;
+ dn_profile describes a delay profile;
+ dn_flow describes the flow status (flow id, statistics)
-
+
+ dn_sch describes a scheduler
+ dn_fs describes a flowset (msk, weight, queue parameters)
diff --git a/sys/netinet/ip_fastfwd.c b/sys/netinet/ip_fastfwd.c
index 77a08b1a8af1..502bd15cb072 100644
--- a/sys/netinet/ip_fastfwd.c
+++ b/sys/netinet/ip_fastfwd.c
@@ -57,7 +57,7 @@
*
* We try to do the least expensive (in CPU ops) checks and operations
* first to catch junk with as little overhead as possible.
- *
+ *
* We take full advantage of hardware support for IP checksum and
* fragmentation offloading.
*
diff --git a/sys/netinet/ip_fw.h b/sys/netinet/ip_fw.h
index 7a01c82ba58b..751505172928 100644
--- a/sys/netinet/ip_fw.h
+++ b/sys/netinet/ip_fw.h
@@ -34,7 +34,7 @@
* The default rule number. By the design of ip_fw, the default rule
* is the last one, so its number can also serve as the highest number
* allowed for a rule. The ip_fw code relies on both meanings of this
- * constant.
+ * constant.
*/
#define IPFW_DEFAULT_RULE 65535
@@ -239,7 +239,7 @@ enum ipfw_opcodes { /* arguments (4 byte each) */
O_FORWARD_MAC, /* fwd mac */
O_NAT, /* nope */
O_REASS, /* none */
-
+
/*
* More opcodes.
*/
@@ -277,7 +277,7 @@ enum ipfw_opcodes { /* arguments (4 byte each) */
O_SETFIB, /* arg1=FIB number */
O_FIB, /* arg1=FIB desired fib number */
-
+
O_SOCKARG, /* socket argument */
O_CALLRETURN, /* arg1=called rule number */
@@ -485,9 +485,9 @@ struct cfg_redir {
u_short pport_cnt; /* number of public ports */
u_short rport_cnt; /* number of remote ports */
int proto; /* protocol: tcp/udp */
- struct alias_link **alink;
+ struct alias_link **alink;
/* num of entry in spool chain */
- u_int16_t spool_cnt;
+ u_int16_t spool_cnt;
/* chain of spool instances */
LIST_HEAD(spool_chain, cfg_spool) spool_chain;
};
@@ -504,9 +504,9 @@ struct cfg_nat {
int mode; /* aliasing mode */
struct libalias *lib; /* libalias instance */
/* number of entry in spool chain */
- int redir_cnt;
+ int redir_cnt;
/* chain of redir instances */
- LIST_HEAD(redir_chain, cfg_redir) redir_chain;
+ LIST_HEAD(redir_chain, cfg_redir) redir_chain;
};
#endif
@@ -537,7 +537,7 @@ struct nat44_cfg_redir {
uint16_t pport_cnt; /* number of public ports */
uint16_t rport_cnt; /* number of remote ports */
uint16_t mode; /* type of redirect mode */
- uint16_t spool_cnt; /* num of entry in spool chain */
+ uint16_t spool_cnt; /* num of entry in spool chain */
uint16_t spare;
uint32_t proto; /* protocol: tcp/udp */
};
@@ -555,7 +555,7 @@ struct nat44_cfg_nat {
/* Nat command. */
typedef struct _ipfw_insn_nat {
ipfw_insn o;
- struct cfg_nat *nat;
+ struct cfg_nat *nat;
} ipfw_insn_nat;
/* Apply ipv6 mask on ipv6 addr */
@@ -579,7 +579,7 @@ typedef struct _ipfw_insn_icmp6 {
	uint32_t d[7]; /* XXX This number is related to the netinet/icmp6.h
* define ICMP6_MAXTYPE
* as follows: n = ICMP6_MAXTYPE/32 + 1
- * Actually is 203
+ * Actually is 203
*/
} ipfw_insn_icmp6;
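The sizing arithmetic above can be checked at compile time; ICMP6_MAXTYPE is
taken here as 201 (an assumption — the exact value depends on the icmp6.h
revision, and either 201 or 203 yields the same result):

	#define ICMP6_MAXTYPE 201	/* assumption for the check */
	/* n = ICMP6_MAXTYPE/32 + 1 -> 201/32 + 1 == 7 32-bit words (types 0..223) */
	_Static_assert(ICMP6_MAXTYPE / 32 + 1 == 7, "d[7] holds one bit per type");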
@@ -900,7 +900,7 @@ typedef struct _ipfw_obj_tentry {
uint32_t key; /* uid/gid/port */
struct in6_addr addr6; /* IPv6 address */
char iface[IF_NAMESIZE]; /* interface name */
- struct tflow_entry flow;
+ struct tflow_entry flow;
} k;
union {
ipfw_table_value value; /* value data */
diff --git a/sys/netinet/ip_icmp.c b/sys/netinet/ip_icmp.c
index 356414fbdc73..cbae3953b016 100644
--- a/sys/netinet/ip_icmp.c
+++ b/sys/netinet/ip_icmp.c
@@ -563,7 +563,7 @@ icmp_input(struct mbuf **mp, int *offp, int proto)
* - The outer IP header has no options.
* - The outer IP header, the ICMP header, the inner IP header,
* and the first n bytes of the inner payload are contiguous.
- * n is at least 8, but might be larger based on
+ * n is at least 8, but might be larger based on
* ICMP_ADVLENPREF. See its definition in ip_icmp.h.
*/
ctlfunc = inetsw[ip_protox[icp->icmp_ip.ip_p]].pr_ctlinput;
@@ -629,7 +629,7 @@ icmp_input(struct mbuf **mp, int *offp, int proto)
(struct sockaddr *)&icmpdst, m->m_pkthdr.rcvif);
if (ia == NULL)
break;
- if (ia->ia_ifp == NULL)
+ if (ia->ia_ifp == NULL)
break;
icp->icmp_type = ICMP_MASKREPLY;
if (V_icmpmaskfake == 0)
@@ -937,7 +937,7 @@ done:
*
* @src: sockaddr with address of redirect originator
* @dst: sockaddr with destination in question
- * @gateway: new proposed gateway
+ * @gateway: new proposed gateway
*
* Returns 0 on success.
*/
diff --git a/sys/netinet/ip_id.c b/sys/netinet/ip_id.c
index d124d541442a..41a6e400c76d 100644
--- a/sys/netinet/ip_id.c
+++ b/sys/netinet/ip_id.c
@@ -280,7 +280,7 @@ ipid_sysinit(void)
mtx_init(&V_ip_id_mtx, "ip_id_mtx", NULL, MTX_DEF);
V_ip_id = counter_u64_alloc(M_WAITOK);
-
+
CPU_FOREACH(i)
arc4rand(zpcpu_get_cpu(V_ip_id, i), sizeof(uint64_t), 0);
}
diff --git a/sys/netinet/ip_input.c b/sys/netinet/ip_input.c
index 8991262cc120..691c83925387 100644
--- a/sys/netinet/ip_input.c
+++ b/sys/netinet/ip_input.c
@@ -639,12 +639,12 @@ passin:
return;
/* greedy RSVP, snatches any PATH packet of the RSVP protocol and no
- * matter if it is destined to another node, or whether it is
+ * matter if it is destined to another node, or whether it is
* a multicast one, RSVP wants it! and prevents it from being forwarded
* anywhere else. Also checks if the rsvp daemon is running before
* grabbing the packet.
*/
- if (V_rsvp_on && ip->ip_p==IPPROTO_RSVP)
+ if (V_rsvp_on && ip->ip_p==IPPROTO_RSVP)
goto ours;
/*
@@ -675,7 +675,7 @@ passin:
* insert a workaround. If the packet got here, we already
* checked with carp_iamatch() and carp_forus().
*/
- checkif = V_ip_checkinterface && (V_ipforwarding == 0) &&
+ checkif = V_ip_checkinterface && (V_ipforwarding == 0) &&
ifp != NULL && ((ifp->if_flags & IFF_LOOPBACK) == 0) &&
ifp->if_carp == NULL && (dchg == 0);
@@ -689,7 +689,7 @@ passin:
* arrived via the correct interface if checking is
* enabled.
*/
- if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr &&
+ if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr &&
(!checkif || ia->ia_ifp == ifp)) {
counter_u64_add(ia->ia_ifa.ifa_ipackets, 1);
counter_u64_add(ia->ia_ifa.ifa_ibytes,
@@ -1282,7 +1282,7 @@ ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
}
bcopy(sdp, sdl2, sdp->sdl_len);
} else {
-makedummy:
+makedummy:
sdl2->sdl_len =
offsetof(struct sockaddr_dl, sdl_data[0]);
sdl2->sdl_family = AF_LINK;
@@ -1408,13 +1408,13 @@ rsvp_input(struct mbuf **mp, int *offp, int proto)
* of the group to which the RSVP packet is addressed. But in this
* case we want to throw the packet away.
*/
-
+
if (!V_rsvp_on) {
m_freem(m);
return (IPPROTO_DONE);
}
- if (V_ip_rsvpd != NULL) {
+ if (V_ip_rsvpd != NULL) {
*mp = m;
rip_input(mp, offp, proto);
return (IPPROTO_DONE);
diff --git a/sys/netinet/ip_mroute.c b/sys/netinet/ip_mroute.c
index ccde0867f497..f7121e598e63 100644
--- a/sys/netinet/ip_mroute.c
+++ b/sys/netinet/ip_mroute.c
@@ -182,7 +182,7 @@ VNET_DEFINE_STATIC(vifi_t, numvifs);
VNET_DEFINE_STATIC(struct vif *, viftable);
#define V_viftable VNET(viftable)
/*
- * No one should be able to "query" this before initialisation happened in
+ * No one should be able to "query" this before initialisation happened in
* vnet_mroute_init(), so we should still be fine.
*/
SYSCTL_OPAQUE(_net_inet_ip, OID_AUTO, viftable, CTLFLAG_VNET | CTLFLAG_RD,
@@ -653,7 +653,7 @@ if_detached_event(void *arg __unused, struct ifnet *ifp)
MROUTER_UNLOCK();
}
-
+
/*
* Enable multicast forwarding.
*/
@@ -742,7 +742,7 @@ X_ip_mrouter_done(void)
bzero((caddr_t)V_viftable, sizeof(V_viftable));
V_numvifs = 0;
V_pim_assert_enabled = 0;
-
+
VIF_UNLOCK();
callout_stop(&V_expire_upcalls_ch);
@@ -2833,7 +2833,7 @@ vnet_mroute_uninit(const void *unused __unused)
V_nexpire = NULL;
}
-VNET_SYSUNINIT(vnet_mroute_uninit, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE,
+VNET_SYSUNINIT(vnet_mroute_uninit, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE,
vnet_mroute_uninit, NULL);
static int
@@ -2844,7 +2844,7 @@ ip_mroute_modevent(module_t mod, int type, void *unused)
case MOD_LOAD:
MROUTER_LOCK_INIT();
- if_detach_event_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
+ if_detach_event_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
if_detached_event, NULL, EVENTHANDLER_PRI_ANY);
if (if_detach_event_tag == NULL) {
printf("ip_mroute: unable to register "
diff --git a/sys/netinet/ip_options.c b/sys/netinet/ip_options.c
index cb9920d86d9e..92ce394d08ce 100644
--- a/sys/netinet/ip_options.c
+++ b/sys/netinet/ip_options.c
@@ -75,8 +75,8 @@ SYSCTL_INT(_net_inet_ip, IPCTL_SOURCEROUTE, sourceroute,
#define V_ip_dosourceroute VNET(ip_dosourceroute)
VNET_DEFINE_STATIC(int, ip_acceptsourceroute);
-SYSCTL_INT(_net_inet_ip, IPCTL_ACCEPTSOURCEROUTE, accept_sourceroute,
- CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip_acceptsourceroute), 0,
+SYSCTL_INT(_net_inet_ip, IPCTL_ACCEPTSOURCEROUTE, accept_sourceroute,
+ CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip_acceptsourceroute), 0,
"Enable accepting source routed IP packets");
#define V_ip_acceptsourceroute VNET(ip_acceptsourceroute)
@@ -208,7 +208,7 @@ ip_dooptions(struct mbuf *m, int pass)
* ICMP
*/
nosourcerouting:
- log(LOG_WARNING,
+ log(LOG_WARNING,
"attempted source route from %s "
"to %s\n",
inet_ntoa_r(ip->ip_src, srcbuf),
diff --git a/sys/netinet/ip_reass.c b/sys/netinet/ip_reass.c
index a0503cd614c2..969dd301065d 100644
--- a/sys/netinet/ip_reass.c
+++ b/sys/netinet/ip_reass.c
@@ -637,7 +637,7 @@ ipreass_cleanup(void *arg __unused, struct ifnet *ifp)
/*
* Skip processing if IPv4 reassembly is not initialised or
* torn down by ipreass_destroy().
- */
+ */
if (V_ipq_zone == NULL) {
CURVNET_RESTORE();
return;
@@ -750,7 +750,7 @@ sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS)
max = uma_zone_get_max(V_ipq_zone);
if (max == 0)
max = -1;
- } else
+ } else
max = 0;
error = sysctl_handle_int(oidp, &max, 0, req);
if (error || !req->newptr)
diff --git a/sys/netinet/raw_ip.c b/sys/netinet/raw_ip.c
index b331b9bdd53a..ed7ed099339f 100644
--- a/sys/netinet/raw_ip.c
+++ b/sys/netinet/raw_ip.c
@@ -160,7 +160,7 @@ rip_inshash(struct inpcb *inp)
INP_INFO_WLOCK_ASSERT(pcbinfo);
INP_WLOCK_ASSERT(inp);
-
+
if (inp->inp_ip_p != 0 &&
inp->inp_laddr.s_addr != INADDR_ANY &&
inp->inp_faddr.s_addr != INADDR_ANY) {
@@ -892,7 +892,7 @@ rip_detach(struct socket *so)
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
- KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
+ KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
("rip_detach: not closed"));
INP_INFO_WLOCK(&V_ripcbinfo);
diff --git a/sys/netinet/siftr.c b/sys/netinet/siftr.c
index e93bcf79b223..ba68fa2774f7 100644
--- a/sys/netinet/siftr.c
+++ b/sys/netinet/siftr.c
@@ -235,9 +235,9 @@ struct pkt_node {
/* Number of segments currently in the reassembly queue. */
int t_segqlen;
/* Flowid for the connection. */
- u_int flowid;
+ u_int flowid;
/* Flow type for the connection. */
- u_int flowtype;
+ u_int flowtype;
/* Link to next pkt_node in the list. */
STAILQ_ENTRY(pkt_node) nodes;
};
@@ -1103,7 +1103,7 @@ siftr_chkpkt6(struct mbuf **m, struct ifnet *ifp, int flags, struct inpcb *inp)
* Only pkts selected by the tcp port filter
* can be inserted into the pkt_queue
*/
- if ((siftr_port_filter != 0) &&
+ if ((siftr_port_filter != 0) &&
(siftr_port_filter != ntohs(inp->inp_lport)) &&
(siftr_port_filter != ntohs(inp->inp_fport))) {
goto inp_unlock6;
diff --git a/sys/netinet/tcp.h b/sys/netinet/tcp.h
index 528f3cd8dedd..fe9221a7460a 100644
--- a/sys/netinet/tcp.h
+++ b/sys/netinet/tcp.h
@@ -333,7 +333,7 @@ struct tcp_info {
u_int32_t tcpi_snd_rexmitpack; /* Retransmitted packets */
u_int32_t tcpi_rcv_ooopack; /* Out-of-order packets */
u_int32_t tcpi_snd_zerowin; /* Zero-sized windows sent */
-
+
/* Padding to grow without breaking ABI. */
u_int32_t __tcpi_pad[26]; /* Padding. */
};
diff --git a/sys/netinet/tcp_fastopen.c b/sys/netinet/tcp_fastopen.c
index 396b1c9c3d01..7fb05ab50a91 100644
--- a/sys/netinet/tcp_fastopen.c
+++ b/sys/netinet/tcp_fastopen.c
@@ -386,7 +386,7 @@ void
tcp_fastopen_init(void)
{
unsigned int i;
-
+
V_counter_zone = uma_zcreate("tfo", sizeof(unsigned int),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
rm_init(&V_tcp_fastopen_keylock, "tfo_keylock");
@@ -450,7 +450,7 @@ tcp_fastopen_destroy(void)
struct tcp_fastopen_ccache_bucket *ccb;
unsigned int i;
- for (i = 0; i < V_tcp_fastopen_ccache.buckets; i++) {
+ for (i = 0; i < V_tcp_fastopen_ccache.buckets; i++) {
ccb = &V_tcp_fastopen_ccache.base[i];
tcp_fastopen_ccache_bucket_trim(ccb, 0);
mtx_destroy(&ccb->ccb_mtx);
@@ -807,7 +807,7 @@ sysctl_net_inet_tcp_fastopen_ccache_bucket_limit(SYSCTL_HANDLER_ARGS)
int error;
unsigned int new;
unsigned int i;
-
+
new = V_tcp_fastopen_ccache.bucket_limit;
error = sysctl_handle_int(oidp, &new, 0, req);
if (error == 0 && req->newptr) {
@@ -823,7 +823,7 @@ sysctl_net_inet_tcp_fastopen_ccache_bucket_limit(SYSCTL_HANDLER_ARGS)
}
V_tcp_fastopen_ccache.bucket_limit = new;
}
-
+
}
return (error);
}
@@ -860,7 +860,7 @@ sysctl_net_inet_tcp_fastopen_client_enable(SYSCTL_HANDLER_ARGS)
ccb->ccb_num_entries));
ccb->ccb_num_entries = 0; /* enable bucket */
CCB_UNLOCK(ccb);
- }
+ }
V_tcp_fastopen_client_enable = 1;
}
}
@@ -876,7 +876,7 @@ tcp_fastopen_connect(struct tcpcb *tp)
sbintime_t now;
uint16_t server_mss;
uint64_t psk_cookie;
-
+
psk_cookie = 0;
inp = tp->t_inpcb;
cce = tcp_fastopen_ccache_lookup(&inp->inp_inc, &ccb);
@@ -1032,7 +1032,7 @@ tcp_fastopen_ccache_lookup(struct in_conninfo *inc,
ccb = &V_tcp_fastopen_ccache.base[hash & V_tcp_fastopen_ccache.mask];
*ccbp = ccb;
CCB_LOCK(ccb);
-
+
/*
* Always returns with locked bucket.
*/
@@ -1055,7 +1055,7 @@ tcp_fastopen_ccache_create(struct tcp_fastopen_ccache_bucket *ccb,
struct in_conninfo *inc, uint16_t mss, uint8_t cookie_len, uint8_t *cookie)
{
struct tcp_fastopen_ccache_entry *cce;
-
+
/*
* 1. Create a new entry, or
* 2. Reclaim an existing entry, or
@@ -1063,7 +1063,7 @@ tcp_fastopen_ccache_create(struct tcp_fastopen_ccache_bucket *ccb,
*/
CCB_LOCK_ASSERT(ccb);
-
+
cce = NULL;
if (ccb->ccb_num_entries < V_tcp_fastopen_ccache.bucket_limit)
cce = uma_zalloc(V_tcp_fastopen_ccache.zone, M_NOWAIT);
@@ -1106,7 +1106,7 @@ tcp_fastopen_ccache_create(struct tcp_fastopen_ccache_bucket *ccb,
cce->cookie_len = 0;
cce->disable_time = getsbinuptime();
}
-
+
return (cce);
}
@@ -1116,7 +1116,7 @@ tcp_fastopen_ccache_bucket_trim(struct tcp_fastopen_ccache_bucket *ccb,
{
struct tcp_fastopen_ccache_entry *cce, *cce_tmp;
unsigned int entries;
-
+
CCB_LOCK(ccb);
entries = 0;
TAILQ_FOREACH_SAFE(cce, &ccb->ccb_entries, cce_link, cce_tmp) {
diff --git a/sys/netinet/tcp_fsm.h b/sys/netinet/tcp_fsm.h
index dcc4a4e8aa63..8bd129f613cf 100644
--- a/sys/netinet/tcp_fsm.h
+++ b/sys/netinet/tcp_fsm.h
@@ -97,7 +97,7 @@ static u_char tcp_outflags[TCP_NSTATES] = {
TH_FIN|TH_ACK, /* 8, LAST_ACK */
TH_ACK, /* 9, FIN_WAIT_2 */
TH_ACK, /* 10, TIME_WAIT */
-};
+};
#endif
#ifdef KPROF
diff --git a/sys/netinet/tcp_input.c b/sys/netinet/tcp_input.c
index 5050210ca1b3..d23a364e2fc2 100644
--- a/sys/netinet/tcp_input.c
+++ b/sys/netinet/tcp_input.c
@@ -369,7 +369,7 @@ cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t nsegs,
#endif
}
-void
+void
cc_conn_init(struct tcpcb *tp)
{
struct hc_metrics_lite metrics;
@@ -1687,7 +1687,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
th->th_seq == tp->rcv_nxt &&
(thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
tp->snd_nxt == tp->snd_max &&
- tiwin && tiwin == tp->snd_wnd &&
+ tiwin && tiwin == tp->snd_wnd &&
((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
SEGQ_EMPTY(tp) &&
((to.to_flags & TOF_TS) == 0 ||
@@ -1764,7 +1764,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
SEQ_LEQ(th->th_ack, tp->snd_recover))
tp->snd_recover = th->th_ack - 1;
-
+
/*
* Let the congestion control algorithm update
* congestion control related information. This
@@ -1908,7 +1908,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
goto dropwithreset;
} else if (thflags & TH_SYN) {
/* non-initial SYN is ignored */
- if ((tcp_timer_active(tp, TT_DELACK) ||
+ if ((tcp_timer_active(tp, TT_DELACK) ||
tcp_timer_active(tp, TT_REXMT)))
goto drop;
} else if (!(thflags & (TH_ACK|TH_FIN|TH_RST))) {
@@ -1985,7 +1985,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
tp->t_flags2 |= TF2_ECN_PERMIT;
TCPSTAT_INC(tcps_ecn_shs);
}
-
+
/*
* Received <SYN,ACK> in SYN_SENT[*] state.
* Transitions:
@@ -2300,14 +2300,14 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
/*
* If last ACK falls within this segment's sequence numbers,
* record its timestamp.
- * NOTE:
+ * NOTE:
* 1) That the test incorporates suggestions from the latest
* proposal of the tcplw@cray.com list (Braden 1993/04/26).
* 2) That updating only on newer timestamps interferes with
* our earlier PAWS tests, so this check should be solely
* predicated on the sequence space of this segment.
- * 3) That we modify the segment boundary check to be
- * Last.ACK.Sent <= SEG.SEQ + SEG.Len
+ * 3) That we modify the segment boundary check to be
+ * Last.ACK.Sent <= SEG.SEQ + SEG.Len
* instead of RFC1323's
* Last.ACK.Sent < SEG.SEQ + SEG.Len,
* This modified check allows us to overcome RFC1323's
@@ -2376,7 +2376,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
/*
* Account for the ACK of our SYN prior to
* regular ACK processing below.
- */
+ */
tp->snd_una++;
}
if (tp->t_flags & TF_NEEDFIN) {
@@ -2511,10 +2511,10 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
if ((tp->t_flags & TF_SACK_PERMIT) &&
IN_FASTRECOVERY(tp->t_flags)) {
int awnd;
-
+
/*
* Compute the amount of data in flight first.
- * We can inject new data into the pipe iff
+ * We can inject new data into the pipe iff
* we have less than 1/2 the original window's
* worth of data in flight.
*/
diff --git a/sys/netinet/tcp_log_buf.c b/sys/netinet/tcp_log_buf.c
index e37cb639dbe1..1e2603680a46 100644
--- a/sys/netinet/tcp_log_buf.c
+++ b/sys/netinet/tcp_log_buf.c
@@ -648,7 +648,7 @@ restart:
KASSERT(bucket_locked || tlb == NULL,
("%s: bucket_locked (%d) and tlb (%p) are "
"inconsistent", __func__, bucket_locked, tlb));
-
+
if (bucket_locked) {
TCPID_BUCKET_UNLOCK(tlb);
bucket_locked = false;
@@ -728,7 +728,7 @@ refind:
* Remember that we constructed (struct tcp_log_id_node) so
* we can safely cast the id to it for the purposes of finding.
*/
- KASSERT(tlb == NULL, ("%s:%d tlb unexpectedly non-NULL",
+ KASSERT(tlb == NULL, ("%s:%d tlb unexpectedly non-NULL",
__func__, __LINE__));
tmp_tlb = RB_FIND(tcp_log_id_tree, &tcp_log_id_head,
(struct tcp_log_id_bucket *) id);
@@ -1351,7 +1351,7 @@ tcp_log_tcpcbfini(struct tcpcb *tp)
* There are two ways we could keep logs: per-socket or per-ID. If
* we are tracking logs with an ID, then the logs survive the
* destruction of the TCPCB.
- *
+ *
* If the TCPCB is associated with an ID node, move the logs from the
* TCPCB to the ID node. In theory, this is safe, for reasons which I
* will now explain for my own benefit when I next need to figure out
@@ -1361,7 +1361,7 @@ tcp_log_tcpcbfini(struct tcpcb *tp)
* of this node (Rule C). Further, no one can remove this node from
* the bucket while we hold the lock (Rule D). Basically, no one can
* mess with this node. That leaves two states in which we could be:
- *
+ *
* 1. Another thread is currently waiting to acquire the INP lock, with
* plans to do something with this node. When we drop the INP lock,
* they will have a chance to do that. They will recheck the
@@ -1770,7 +1770,7 @@ tcp_log_state_change(struct tcpcb *tp, int state)
if (tcp_disable_all_bb_logs) {
/* We are prohibited from doing any logs */
tp->t_logstate = TCP_LOG_STATE_OFF;
- }
+ }
tp->t_flags2 &= ~(TF2_LOG_AUTO);
return (0);
@@ -2110,7 +2110,7 @@ tcp_log_expandlogbuf(struct tcp_log_dev_queue *param)
sopt.sopt_val = hdr + 1;
sopt.sopt_valsize -= sizeof(struct tcp_log_header);
sopt.sopt_td = NULL;
-
+
error = tcp_log_logs_to_buf(&sopt, &entry->tldl_entries,
(struct tcp_log_buffer **)&end, entry->tldl_count);
if (error) {
@@ -2380,7 +2380,7 @@ tcp_log_dumpbucketlogs(struct tcp_log_id_bucket *tlb, char *reason)
* If this isn't associated with a TCPCB, we can pull it off
* the list now. We need to be careful that the expire timer
* hasn't already taken ownership (tln_expiretime == SBT_MAX).
- * If so, we let the expire timer code free the data.
+ * If so, we let the expire timer code free the data.
*/
if (cur_tln->tln_closed) {
no_inp:
@@ -2618,7 +2618,7 @@ tcp_log_dump_tp_bucket_logbufs(struct tcpcb *tp, char *reason)
return;
}
- /* Turn this over to tcp_log_dumpbucketlogs() to finish the work. */
+ /* Turn this over to tcp_log_dumpbucketlogs() to finish the work. */
tcp_log_dumpbucketlogs(tlb, reason);
}
diff --git a/sys/netinet/tcp_log_buf.h b/sys/netinet/tcp_log_buf.h
index 267d0ed045ca..5b470a541504 100644
--- a/sys/netinet/tcp_log_buf.h
+++ b/sys/netinet/tcp_log_buf.h
@@ -305,7 +305,7 @@ struct tcp_log_dev_log_queue {
* information when needed.
*
* Prototype:
- * TCP_LOG_EVENT(struct tcpcb *tp, struct tcphdr *th, struct sockbuf *rxbuf,
+ * TCP_LOG_EVENT(struct tcpcb *tp, struct tcphdr *th, struct sockbuf *rxbuf,
* struct sockbuf *txbuf, uint8_t eventid, int errornum,
* union tcp_log_stackspecific *stackinfo)
*
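A hypothetical call site matching the prototype above (the event id and the
socket-buffer fields are illustrative; consult the event enum in this header
for real event ids):

	union tcp_log_stackspecific ss;

	memset(&ss, 0, sizeof(ss));
	TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd,
	    TCP_LOG_IN, 0 /* errornum */, &ss);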
diff --git a/sys/netinet/tcp_lro.c b/sys/netinet/tcp_lro.c
index a5ab79113a87..fcd0c36ad83c 100644
--- a/sys/netinet/tcp_lro.c
+++ b/sys/netinet/tcp_lro.c
@@ -443,7 +443,7 @@ tcp_lro_log(struct tcpcb *tp, struct lro_ctrl *lc,
union tcp_log_stackspecific log;
struct timeval tv;
uint32_t cts;
-
+
cts = tcp_get_usecs(&tv);
memset(&log, 0, sizeof(union tcp_log_stackspecific));
log.u_bbr.flex8 = frm;
@@ -556,9 +556,9 @@ tcp_flush_out_le(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le, in
tcp_lro_log(tp, lc, le, NULL, 7, 0, 0, 0, 0);
}
}
- /*
- * Break any chain, this is not set to NULL on the singleton
- * case m_nextpkt points to m_head. Other case set them
+ /*
+	 * Break any chain; m_nextpkt is not set to NULL in the singleton
+	 * case, where m_nextpkt points to m_head. The other cases set
	 * m_nextpkt to NULL in push_and_replace.
*/
le->m_head->m_nextpkt = NULL;
@@ -646,7 +646,7 @@ tcp_set_le_to_m(struct lro_ctrl *lc, struct lro_entry *le, struct mbuf *m)
le->m_tail = m_last(m);
le->append_cnt = 0;
le->ulp_csum = tcp_lro_rx_csum_fixup(le, l3hdr, th, tcp_data_len,
- ~csum);
+ ~csum);
le->append_cnt++;
th->th_sum = csum; /* Restore checksum on first packet. */
}
@@ -656,7 +656,7 @@ tcp_push_and_replace(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le
{
/*
* Push up the stack the current le and replace
- * it with m.
+ * it with m.
*/
struct mbuf *msave;
@@ -666,7 +666,7 @@ tcp_push_and_replace(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le
/* Now push out the old le entry */
tcp_flush_out_le(tp, lc, le, locked);
/*
- * Now to replace the data properly in the le
+ * Now to replace the data properly in the le
* we have to reset the tcp header and
* other fields.
*/
@@ -678,9 +678,9 @@ tcp_push_and_replace(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le
static void
tcp_lro_condense(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le, int locked)
{
- /*
- * Walk through the mbuf chain we
- * have on tap and compress/condense
+ /*
+ * Walk through the mbuf chain we
+ * have on tap and compress/condense
* as required.
*/
uint32_t *ts_ptr;
@@ -689,9 +689,9 @@ tcp_lro_condense(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le, in
uint16_t tcp_data_len, csum_upd;
int l;
- /*
- * First we must check the lead (m_head)
- * we must make sure that it is *not*
+ /*
+	 * First we must check the lead (m_head);
+	 * we must make sure that it is *not*
* something that should be sent up
* right away (sack etc).
*/
@@ -703,7 +703,7 @@ again:
return;
}
th = tcp_lro_get_th(le, le->m_head);
- KASSERT(th != NULL,
+ KASSERT(th != NULL,
("le:%p m:%p th comes back NULL?", le, le->m_head));
l = (th->th_off << 2);
l -= sizeof(*th);
@@ -729,7 +729,7 @@ again:
goto again;
}
while((m = le->m_head->m_nextpkt) != NULL) {
- /*
+ /*
* condense m into le, first
* pull m out of the list.
*/
@@ -738,7 +738,7 @@ again:
/* Setup my data */
tcp_data_len = m->m_pkthdr.lro_len;
th = tcp_lro_get_th(le, m);
- KASSERT(th != NULL,
+ KASSERT(th != NULL,
("le:%p m:%p th comes back NULL?", le, m));
ts_ptr = (uint32_t *)(th + 1);
l = (th->th_off << 2);
@@ -871,14 +871,14 @@ tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
#ifdef TCPHPTS
struct inpcb *inp = NULL;
int need_wakeup = 0, can_queue = 0;
- struct epoch_tracker et;
+ struct epoch_tracker et;
/* Now lets lookup the inp first */
CURVNET_SET(lc->ifp->if_vnet);
/*
* XXXRRS Currently the common input handler for
* mbuf queuing cannot handle VLAN Tagged. This needs
- * to be fixed and the or condition removed (i.e. the
+ * to be fixed and the or condition removed (i.e. the
* common code should do the right lookup for the vlan
* tag and anything else that the vlan_input() does).
*/
@@ -907,7 +907,7 @@ tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
if (inp && ((inp->inp_flags & (INP_DROPPED|INP_TIMEWAIT)) ||
(inp->inp_flags2 & INP_FREED))) {
/* We don't want this guy */
- INP_WUNLOCK(inp);
+ INP_WUNLOCK(inp);
inp = NULL;
}
if (inp && (inp->inp_flags2 & INP_SUPPORTS_MBUFQ)) {
@@ -916,13 +916,13 @@ tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
if (le->need_wakeup ||
((inp->inp_in_input == 0) &&
((inp->inp_flags2 & INP_MBUF_QUEUE_READY) == 0))) {
- /*
+ /*
* Either the transport is off on a keep-alive
* (it has the queue_ready flag clear and its
* not already been woken) or the entry has
* some urgent thing (FIN or possibly SACK blocks).
* This means we need to wake the transport up by
- * putting it on the input pacer.
+ * putting it on the input pacer.
*/
need_wakeup = 1;
if ((inp->inp_flags2 & INP_DONT_SACK_QUEUE) &&
@@ -949,7 +949,7 @@ tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
inp->inp_flags2, inp->inp_in_input, le->need_wakeup);
tcp_queue_pkts(tp, le);
if (need_wakeup) {
- /*
+ /*
* We must get the guy to wakeup via
* hpts.
*/
@@ -1233,7 +1233,7 @@ tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, int use_hash)
if (l != 0 && (__predict_false(l != TCPOLEN_TSTAMP_APPA) ||
(*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16|
TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) {
- /*
+ /*
* We have an option besides Timestamps, maybe
* it is a sack (most likely) which means we
* will probably need to wake up a sleeper (if
@@ -1362,7 +1362,7 @@ tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, int use_hash)
le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN;
break;
#endif
- }
+ }
le->source_port = th->th_sport;
le->dest_port = th->th_dport;
le->next_seq = seq + tcp_data_len;
@@ -1392,7 +1392,7 @@ tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, int use_hash)
le->m_last_mbuf = m;
m->m_nextpkt = NULL;
le->m_prev_last = NULL;
- /*
+ /*
* We keep the total size here for cross checking when we may need
* to flush/wakeup in the MBUF_QUEUE case.
*/
diff --git a/sys/netinet/tcp_lro.h b/sys/netinet/tcp_lro.h
index 1c6d2dd54e1b..f2c05ad4aec7 100644
--- a/sys/netinet/tcp_lro.h
+++ b/sys/netinet/tcp_lro.h
@@ -77,12 +77,12 @@ struct lro_entry {
uint16_t mbuf_appended;
struct timeval mtime;
};
-/*
- * Note: The mbuf_cnt field tracks our number of mbufs added to the m_next
- * list. Each mbuf counted can have data and of course it will
- * have an ack as well (by defintion any inbound tcp segment will
+/*
+ * Note: The mbuf_cnt field tracks our number of mbufs added to the m_next
+ * list. Each mbuf counted can have data and of course it will
+ * have an ack as well (by definition any inbound tcp segment will
 * have an ack value). We use this count to tell us how many ACK's
- * are present for our ack-count threshold. If we exceed that or
+ * are present for our ack-count threshold. If we exceed that or
* the data threshold we will wake up the endpoint.
*/
LIST_HEAD(lro_head, lro_entry);
@@ -130,7 +130,7 @@ void tcp_lro_flush_all(struct lro_ctrl *);
int tcp_lro_rx(struct lro_ctrl *, struct mbuf *, uint32_t);
void tcp_lro_queue_mbuf(struct lro_ctrl *, struct mbuf *);
void tcp_lro_reg_mbufq(void);
-void tcp_lro_dereg_mbufq(void);
+void tcp_lro_dereg_mbufq(void);
#define TCP_LRO_NO_ENTRIES -2
#define TCP_LRO_CANNOT -1
diff --git a/sys/netinet/tcp_output.c b/sys/netinet/tcp_output.c
index d308f9bf4c1e..e563dd9bda03 100644
--- a/sys/netinet/tcp_output.c
+++ b/sys/netinet/tcp_output.c
@@ -301,7 +301,7 @@ again:
if ((tp->t_flags & TF_SACK_PERMIT) && IN_FASTRECOVERY(tp->t_flags) &&
(p = tcp_sack_output(tp, &sack_bytes_rxmt))) {
uint32_t cwin;
-
+
cwin =
imax(min(tp->snd_wnd, tp->snd_cwnd) - sack_bytes_rxmt, 0);
/* Do not retransmit SACK segments beyond snd_recover */
@@ -412,14 +412,14 @@ after_sack_rexmit:
off);
/*
* Don't remove this (len > 0) check !
- * We explicitly check for len > 0 here (although it
- * isn't really necessary), to work around a gcc
+ * We explicitly check for len > 0 here (although it
+ * isn't really necessary), to work around a gcc
* optimization issue - to force gcc to compute
* len above. Without this check, the computation
* of len is bungled by the optimizer.
*/
if (len > 0) {
- cwin = tp->snd_cwnd -
+ cwin = tp->snd_cwnd -
(tp->snd_nxt - tp->sack_newdata) -
sack_bytes_rxmt;
if (cwin < 0)
@@ -658,7 +658,7 @@ after_sack_rexmit:
} else
oldwin = 0;
- /*
+ /*
* If the new window size ends up being the same as or less
* than the old size when it is scaled, then don't force
* a window update.
@@ -706,7 +706,7 @@ dontupdate:
!tcp_timer_active(tp, TT_PERSIST)) {
tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
goto just_return;
- }
+ }
/*
* TCP window updates are not reliable, rather a polling protocol
* using ``persist'' packets is used to insure receipt of window
@@ -1058,7 +1058,7 @@ send:
&len, if_hw_tsomaxsegcount,
if_hw_tsomaxsegsize, msb, hw_tls);
if (len <= (tp->t_maxseg - optlen)) {
- /*
+ /*
* Must have ran out of mbufs for the copy
* shorten it to no longer need tso. Lets
* not put on sendalot since we are low on
@@ -1153,7 +1153,7 @@ send:
} else
flags |= TH_ECE|TH_CWR;
}
-
+
if (tp->t_state == TCPS_ESTABLISHED &&
(tp->t_flags2 & TF2_ECN_PERMIT)) {
/*
@@ -1172,18 +1172,18 @@ send:
ip->ip_tos |= IPTOS_ECN_ECT0;
TCPSTAT_INC(tcps_ecn_ect0);
}
-
+
/*
* Reply with proper ECN notifications.
*/
if (tp->t_flags2 & TF2_ECN_SND_CWR) {
flags |= TH_CWR;
tp->t_flags2 &= ~TF2_ECN_SND_CWR;
- }
+ }
if (tp->t_flags2 & TF2_ECN_SND_ECE)
flags |= TH_ECE;
}
-
+
/*
* If we are doing retransmissions, then snd_nxt will
* not reflect the first unsent octet. For ACK only
@@ -1464,7 +1464,7 @@ out:
* In transmit state, time the transmission and arrange for
* the retransmit. In persist state, just set snd_max.
*/
- if ((tp->t_flags & TF_FORCEDATA) == 0 ||
+ if ((tp->t_flags & TF_FORCEDATA) == 0 ||
!tcp_timer_active(tp, TT_PERSIST)) {
tcp_seq startseq = tp->snd_nxt;
diff --git a/sys/netinet/tcp_ratelimit.c b/sys/netinet/tcp_ratelimit.c
index 28f845221f28..6fcd97b9e1b8 100644
--- a/sys/netinet/tcp_ratelimit.c
+++ b/sys/netinet/tcp_ratelimit.c
@@ -66,45 +66,186 @@ __FBSDID("$FreeBSD$");
* For the purposes of each send, what is the size
* of an ethernet frame.
*/
-#ifndef ETHERNET_SEGMENT_SIZE
-#define ETHERNET_SEGMENT_SIZE 1500
-#endif
MALLOC_DEFINE(M_TCPPACE, "tcp_hwpace", "TCP Hardware pacing memory");
#ifdef RATELIMIT
+/*
+ * The following preferred table will seem weird to
+ * the casual viewer. Why do we not have any rates below
+ * 1Mbps? Why do we have a rate at 1.44Mbps called common?
+ * Why do the rates cluster in the 1-100Mbps range more
+ * than others? Why does the table jump around at the beginning
+ * and then rise more consistently?
+ *
+ * Let me try to answer those questions. A lot of
+ * this is dependent on the hardware. We have three basic
+ * supporters of rate limiting
+ *
+ * Chelsio - Supporting 16 configurable rates.
+ * Mlx - c4 supporting 13 fixed rates.
+ * Mlx - c5 & c6 supporting 127 configurable rates.
+ *
+ * The c4 is why we have a common rate that is available
+ * in all rate tables. This is a selected rate from the
+ * c4 table and we ensure it is available in all ratelimit
+ * tables. This way the tcp_ratelimit code has an assured
+ * rate it should always be able to get. This answers a
+ * couple of the questions above.
+ *
+ * So what about the rest? Well, the table is built to
+ * try to get the most out of a joint hardware/software
+ * pacing system. The software pacer will always pick
+ * a rate higher than the b/w that it is estimating
+ * on the path. This is done for two reasons:
+ * a) So we can discover more b/w
+ * and
+ * b) So we can send a block of MSS's down and then
+ * have the software timer go off after the previous
+ * send is completely out of the hardware.
+ *
+ * But when we do <b> we don't want to have the delay
+ * between the last packet sent by the hardware be
+ * excessively long (to reach our desired rate).
+ *
+ * So let me give an example for clarity.
+ *
+ * Let's assume that the tcp stack sees that 29,110,000 bps is
+ * what the bw of the path is. The stack would select the
+ * rate 31Mbps. 31Mbps means that each send that is done
+ * by the hardware will cause a 387 micro-second gap between
+ * the packets sent at that rate. For 29,110,000 bps we
+ * would need 412 micro-seconds gap between each send.
+ *
+ * Now we pick a MSS count based on the delta between the
+ * two gaps (412 - 387) divided into the gap we really
+ * wish to send at, rounded up. That results in a
+ * send of 17 MSSs at once. The hardware then will
+ * run out of data in a single 17-MSS send in 6,579 micro-seconds.
+ * On the other hand the software pacer will send more data
+ * in 7,004 micro-seconds. This means that we will refill
+ * the hardware 25 microseconds after it would have sent
+ * next. This is a win since we now are only sending every
+ * 7ms or so and yet all the packets are spaced on
+ * the wire with 94% of what they should be and only
+ * the last packet is delayed extra to make up for the
+ * difference. Note that the above formula has two
+ * important caveats. If we are (b/w wise) above
+ * 100Mbps we double the result of the MSS calculation.
+ * The second caveat is if we are 500Mbps or more:
+ * we just send the maximum MSS at once, i.e. 43 MSS.
+ *
+ */
#define COMMON_RATE 180500
uint64_t desired_rates[] = {
- 62500, /* 500Kbps */
- 180500, /* 1.44Mpbs */
- 375000, /* 3Mbps */
- 500000, /* 4Mbps */
- 625000, /* 5Mbps */
- 750000, /* 6Mbps */
- 1000000, /* 8Mbps */
- 1250000, /* 10Mbps */
- 2500000, /* 20Mbps */
- 3750000, /* 30Mbps */
- 5000000, /* 40Meg */
- 6250000, /* 50Mbps */
- 12500000, /* 100Mbps */
- 25000000, /* 200Mbps */
- 50000000, /* 400Mbps */
- 100000000, /* 800Mbps */
- 12500, /* 100kbps */
- 25000, /* 200kbps */
- 875000, /* 7Mbps */
- 1125000, /* 9Mbps */
- 1875000, /* 15Mbps */
- 3125000, /* 25Mbps */
- 8125000, /* 65Mbps */
- 10000000, /* 80Mbps */
- 18750000, /* 150Mbps */
- 20000000, /* 250Mbps */
- 37500000, /* 350Mbps */
- 62500000, /* 500Mbps */
- 78125000, /* 625Mbps */
- 125000000, /* 1Gbps */
+ 122500, /* 1Mbps - rate 1 */
+	180500,		/* 1.44Mbps - rate 2 common rate */
+ 375000, /* 3Mbps - rate 3 */
+ 625000, /* 5Mbps - rate 4 */
+ 875000, /* 7Mbps - rate 5 */
+ 1125000, /* 9Mbps - rate 6 */
+ 1375000, /* 11Mbps - rate 7 */
+ 1625000, /* 13Mbps - rate 8 */
+ 2625000, /* 21Mbps - rate 9 */
+ 3875000, /* 31Mbps - rate 10 */
+	5125000,	/* 41Mbps - rate 11 */
+ 12500000, /* 100Mbps - rate 12 */
+ 25000000, /* 200Mbps - rate 13 */
+ 50000000, /* 400Mbps - rate 14 */
+	63750000,	/* 510Mbps - rate 15 */
+ 100000000, /* 800Mbps - rate 16 */
+ 1875000, /* 15Mbps - rate 17 */
+ 2125000, /* 17Mbps - rate 18 */
+ 2375000, /* 19Mbps - rate 19 */
+ 2875000, /* 23Mbps - rate 20 */
+ 3125000, /* 25Mbps - rate 21 */
+ 3375000, /* 27Mbps - rate 22 */
+ 3625000, /* 29Mbps - rate 23 */
+ 4125000, /* 33Mbps - rate 24 */
+ 4375000, /* 35Mbps - rate 25 */
+ 4625000, /* 37Mbps - rate 26 */
+ 4875000, /* 39Mbps - rate 27 */
+ 5375000, /* 43Mbps - rate 28 */
+ 5625000, /* 45Mbps - rate 29 */
+ 5875000, /* 47Mbps - rate 30 */
+ 6125000, /* 49Mbps - rate 31 */
+ 6625000, /* 53Mbps - rate 32 */
+ 6875000, /* 55Mbps - rate 33 */
+ 7125000, /* 57Mbps - rate 34 */
+ 7375000, /* 59Mbps - rate 35 */
+ 7625000, /* 61Mbps - rate 36 */
+ 7875000, /* 63Mbps - rate 37 */
+ 8125000, /* 65Mbps - rate 38 */
+ 8375000, /* 67Mbps - rate 39 */
+ 8625000, /* 69Mbps - rate 40 */
+ 8875000, /* 71Mbps - rate 41 */
+ 9125000, /* 73Mbps - rate 42 */
+ 9375000, /* 75Mbps - rate 43 */
+ 9625000, /* 77Mbps - rate 44 */
+ 9875000, /* 79Mbps - rate 45 */
+ 10125000, /* 81Mbps - rate 46 */
+ 10375000, /* 83Mbps - rate 47 */
+ 10625000, /* 85Mbps - rate 48 */
+ 10875000, /* 87Mbps - rate 49 */
+ 11125000, /* 89Mbps - rate 50 */
+ 11375000, /* 91Mbps - rate 51 */
+ 11625000, /* 93Mbps - rate 52 */
+ 11875000, /* 95Mbps - rate 53 */
+ 13125000, /* 105Mbps - rate 54 */
+ 13750000, /* 110Mbps - rate 55 */
+ 14375000, /* 115Mbps - rate 56 */
+ 15000000, /* 120Mbps - rate 57 */
+ 15625000, /* 125Mbps - rate 58 */
+ 16250000, /* 130Mbps - rate 59 */
+ 16875000, /* 135Mbps - rate 60 */
+ 17500000, /* 140Mbps - rate 61 */
+ 18125000, /* 145Mbps - rate 62 */
+ 18750000, /* 150Mbps - rate 64 */
+ 20000000, /* 160Mbps - rate 65 */
+ 21250000, /* 170Mbps - rate 66 */
+ 22500000, /* 180Mbps - rate 67 */
+ 23750000, /* 190Mbps - rate 68 */
+ 26250000, /* 210Mbps - rate 69 */
+ 27500000, /* 220Mbps - rate 70 */
+ 28750000, /* 230Mbps - rate 71 */
+ 30000000, /* 240Mbps - rate 72 */
+ 31250000, /* 250Mbps - rate 73 */
+ 34375000, /* 275Mbps - rate 74 */
+ 37500000, /* 300Mbps - rate 75 */
+ 40625000, /* 325Mbps - rate 76 */
+ 43750000, /* 350Mbps - rate 77 */
+ 46875000, /* 375Mbps - rate 78 */
+ 53125000, /* 425Mbps - rate 79 */
+ 56250000, /* 450Mbps - rate 80 */
+ 59375000, /* 475Mbps - rate 81 */
+ 62500000, /* 500Mbps - rate 82 */
+ 68750000, /* 550Mbps - rate 83 */
+ 75000000, /* 600Mbps - rate 84 */
+ 81250000, /* 650Mbps - rate 85 */
+ 87500000, /* 700Mbps - rate 86 */
+ 93750000, /* 750Mbps - rate 87 */
+ 106250000, /* 850Mbps - rate 88 */
+ 112500000, /* 900Mbps - rate 89 */
+ 125000000, /* 1Gbps - rate 90 */
+	156250000,	/* 1.25Gbps - rate 91 */
+	187500000,	/* 1.5Gbps - rate 92 */
+	218750000,	/* 1.75Gbps - rate 93 */
+	250000000,	/* 2Gbps - rate 94 */
+	281250000,	/* 2.25Gbps - rate 95 */
+ 312500000, /* 2.5Gbps - rate 96 */
+ 343750000, /* 2.75Gbps - rate 97 */
+ 375000000, /* 3Gbps - rate 98 */
+ 500000000, /* 4Gbps - rate 99 */
+ 625000000, /* 5Gbps - rate 100 */
+ 750000000, /* 6Gbps - rate 101 */
+ 875000000, /* 7Gbps - rate 102 */
+ 1000000000, /* 8Gbps - rate 103 */
+ 1125000000, /* 9Gbps - rate 104 */
+ 1250000000, /* 10Gbps - rate 105 */
+ 1875000000, /* 15Gbps - rate 106 */
+ 2500000000 /* 20Gbps - rate 107 */
};
+
#define MAX_HDWR_RATES (sizeof(desired_rates)/sizeof(uint64_t))
#define RS_ORDERED_COUNT 16 /*
* Number that are in order
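As a worked check of the 29,110,000 bps example in the comment in the hunk
above, here is a standalone sketch (all names local to the example; integer
microsecond math as in the text):

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		uint64_t bw = 29110000 / 8;	/* path b/w, bytes/sec */
		uint64_t hw = 31000000 / 8;	/* chosen hardware rate, bytes/sec */
		uint64_t frame = 1500;		/* ethernet segment size, bytes */

		uint64_t res = frame * 1000000 / bw;	/* 412 us/frame at path rate */
		uint64_t tbw = frame * 1000000 / hw;	/* 387 us/frame at hw rate */
		uint64_t delta = res - tbw;		/* 25 us */
		uint64_t segs = (res + delta - 1) / delta;	/* 17-MSS burst */

		printf("hw drains in %ju us, sw timer fires at %ju us\n",
		    (uintmax_t)(segs * tbw), (uintmax_t)(segs * res)); /* 6579 vs 7004 */
		return (0);
	}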
@@ -381,16 +522,24 @@ rt_setup_new_rs(struct ifnet *ifp, int *error)
* We can do nothing if we cannot
* get a query back from the driver.
*/
+ printf("No query functions for %s:%d-- failed\n",
+ ifp->if_dname, ifp->if_dunit);
return (NULL);
}
rs = malloc(sizeof(struct tcp_rate_set), M_TCPPACE, M_NOWAIT | M_ZERO);
if (rs == NULL) {
if (error)
*error = ENOMEM;
+ printf("No memory for malloc\n");
return (NULL);
}
+ memset(&rl, 0, sizeof(rl));
rl.flags = RT_NOSUPPORT;
ifp->if_ratelimit_query(ifp, &rl);
+ printf("if:%s:%d responds with flags:0x%x rate count:%d\n",
+ ifp->if_dname,
+ ifp->if_dunit,
+ rl.flags, rl.number_of_rates);
if (rl.flags & RT_IS_UNUSABLE) {
/*
* The interface does not really support
@@ -433,7 +582,7 @@ rt_setup_new_rs(struct ifnet *ifp, int *error)
mtx_unlock(&rs_mtx);
return (rs);
} else if ((rl.flags & RT_IS_FIXED_TABLE) == RT_IS_FIXED_TABLE) {
- /* Mellanox most likely */
+ /* Mellanox C4 likely */
rs->rs_ifp = ifp;
rs->rs_if_dunit = ifp->if_dunit;
rs->rs_rate_cnt = rl.number_of_rates;
@@ -444,7 +593,7 @@ rt_setup_new_rs(struct ifnet *ifp, int *error)
rs->rs_disable = 0;
rate_table_act = rl.rate_table;
} else if ((rl.flags & RT_IS_SELECTABLE) == RT_IS_SELECTABLE) {
- /* Chelsio */
+ /* Chelsio, C5 and C6 of Mellanox? */
rs->rs_ifp = ifp;
rs->rs_if_dunit = ifp->if_dunit;
rs->rs_rate_cnt = rl.number_of_rates;
@@ -536,6 +685,14 @@ bail:
rs->rs_lowest_valid = i;
} else {
int err;
+
+ if ((rl.flags & RT_IS_SETUP_REQ) &&
+ (ifp->if_ratelimit_query)) {
+ err = ifp->if_ratelimit_setup(ifp,
+ rs->rs_rlt[i].rate, i);
+ if (err)
+ goto handle_err;
+ }
#ifdef RSS
hash_type = M_HASHTYPE_RSS_TCP_IPV4;
#else
@@ -547,6 +704,7 @@ bail:
rs->rs_rlt[i].rate,
&rs->rs_rlt[i].tag);
if (err) {
+handle_err:
if (i == (rs->rs_rate_cnt - 1)) {
/*
* Huh - first rate and we can't get
@@ -980,13 +1138,22 @@ tcp_rl_ifnet_link(void *arg __unused, struct ifnet *ifp, int link_state)
* We only care on an interface going up that is rate-limit
* capable.
*/
+ printf("ifp:%s.%d does not support rate-limit(0x%x) or link_state is not UP(state:%d)\n",
+ ifp->if_dname,
+ ifp->if_dunit,
+ ifp->if_capabilities,
+ link_state);
return;
}
mtx_lock(&rs_mtx);
+ printf("Link UP on interface %s.%d\n",
+ ifp->if_dname,
+ ifp->if_dunit);
CK_LIST_FOREACH(rs, &int_rs, next) {
if ((rs->rs_ifp == ifp) &&
(rs->rs_if_dunit == ifp->if_dunit)) {
/* We already have initialized this guy */
+ printf("Interface already initialized\n");
mtx_unlock(&rs_mtx);
return;
}
@@ -1087,6 +1254,8 @@ tcp_set_pacing_rate(struct tcpcb *tp, struct ifnet *ifp,
*error = EINVAL;
rte = NULL;
}
+ if (rte != NULL)
+ *error = 0;
return (rte);
}
@@ -1196,6 +1364,112 @@ tcp_rel_pacing_rate(const struct tcp_hwrate_limit_table *crte, struct tcpcb *tp)
in_pcbdetach_txrtlmt(tp->t_inpcb);
}
+#define ONE_POINT_TWO_MEG 150000 /* 1.2Mbps in bytes per second */
+#define ONE_HUNDRED_MBPS 12500000 /* 100Mbps in bytes per second */
+#define FIVE_HUNDRED_MBPS 62500000 /* 500Mbps in bytes per second */
+#define MAX_MSS_SENT 43 /* 43 mss = 43 x 1500 = 64,500 bytes */
+
+uint32_t
+tcp_get_pacing_mss(uint64_t bw, uint32_t segsiz, int can_use_1mss,
+ const struct tcp_hwrate_limit_table *te)
+{
+ /*
+ * We use the Google formula to calculate the
+ * TSO size, i.e.:
+ * bw < 24Meg
+ * tso = 2mss
+ * else
+ * tso = min(bw/1000, 64k)
+ *
+ * Note for these calculations we ignore the
+ * packet overhead (enet hdr, ip hdr and tcp hdr).
+ */
+ uint64_t lentim, res, bytes;
+ uint32_t new_tso, min_tso_segs;
+
+ bytes = bw / 1000;
+ if (bytes > (64 * 1000))
+ bytes = 64 * 1000;
+ /* Round up */
+ new_tso = (bytes + segsiz - 1) / segsiz;
+ if (can_use_1mss && (bw < ONE_POINT_TWO_MEG))
+ min_tso_segs = 1;
+ else
+ min_tso_segs = 2;
+ if (new_tso < min_tso_segs)
+ new_tso = min_tso_segs;
+ if (new_tso > MAX_MSS_SENT)
+ new_tso = MAX_MSS_SENT;
+ new_tso *= segsiz;
+ /*
+ * If we are not doing hardware pacing
+ * then we are done.
+ */
+ if (te == NULL)
+ return (new_tso);
+ /*
+ * For hardware pacing we look at the
+ * rate you are sending at and compare
+ * that to the rate you have in hardware.
+ *
+ * If the hardware rate is slower than your
+ * software rate then you are in error and
+ * we will build a queue in our hardware which
+ * is probably not desired, in such a case
+ * just return the non-hardware TSO size.
+ *
+ * If the rate in hardware is faster (which
+ * it should be) then look at how long it
+ * takes to send one ethernet segment size at
+ * your b/w and compare that to the time it
+ * takes to send at the rate you had selected.
+ *
+ * If your time is greater (which we hope it is)
+ * we get the delta between the two, and then
+ * divide that into your pacing time. This tells
+ * us how many MSS you can send down at once (rounded up).
+ *
+ * Note we also double this value if the b/w is over
+ * 100Mbps. If it's over 500Mbps we just set you to the
+ * max (43 segments).
+ */
+ if (te->rate > FIVE_HUNDRED_MBPS)
+ return (segsiz * MAX_MSS_SENT);
+ if (te->rate == bw) {
+ /* We are pacing at exactly the hdwr rate */
+ return (segsiz * MAX_MSS_SENT);
+ }
+ lentim = ETHERNET_SEGMENT_SIZE * USECS_IN_SECOND;
+ res = lentim / bw;
+ if (res > te->time_between) {
+ uint32_t delta, segs;
+
+ delta = res - te->time_between;
+ segs = (res + delta - 1)/delta;
+ if (te->rate > ONE_HUNDRED_MBPS)
+ segs *= 2;
+ if (segs < min_tso_segs)
+ segs = min_tso_segs;
+ if (segs > MAX_MSS_SENT)
+ segs = MAX_MSS_SENT;
+ segs *= segsiz;
+ if (segs < new_tso) {
+ /* unexpected ? */
+ return (new_tso);
+ } else {
+ return (segs);
+ }
+ } else {
+ /*
+ * Your time is smaller, which means
+ * we will grow a queue on our
+ * hardware. Send back the non-hardware
+ * rate.
+ */
+ return (new_tso);
+ }
+}
+
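A worked example of the formula above, called from code that already holds a connection. The 1448-byte MSS is an assumption; te == NULL exercises the software-only path.

uint32_t tso_bytes;

/*
 * 100Mbps is 12,500,000 bytes/sec:
 *   bytes   = 12500000 / 1000 = 12500  (under the 64,000 cap)
 *   new_tso = (12500 + 1448 - 1) / 1448 = 9 segments
 *   result  = 9 * 1448 = 13032 bytes per burst
 */
tso_bytes = tcp_get_pacing_mss(12500000, 1448, 0, NULL);

At 20Mbps (2,500,000 bytes/sec) the same math gives 2 segments, the floor when can_use_1mss is 0.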
static eventhandler_tag rl_ifnet_departs;
static eventhandler_tag rl_ifnet_arrives;
static eventhandler_tag rl_shutdown_start;
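These tags are presumably filled in by the module's init path, which this hunk does not show. A sketch of the kind of registration implied, assuming the stock ifnet_link_event whose handler signature tcp_rl_ifnet_link() above already matches:

rl_ifnet_arrives = EVENTHANDLER_REGISTER(ifnet_link_event,
    tcp_rl_ifnet_link, NULL, EVENTHANDLER_PRI_ANY);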
diff --git a/sys/netinet/tcp_ratelimit.h b/sys/netinet/tcp_ratelimit.h
index ebd4c4204704..2b62156a868d 100644
--- a/sys/netinet/tcp_ratelimit.h
+++ b/sys/netinet/tcp_ratelimit.h
@@ -88,6 +88,9 @@ CK_LIST_HEAD(head_tcp_rate_set, tcp_rate_set);
#define RS_PACING_SUB_OK 0x0010 /* If a rate can't be found get the
* next best rate (highest or lowest). */
#ifdef _KERNEL
+#ifndef ETHERNET_SEGMENT_SIZE
+#define ETHERNET_SEGMENT_SIZE 1514
+#endif
#ifdef RATELIMIT
#define DETAILED_RATELIMIT_SYSCTL 1 /*
* Undefine this if you don't want
@@ -135,7 +138,17 @@ tcp_rel_pacing_rate(const struct tcp_hwrate_limit_table *crte,
{
return;
}
-
#endif
+/*
+ * Given a b/w and a segsiz, and optional hardware
+ * rate limit, return the ideal size to burst
+ * out at once. Note the parameter can_use_1mss
+ * dictates whether the transport will tolerate a
+ * 1mss limit; if not, it bottoms out at 2mss
+ * (think delayed ack).
+ */
+uint32_t
+tcp_get_pacing_mss(uint64_t bw, uint32_t segsiz, int can_use_1mss,
+ const struct tcp_hwrate_limit_table *te);
#endif
#endif
diff --git a/sys/netinet/tcp_reass.c b/sys/netinet/tcp_reass.c
index 73bf051f8ad2..aec5b4d0f5f5 100644
--- a/sys/netinet/tcp_reass.c
+++ b/sys/netinet/tcp_reass.c
@@ -321,7 +321,7 @@ tcp_reass_flush(struct tcpcb *tp)
static void
tcp_reass_append(struct tcpcb *tp, struct tseg_qent *last,
- struct mbuf *m, struct tcphdr *th, int tlen,
+ struct mbuf *m, struct tcphdr *th, int tlen,
struct mbuf *mlast, int lenofoh)
{
@@ -350,7 +350,7 @@ tcp_reass_prepend(struct tcpcb *tp, struct tseg_qent *first, struct mbuf *m, str
int tlen, struct mbuf *mlast, int lenofoh)
{
int i;
-
+
#ifdef TCP_REASS_LOGGING
tcp_log_reassm(tp, first, NULL, th->th_seq, tlen, TCP_R_LOG_PREPEND, 0);
#endif
@@ -381,7 +381,7 @@ tcp_reass_prepend(struct tcpcb *tp, struct tseg_qent *first, struct mbuf *m, str
#endif
}
-static void
+static void
tcp_reass_replace(struct tcpcb *tp, struct tseg_qent *q, struct mbuf *m,
tcp_seq seq, int len, struct mbuf *mlast, int mbufoh, uint8_t flags)
{
@@ -397,7 +397,7 @@ tcp_reass_replace(struct tcpcb *tp, struct tseg_qent *q, struct mbuf *m,
m_freem(q->tqe_m);
KASSERT(tp->t_segqmbuflen >= q->tqe_mbuf_cnt,
("Tp:%p seg queue goes negative", tp));
- tp->t_segqmbuflen -= q->tqe_mbuf_cnt;
+ tp->t_segqmbuflen -= q->tqe_mbuf_cnt;
q->tqe_mbuf_cnt = mbufoh;
q->tqe_m = m;
q->tqe_last = mlast;
@@ -420,7 +420,7 @@ static void
tcp_reass_merge_into(struct tcpcb *tp, struct tseg_qent *ent,
struct tseg_qent *q)
{
- /*
+ /*
* Merge q into ent and free q from the list.
*/
#ifdef TCP_REASS_LOGGING
@@ -473,8 +473,8 @@ tcp_reass_merge_forward(struct tcpcb *tp, struct tseg_qent *ent)
tp->t_segqlen--;
continue;
}
- /*
- * Trim the q entry to dovetail to this one
+ /*
+ * Trim the q entry to dovetail to this one
* and then merge q into ent updating max
* in the process.
*/
@@ -493,7 +493,7 @@ tcp_reass_merge_forward(struct tcpcb *tp, struct tseg_qent *ent)
#endif
}
-static int
+static int
tcp_reass_overhead_of_chain(struct mbuf *m, struct mbuf **mlast)
{
int len = MSIZE;
@@ -571,7 +571,7 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
* the rcv_nxt <-> rcv_wnd but thats
* already done for us by the caller.
*/
-#ifdef TCP_REASS_COUNTERS
+#ifdef TCP_REASS_COUNTERS
counter_u64_add(tcp_zero_input, 1);
#endif
m_freem(m);
@@ -616,7 +616,7 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
if (last != NULL) {
if ((th->th_flags & TH_FIN) &&
SEQ_LT((th->th_seq + *tlenp), (last->tqe_start + last->tqe_len))) {
- /*
+ /*
* Someone is trying to game us, dump
* the segment.
*/
@@ -656,8 +656,8 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
}
}
if (last->tqe_flags & TH_FIN) {
- /*
- * We have data after the FIN on the last?
+ /*
+ * We have data after the FIN on the last?
*/
*tlenp = 0;
m_freem(m);
@@ -669,7 +669,7 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
*tlenp = last->tqe_len;
return (0);
} else if (SEQ_GT(th->th_seq, (last->tqe_start + last->tqe_len))) {
- /*
+ /*
* Second common case, we missed
* another one and have something more
* for the end.
@@ -681,8 +681,8 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
* new segment |---|
*/
if (last->tqe_flags & TH_FIN) {
- /*
- * We have data after the FIN on the last?
+ /*
+ * We have data after the FIN on the last?
*/
*tlenp = 0;
m_freem(m);
@@ -726,8 +726,8 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
counter_u64_add(reass_path3, 1);
#endif
if (SEQ_LT(th->th_seq, tp->rcv_nxt)) {
- /*
- * The resend was even before
+ /*
+ * The resend was even before
* what we have. We need to trim it.
* Note TSNH (it should be trimmed
* before the call to tcp_reass()).
@@ -785,7 +785,7 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
}
p = TAILQ_PREV(q, tsegqe_head, tqe_q);
/**
- * Now is this fit just in-between only?
+ * Now is this fit just in-between only?
* i.e.:
* p---+ +----q
* v v
@@ -856,8 +856,8 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
}
}
if (th->th_seq == (p->tqe_start + p->tqe_len)) {
- /*
- * If dovetails in with this one
+ /*
+ * If dovetails in with this one
* append it.
*/
/**
@@ -882,7 +882,7 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
q = p;
} else {
/*
- * The new data runs over the
+ * The new data runs over the
* top of previously sack'd data (in q).
* It may be partially overlapping, or
* it may overlap the entire segment.
@@ -903,7 +903,7 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
#endif
tcp_reass_replace(tp, q, m, th->th_seq, *tlenp, mlast, lenofoh, th->th_flags);
} else {
- /*
+ /*
* We just need to prepend the data
* to this. It does not overrun
* the end.
@@ -924,8 +924,8 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
*tlenp = q->tqe_len;
goto present;
- /*
- * When we reach here we can't combine it
+ /*
+ * When we reach here we can't combine it
* with any existing segment.
*
* Limit the number of segments that can be queued to reduce the
@@ -965,9 +965,9 @@ new_entry:
if (tcp_new_limits) {
if ((tp->t_segqlen > tcp_reass_queue_guard) &&
(*tlenp < MSIZE)) {
- /*
+ /*
* This is really a lie, we are not full but
- * are getting a segment that is above
+ * are getting a segment that is above
* guard threshold. If it is and its below
* a mbuf size (256) we drop it if it
* can't fill in some place.
diff --git a/sys/netinet/tcp_sack.c b/sys/netinet/tcp_sack.c
index 80c014f6fa3d..c9874a37a1d8 100644
--- a/sys/netinet/tcp_sack.c
+++ b/sys/netinet/tcp_sack.c
@@ -141,7 +141,7 @@ SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, maxholes, CTLFLAG_VNET | CTLFLAG_RW,
VNET_DEFINE(int, tcp_sack_globalmaxholes) = 65536;
SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, globalmaxholes, CTLFLAG_VNET | CTLFLAG_RW,
- &VNET_NAME(tcp_sack_globalmaxholes), 0,
+ &VNET_NAME(tcp_sack_globalmaxholes), 0,
"Global maximum number of TCP SACK holes");
VNET_DEFINE(int, tcp_sack_globalholes) = 0;
@@ -397,7 +397,7 @@ tcp_clean_dsack_blocks(struct tcpcb *tp)
/*
* Clean up any DSACK blocks that
* are in our queue of sack blocks.
- *
+ *
*/
num_saved = 0;
for (i = 0; i < tp->rcv_numsacks; i++) {
@@ -638,18 +638,18 @@ tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, tcp_seq th_ack)
sblkp--;
sack_changed = 1;
} else {
- /*
- * We failed to add a new hole based on the current
- * sack block. Skip over all the sack blocks that
+ /*
+ * We failed to add a new hole based on the current
+ * sack block. Skip over all the sack blocks that
* fall completely to the right of snd_fack and
* proceed to trim the scoreboard based on the
* remaining sack blocks. This also trims the
* scoreboard for th_ack (which is sack_blocks[0]).
*/
- while (sblkp >= sack_blocks &&
+ while (sblkp >= sack_blocks &&
SEQ_LT(tp->snd_fack, sblkp->start))
sblkp--;
- if (sblkp >= sack_blocks &&
+ if (sblkp >= sack_blocks &&
SEQ_LT(tp->snd_fack, sblkp->end))
tp->snd_fack = sblkp->end;
}
diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c
index 3801ebc0ec20..16a6d3053de4 100644
--- a/sys/netinet/tcp_subr.c
+++ b/sys/netinet/tcp_subr.c
@@ -390,11 +390,11 @@ struct tcp_function_block *
find_and_ref_tcp_functions(struct tcp_function_set *fs)
{
struct tcp_function_block *blk;
-
- rw_rlock(&tcp_function_lock);
+
+ rw_rlock(&tcp_function_lock);
blk = find_tcp_functions_locked(fs);
if (blk)
- refcount_acquire(&blk->tfb_refcnt);
+ refcount_acquire(&blk->tfb_refcnt);
rw_runlock(&tcp_function_lock);
return(blk);
}
@@ -403,10 +403,10 @@ struct tcp_function_block *
find_and_ref_tcp_fb(struct tcp_function_block *blk)
{
struct tcp_function_block *rblk;
-
- rw_rlock(&tcp_function_lock);
+
+ rw_rlock(&tcp_function_lock);
rblk = find_tcp_fb_locked(blk, NULL);
- if (rblk)
+ if (rblk)
refcount_acquire(&rblk->tfb_refcnt);
rw_runlock(&tcp_function_lock);
return(rblk);
@@ -510,7 +510,7 @@ sysctl_net_inet_default_tcp_functions(SYSCTL_HANDLER_ARGS)
strcpy(fs.function_set_name, blk->tfb_tcp_block_name);
fs.pcbcnt = blk->tfb_refcnt;
}
- rw_runlock(&tcp_function_lock);
+ rw_runlock(&tcp_function_lock);
error = sysctl_handle_string(oidp, fs.function_set_name,
sizeof(fs.function_set_name), req);
@@ -521,8 +521,8 @@ sysctl_net_inet_default_tcp_functions(SYSCTL_HANDLER_ARGS)
rw_wlock(&tcp_function_lock);
blk = find_tcp_functions_locked(&fs);
if ((blk == NULL) ||
- (blk->tfb_flags & TCP_FUNC_BEING_REMOVED)) {
- error = ENOENT;
+ (blk->tfb_flags & TCP_FUNC_BEING_REMOVED)) {
+ error = ENOENT;
goto done;
}
tcp_func_set_ptr = blk;
@@ -564,7 +564,7 @@ sysctl_net_inet_list_available(SYSCTL_HANDLER_ARGS)
bufsz -= linesz;
outsz = linesz;
- rw_rlock(&tcp_function_lock);
+ rw_rlock(&tcp_function_lock);
TAILQ_FOREACH(f, &t_functions, tf_next) {
alias = (f->tf_name != f->tf_fb->tfb_tcp_block_name);
linesz = snprintf(cp, bufsz, "%-32s%c %-32s %u\n",
@@ -866,7 +866,7 @@ register_tcp_functions_as_names(struct tcp_function_block *blk, int wait,
(blk->tfb_tcp_do_segment == NULL) ||
(blk->tfb_tcp_ctloutput == NULL) ||
(strlen(blk->tfb_tcp_block_name) == 0)) {
- /*
+ /*
* These functions are required and you
* need a name.
*/
@@ -878,7 +878,7 @@ register_tcp_functions_as_names(struct tcp_function_block *blk, int wait,
blk->tfb_tcp_timer_active ||
blk->tfb_tcp_timer_stop) {
/*
- * If you define one timer function you
+ * If you define one timer function you
* must have them all.
*/
if ((blk->tfb_tcp_timer_stop_all == NULL) ||
@@ -1481,7 +1481,7 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
m = n;
} else {
/*
- * reuse the mbuf.
+ * reuse the mbuf.
* XXX MRT We inherit the FIB, which is lucky.
*/
m_freem(m->m_next);
@@ -1914,12 +1914,12 @@ tcp_discardcb(struct tcpcb *tp)
tcp_timer_stop(tp, TT_2MSL);
tcp_timer_stop(tp, TT_DELACK);
if (tp->t_fb->tfb_tcp_timer_stop_all) {
- /*
- * Call the stop-all function of the methods,
+ /*
+ * Call the stop-all function of the methods,
* this function should call the tcp_timer_stop()
* method with each of the function specific timeouts.
* That stop will be called via the tfb_tcp_timer_stop()
- * which should use the async drain function of the
+ * which should use the async drain function of the
* callout system (see tcp_var.h).
*/
tp->t_fb->tfb_tcp_timer_stop_all(tp);
@@ -1989,7 +1989,7 @@ tcp_discardcb(struct tcpcb *tp)
if (tp->t_flags & TF_TOE)
tcp_offload_detach(tp);
#endif
-
+
tcp_free_sackholes(tp);
#ifdef TCPPCAP
@@ -2035,7 +2035,7 @@ tcp_timer_discard(void *ptp)
struct inpcb *inp;
struct tcpcb *tp;
struct epoch_tracker et;
-
+
tp = (struct tcpcb *)ptp;
CURVNET_SET(tp->t_vnet);
NET_EPOCH_ENTER(et);
@@ -2448,7 +2448,7 @@ tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
if (cmd == PRC_MSGSIZE)
notify = tcp_mtudisc_notify;
else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
- cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
+ cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
cmd == PRC_TIMXCEED_INTRANS) && ip)
notify = tcp_drop_syn_sent;
@@ -2582,7 +2582,7 @@ tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
if (cmd == PRC_MSGSIZE)
notify = tcp_mtudisc_notify;
else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
- cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
+ cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
cmd == PRC_TIMXCEED_INTRANS) && ip6 != NULL)
notify = tcp_drop_syn_sent;
@@ -2850,7 +2850,7 @@ tcp_drop_syn_sent(struct inpcb *inp, int errno)
if (IS_FASTOPEN(tp->t_flags))
tcp_fastopen_disable_path(tp);
-
+
tp = tcp_drop(tp, errno);
if (tp != NULL)
return (inp);
@@ -2887,7 +2887,7 @@ tcp_mtudisc(struct inpcb *inp, int mtuoffer)
KASSERT(tp != NULL, ("tcp_mtudisc: tp == NULL"));
tcp_mss_update(tp, -1, mtuoffer, NULL, NULL);
-
+
so = inp->inp_socket;
SOCKBUF_LOCK(&so->so_snd);
/* If the mss is larger than the socket buffer, decrease the mss. */
@@ -3248,7 +3248,7 @@ sysctl_switch_tls(SYSCTL_HANDLER_ARGS)
INP_WUNLOCK(inp);
} else {
struct socket *so;
-
+
so = inp->inp_socket;
soref(so);
error = ktls_set_tx_mode(so,
diff --git a/sys/netinet/tcp_syncache.c b/sys/netinet/tcp_syncache.c
index e5f7dde531ca..26d1a68a45cd 100644
--- a/sys/netinet/tcp_syncache.c
+++ b/sys/netinet/tcp_syncache.c
@@ -902,7 +902,7 @@ syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
struct sockaddr_in sin;
inp->inp_options = (m) ? ip_srcroute(m) : NULL;
-
+
if (inp->inp_options == NULL) {
inp->inp_options = sc->sc_ipopts;
sc->sc_ipopts = NULL;
@@ -946,11 +946,11 @@ syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
if (V_functions_inherit_listen_socket_stack && blk != tp->t_fb) {
/*
* Our parents t_fb was not the default,
- * we need to release our ref on tp->t_fb and
+ * we need to release our ref on tp->t_fb and
* pickup one on the new entry.
*/
struct tcp_function_block *rblk;
-
+
rblk = find_and_ref_tcp_fb(blk);
KASSERT(rblk != NULL,
("cannot find blk %p out of syncache?", blk));
@@ -967,7 +967,7 @@ syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
if (tp->t_fb->tfb_tcp_fb_init) {
(*tp->t_fb->tfb_tcp_fb_init)(tp);
}
- }
+ }
tp->snd_wl1 = sc->sc_irs;
tp->snd_max = tp->iss + 1;
tp->snd_nxt = tp->iss + 1;
@@ -1207,7 +1207,7 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
/*
* Pull out the entry to unlock the bucket row.
- *
+ *
* NOTE: We must decrease TCPS_SYN_RECEIVED count here, not
* tcp_state_change(). The tcpcb is not existent at this
* moment. A new one will be allocated via syncache_socket->
@@ -2172,7 +2172,7 @@ syncookie_generate(struct syncache_head *sch, struct syncache *sc)
}
static struct syncache *
-syncookie_lookup(struct in_conninfo *inc, struct syncache_head *sch,
+syncookie_lookup(struct in_conninfo *inc, struct syncache_head *sch,
struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
struct socket *lso)
{
@@ -2208,7 +2208,7 @@ syncookie_lookup(struct in_conninfo *inc, struct syncache_head *sch,
sc->sc_flags = 0;
bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
sc->sc_ipopts = NULL;
-
+
sc->sc_irs = seq;
sc->sc_iss = ack;
diff --git a/sys/netinet/tcp_timer.c b/sys/netinet/tcp_timer.c
index c3fc0c4183d0..2209cdd9cafe 100644
--- a/sys/netinet/tcp_timer.c
+++ b/sys/netinet/tcp_timer.c
@@ -131,7 +131,7 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_VNET|CTLFLAG_RW,
"Assume SO_KEEPALIVE on all TCP connections");
int tcp_fast_finwait2_recycle = 0;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, fast_finwait2_recycle, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, fast_finwait2_recycle, CTLFLAG_RW,
&tcp_fast_finwait2_recycle, 0,
"Recycle closed FIN_WAIT_2 connections faster");
@@ -326,8 +326,8 @@ tcp_timer_2msl(void *xtp)
* If in TIME_WAIT state just ignore as this timeout is handled in
* tcp_tw_2msl_scan().
*
- * If fastrecycle of FIN_WAIT_2, in FIN_WAIT_2 and receiver has closed,
- * there's no point in hanging onto FIN_WAIT_2 socket. Just close it.
+ * If fastrecycle of FIN_WAIT_2, in FIN_WAIT_2 and receiver has closed,
+ * there's no point in hanging onto FIN_WAIT_2 socket. Just close it.
* Ignore fact that there were recent incoming segments.
*/
if ((inp->inp_flags & INP_TIMEWAIT) != 0) {
@@ -336,7 +336,7 @@ tcp_timer_2msl(void *xtp)
return;
}
if (tcp_fast_finwait2_recycle && tp->t_state == TCPS_FIN_WAIT_2 &&
- tp->t_inpcb && tp->t_inpcb->inp_socket &&
+ tp->t_inpcb && tp->t_inpcb->inp_socket &&
(tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) {
TCPSTAT_INC(tcps_finwait2_drops);
if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
@@ -344,7 +344,7 @@ tcp_timer_2msl(void *xtp)
goto out;
}
NET_EPOCH_ENTER(et);
- tp = tcp_close(tp);
+ tp = tcp_close(tp);
NET_EPOCH_EXIT(et);
tcp_inpinfo_lock_del(inp, tp);
goto out;
@@ -723,7 +723,7 @@ tcp_timer_rexmt(void * xtp)
tp->t_pmtud_saved_maxseg = tp->t_maxseg;
}
- /*
+ /*
* Reduce the MSS to blackhole value or to the default
* in an attempt to retransmit.
*/
@@ -930,7 +930,7 @@ tcp_timer_active(struct tcpcb *tp, uint32_t timer_type)
* timer never to run. The flag is needed to assure
* a race does not leave it running and cause
* the timer to possibly restart itself (keep and persist
- * especially do this).
+ * especially do this).
*/
int
tcp_timer_suspend(struct tcpcb *tp, uint32_t timer_type)
@@ -988,7 +988,7 @@ tcp_timers_unsuspend(struct tcpcb *tp, uint32_t timer_type)
(tcp_timer_active((tp), TT_PERSIST) == 0) &&
tp->snd_wnd) {
/* We have outstanding data activate a timer */
- tcp_timer_activate(tp, TT_REXMT,
+ tcp_timer_activate(tp, TT_REXMT,
tp->t_rxtcur);
}
}
@@ -1053,7 +1053,7 @@ tcp_timer_stop(struct tcpcb *tp, uint32_t timer_type)
break;
default:
if (tp->t_fb->tfb_tcp_timer_stop) {
- /*
+ /*
* XXXrrs we need to look at this with the
* stop case below (flags).
*/
@@ -1067,7 +1067,7 @@ tcp_timer_stop(struct tcpcb *tp, uint32_t timer_type)
/*
* Can't stop the callout, defer tcpcb actual deletion
* to the last one. We do this using the async drain
- * function and incrementing the count in
+ * function and incrementing the count in
*/
tp->t_timers->tt_draincnt++;
}
diff --git a/sys/netinet/tcp_timer.h b/sys/netinet/tcp_timer.h
index fe3616c26641..01880c52b84c 100644
--- a/sys/netinet/tcp_timer.h
+++ b/sys/netinet/tcp_timer.h
@@ -168,7 +168,7 @@ struct tcp_timer {
#define TT_2MSL 0x0010
#define TT_MASK (TT_DELACK|TT_REXMT|TT_PERSIST|TT_KEEP|TT_2MSL)
-/*
+/*
* Suspend flags - used when suspending a timer
* from ever running again.
*/
diff --git a/sys/netinet/tcp_usrreq.c b/sys/netinet/tcp_usrreq.c
index 9038a7695666..736dc4dab644 100644
--- a/sys/netinet/tcp_usrreq.c
+++ b/sys/netinet/tcp_usrreq.c
@@ -1713,7 +1713,7 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt)
* Protect the TCP option TCP_FUNCTION_BLK so
* that a sub-function can *never* overwrite this.
*/
- if ((sopt->sopt_dir == SOPT_SET) &&
+ if ((sopt->sopt_dir == SOPT_SET) &&
(sopt->sopt_name == TCP_FUNCTION_BLK)) {
INP_WUNLOCK(inp);
error = sooptcopyin(sopt, &fsn, sizeof fsn,
@@ -1733,13 +1733,13 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt)
return (0);
}
if (tp->t_state != TCPS_CLOSED) {
- /*
+ /*
* The user has advanced the state
* past the initial point, we may not
- * be able to switch.
+ * be able to switch.
*/
if (blk->tfb_tcp_handoff_ok != NULL) {
- /*
+ /*
* Does the stack provide a
* query mechanism, if so it may
* still be possible?
@@ -1758,19 +1758,19 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt)
INP_WUNLOCK(inp);
return (ENOENT);
}
- /*
+ /*
* Release the old refcnt, the
* lookup acquired a ref on the
* new one already.
*/
if (tp->t_fb->tfb_tcp_fb_fini) {
- /*
+ /*
* Tell the stack to cleanup with 0 i.e.
* the tcb is not going away.
*/
(*tp->t_fb->tfb_tcp_fb_fini)(tp, 0);
}
-#ifdef TCPHPTS
+#ifdef TCPHPTS
/* Assure that we are not on any hpts */
tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_ALL);
#endif
@@ -1800,7 +1800,7 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt)
err_out:
INP_WUNLOCK(inp);
return (error);
- } else if ((sopt->sopt_dir == SOPT_GET) &&
+ } else if ((sopt->sopt_dir == SOPT_GET) &&
(sopt->sopt_name == TCP_FUNCTION_BLK)) {
strncpy(fsn.function_set_name, tp->t_fb->tfb_tcp_block_name,
TCP_FUNCTION_NAME_LEN_MAX);
@@ -2493,7 +2493,7 @@ tcp_usrclosed(struct tcpcb *tp)
if (tp->t_state == TCPS_FIN_WAIT_2) {
int timeout;
- timeout = (tcp_fast_finwait2_recycle) ?
+ timeout = (tcp_fast_finwait2_recycle) ?
tcp_finwait2_timeout : TP_MAXIDLE(tp);
tcp_timer_activate(tp, TT_2MSL, timeout);
}
diff --git a/sys/netinet/tcp_var.h b/sys/netinet/tcp_var.h
index 7539dcb7ffaa..d8a71eb88542 100644
--- a/sys/netinet/tcp_var.h
+++ b/sys/netinet/tcp_var.h
@@ -240,7 +240,7 @@ struct tcptemp {
/* Minimum map entries limit value, if set */
#define TCP_MIN_MAP_ENTRIES_LIMIT 128
-/*
+/*
* TODO: We yet need to brave plowing in
* to tcp_input() and the pru_usrreq() block.
* Right now these go to the old standards which
@@ -612,7 +612,7 @@ struct tcpstat {
uint64_t tcps_sack_rcv_blocks; /* SACK blocks (options) received */
uint64_t tcps_sack_send_blocks; /* SACK blocks (options) sent */
uint64_t tcps_sack_sboverflow; /* times scoreboard overflowed */
-
+
/* ECN related stats */
uint64_t tcps_ecn_ce; /* ECN Congestion Experienced */
uint64_t tcps_ecn_ect0; /* ECN Capable Transport */
diff --git a/sys/netinet/udp.h b/sys/netinet/udp.h
index 7c08135d02cf..263a64fbe588 100644
--- a/sys/netinet/udp.h
+++ b/sys/netinet/udp.h
@@ -47,7 +47,7 @@ struct udphdr {
u_short uh_sum; /* udp checksum */
};
-/*
+/*
* User-settable options (used with setsockopt).
*/
#define UDP_ENCAP 1
diff --git a/sys/netinet/udp_usrreq.c b/sys/netinet/udp_usrreq.c
index 749fb9d2ae27..79f78813154d 100644
--- a/sys/netinet/udp_usrreq.c
+++ b/sys/netinet/udp_usrreq.c
@@ -641,7 +641,7 @@ udp_input(struct mbuf **mp, int *offp, int proto)
UDPLITE_PROBE(receive, NULL, last, ip, last, uh);
else
UDP_PROBE(receive, NULL, last, ip, last, uh);
- if (udp_append(last, ip, m, iphlen, udp_in) == 0)
+ if (udp_append(last, ip, m, iphlen, udp_in) == 0)
INP_RUNLOCK(last);
inp_lost:
return (IPPROTO_DONE);
@@ -741,7 +741,7 @@ udp_input(struct mbuf **mp, int *offp, int proto)
UDPLITE_PROBE(receive, NULL, inp, ip, inp, uh);
else
UDP_PROBE(receive, NULL, inp, ip, inp, uh);
- if (udp_append(inp, ip, m, iphlen, udp_in) == 0)
+ if (udp_append(inp, ip, m, iphlen, udp_in) == 0)
INP_RUNLOCK(inp);
return (IPPROTO_DONE);
@@ -1075,7 +1075,7 @@ udp_ctloutput(struct socket *so, struct sockopt *sopt)
break;
}
break;
- }
+ }
return (error);
}
diff --git a/sys/netinet/udp_var.h b/sys/netinet/udp_var.h
index 965bd490fdf2..5d04a9da9c1a 100644
--- a/sys/netinet/udp_var.h
+++ b/sys/netinet/udp_var.h
@@ -60,7 +60,7 @@ struct mbuf;
typedef void(*udp_tun_func_t)(struct mbuf *, int, struct inpcb *,
const struct sockaddr *, void *);
typedef void(*udp_tun_icmp_t)(int, struct sockaddr *, void *, void *);
-
+
/*
* UDP control block; one per udp.
*/
diff --git a/sys/netinet/udplite.h b/sys/netinet/udplite.h
index 57a1422a9407..8cd8d833f1e1 100644
--- a/sys/netinet/udplite.h
+++ b/sys/netinet/udplite.h
@@ -40,7 +40,7 @@ struct udplitehdr {
u_short udplite_checksum; /* UDP-Lite checksum */
};
-/*
+/*
* User-settable options (used with setsockopt).
*/
#define UDPLITE_SEND_CSCOV 2 /* Sender checksum coverage. */