aboutsummaryrefslogtreecommitdiff
path: root/sys/kern/uipc_usrreq.c
diff options
context:
space:
mode:
authorRobert Watson <rwatson@FreeBSD.org>2006-07-11 21:49:54 +0000
committerRobert Watson <rwatson@FreeBSD.org>2006-07-11 21:49:54 +0000
commit337cc6b60e79b43ed75cb11481a7961790846251 (patch)
tree695895022b917555c7f0a6de78edd1e686e7ff0d /sys/kern/uipc_usrreq.c
parent90aff9de2d1dd3d55b66001feb65700932d24967 (diff)
downloadsrc-337cc6b60e79b43ed75cb11481a7961790846251.tar.gz
src-337cc6b60e79b43ed75cb11481a7961790846251.zip
Reduce periods of simultaneous acquisition of various socket buffer
locks and the unp lock during uipc_rcvd() and uipc_send() by caching certain values from one structure while its locks are held, and applying them to a second structure while its locks are held. If done carefully, this should be correct, and will reduce the amount of work done with the global unp lock held. Tested by: kris (earlier version)
Notes
Notes: svn path=/head/; revision=160278
Diffstat (limited to 'sys/kern/uipc_usrreq.c')
-rw-r--r-- sys/kern/uipc_usrreq.c | 48
1 file changed, 28 insertions(+), 20 deletions(-)
diff --git a/sys/kern/uipc_usrreq.c b/sys/kern/uipc_usrreq.c
index 17d361105c8f..2c7222398cb6 100644
--- a/sys/kern/uipc_usrreq.c
+++ b/sys/kern/uipc_usrreq.c
@@ -299,41 +299,45 @@ uipc_rcvd(struct socket *so, int flags)
{
struct unpcb *unp;
struct socket *so2;
+ u_int mbcnt, sbcc;
u_long newhiwat;
unp = sotounpcb(so);
KASSERT(unp != NULL, ("uipc_rcvd: unp == NULL"));
- UNP_LOCK();
switch (so->so_type) {
case SOCK_DGRAM:
panic("uipc_rcvd DGRAM?");
/*NOTREACHED*/
case SOCK_STREAM:
- if (unp->unp_conn == NULL)
- break;
- so2 = unp->unp_conn->unp_socket;
- SOCKBUF_LOCK(&so2->so_snd);
- SOCKBUF_LOCK(&so->so_rcv);
/*
* Adjust backpressure on sender
* and wakeup any waiting to write.
*/
- so2->so_snd.sb_mbmax += unp->unp_mbcnt - so->so_rcv.sb_mbcnt;
- unp->unp_mbcnt = so->so_rcv.sb_mbcnt;
- newhiwat = so2->so_snd.sb_hiwat + unp->unp_cc -
- so->so_rcv.sb_cc;
+ SOCKBUF_LOCK(&so->so_rcv);
+ mbcnt = so->so_rcv.sb_mbcnt;
+ sbcc = so->so_rcv.sb_cc;
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ UNP_LOCK();
+ if (unp->unp_conn == NULL) {
+ UNP_UNLOCK();
+ break;
+ }
+ so2 = unp->unp_conn->unp_socket;
+ SOCKBUF_LOCK(&so2->so_snd);
+ so2->so_snd.sb_mbmax += unp->unp_mbcnt - mbcnt;
+ newhiwat = so2->so_snd.sb_hiwat + unp->unp_cc - sbcc;
(void)chgsbsize(so2->so_cred->cr_uidinfo, &so2->so_snd.sb_hiwat,
newhiwat, RLIM_INFINITY);
- unp->unp_cc = so->so_rcv.sb_cc;
- SOCKBUF_UNLOCK(&so->so_rcv);
sowwakeup_locked(so2);
+ unp->unp_mbcnt = mbcnt;
+ unp->unp_cc = sbcc;
+ UNP_UNLOCK();
break;
default:
panic("uipc_rcvd unknown socktype");
}
- UNP_UNLOCK();
return (0);
}
@@ -346,6 +350,7 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
int error = 0;
struct unpcb *unp;
struct socket *so2;
+ u_int mbcnt, sbcc;
u_long newhiwat;
unp = sotounpcb(so);
@@ -416,9 +421,8 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
}
}
- SOCKBUF_LOCK(&so->so_snd);
+ /* Lockless read. */
if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
- SOCKBUF_UNLOCK(&so->so_snd);
error = EPIPE;
break;
}
@@ -445,16 +449,20 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
} else {
sbappend_locked(&so2->so_rcv, m);
}
- so->so_snd.sb_mbmax -=
- so2->so_rcv.sb_mbcnt - unp->unp_conn->unp_mbcnt;
+ mbcnt = so2->so_rcv.sb_mbcnt - unp->unp_conn->unp_mbcnt;
unp->unp_conn->unp_mbcnt = so2->so_rcv.sb_mbcnt;
+ sbcc = so2->so_rcv.sb_cc;
+ sorwakeup_locked(so2);
+
+ SOCKBUF_LOCK(&so->so_snd);
newhiwat = so->so_snd.sb_hiwat -
- (so2->so_rcv.sb_cc - unp->unp_conn->unp_cc);
+ (sbcc - unp->unp_conn->unp_cc);
(void)chgsbsize(so->so_cred->cr_uidinfo, &so->so_snd.sb_hiwat,
newhiwat, RLIM_INFINITY);
+ so->so_snd.sb_mbmax -= mbcnt;
SOCKBUF_UNLOCK(&so->so_snd);
- unp->unp_conn->unp_cc = so2->so_rcv.sb_cc;
- sorwakeup_locked(so2);
+
+ unp->unp_conn->unp_cc = sbcc;
m = NULL;
break;