author     John Baldwin <jhb@FreeBSD.org>    2004-10-12 18:36:20 +0000
committer  John Baldwin <jhb@FreeBSD.org>    2004-10-12 18:36:20 +0000
commit     2ff0e645d12894c66e97d9d33c33a1d296d74fa2 (patch)
tree       81c7d98eb60f7036658dff8a8da6c4a5f54833aa /sys/kern/kern_condvar.c
parent     c7836018ea20fe6b3b0ed5b50317444970c042cb (diff)
Refine the turnstile and sleep queue interfaces just a bit:

- Add a new _lock() call to each API that locks the associated chain lock
  for a lock_object pointer or wait channel.  The _lookup() functions now
  require that the chain lock be locked via _lock() when they are called.
- Change sleepq_add(), turnstile_wait() and turnstile_claim() to lookup
  the associated queue structure internally via _lookup() rather than
  accepting a pointer from the caller.  For turnstiles, this means that
  the actual lookup of the turnstile in the hash table is only done when
  the thread actually blocks rather than being done on each loop iteration
  in _mtx_lock_sleep().  For sleep queues, this means that sleepq_lookup()
  is no longer used outside of the sleep queue code except to implement an
  assertion in cv_destroy().
- Change sleepq_broadcast() and sleepq_signal() to require that the chain
  lock is already held when they are called.  For condition variables,
  this lets the cv_broadcast() and cv_signal() functions lock the sleep
  queue chain lock while testing the waiters count.  This means that the
  waiters count internal to condition variables is no longer protected by
  the interlock mutex, and cv_broadcast() and cv_signal() no longer
  require that the interlock be held when they are called.  This lets
  consumers of condition variables drop the lock before waking other
  threads, which can result in fewer context switches.

MFC after:	1 month
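For reference, a minimal sketch of the revised sleep queue calling convention
described above (illustrative only, not part of the commit; example_sleep(),
example_wakeup() and the "example" wmesg are hypothetical names, while the
sleepq_*() calls and flags are the ones used in the diff below):

/*
 * Sketch of the new protocol: the wait channel's chain lock is taken
 * explicitly with sleepq_lock() before sleepq_add()/sleepq_signal(),
 * and sleepq_add() no longer takes a struct sleepqueue pointer.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sleepqueue.h>

static void
example_sleep(void *wchan, struct mtx *mp)
{

	mtx_assert(mp, MA_OWNED);
	sleepq_lock(wchan);		/* lock the sleep queue chain */
	mtx_unlock(mp);			/* interlock may be dropped now */
	sleepq_add(wchan, mp, "example", SLEEPQ_CONDVAR);
	sleepq_wait(wchan);		/* blocks; returns without the chain lock */
	mtx_lock(mp);
}

static void
example_wakeup(void *wchan)
{

	/*
	 * As in the revised cv_signal() below, sleepq_signal() expects the
	 * chain lock to be held and drops it itself.
	 */
	sleepq_lock(wchan);
	sleepq_signal(wchan, SLEEPQ_CONDVAR, -1);
}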
Notes:
    svn path=/head/; revision=136445
Diffstat (limited to 'sys/kern/kern_condvar.c')
-rw-r--r--   sys/kern/kern_condvar.c   31
1 file changed, 16 insertions, 15 deletions
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index fc0f7996f5a1..8ccea3aab414 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -76,8 +76,9 @@ void
cv_destroy(struct cv *cvp)
{
#ifdef INVARIANTS
- struct sleepqueue *sq;
+ struct sleepqueue *sq;
+ sleepq_lock(cvp);
sq = sleepq_lookup(cvp);
sleepq_release(cvp);
KASSERT(sq == NULL, ("%s: associated sleep queue non-empty", __func__));
@@ -94,7 +95,6 @@ cv_destroy(struct cv *cvp)
void
cv_wait(struct cv *cvp, struct mtx *mp)
{
- struct sleepqueue *sq;
struct thread *td;
WITNESS_SAVE_DECL(mp);
@@ -118,13 +118,13 @@ cv_wait(struct cv *cvp, struct mtx *mp)
return;
}
- sq = sleepq_lookup(cvp);
+ sleepq_lock(cvp);
cvp->cv_waiters++;
DROP_GIANT();
mtx_unlock(mp);
- sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
+ sleepq_add(cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
sleepq_wait(cvp);
#ifdef KTRACE
@@ -145,7 +145,6 @@ cv_wait(struct cv *cvp, struct mtx *mp)
int
cv_wait_sig(struct cv *cvp, struct mtx *mp)
{
- struct sleepqueue *sq;
struct thread *td;
struct proc *p;
int rval, sig;
@@ -172,7 +171,7 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
return (0);
}
- sq = sleepq_lookup(cvp);
+ sleepq_lock(cvp);
/*
* Don't bother sleeping if we are exiting and not the exiting
@@ -190,7 +189,7 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
DROP_GIANT();
mtx_unlock(mp);
- sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR |
+ sleepq_add(cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR |
SLEEPQ_INTERRUPTIBLE);
sig = sleepq_catch_signals(cvp);
rval = sleepq_wait_sig(cvp);
@@ -216,7 +215,6 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
int
cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
{
- struct sleepqueue *sq;
struct thread *td;
int rval;
WITNESS_SAVE_DECL(mp);
@@ -242,13 +240,13 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
return 0;
}
- sq = sleepq_lookup(cvp);
+ sleepq_lock(cvp);
cvp->cv_waiters++;
DROP_GIANT();
mtx_unlock(mp);
- sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
+ sleepq_add(cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
sleepq_set_timeout(cvp, timo);
rval = sleepq_timedwait(cvp);
@@ -272,7 +270,6 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
int
cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
{
- struct sleepqueue *sq;
struct thread *td;
struct proc *p;
int rval;
@@ -301,7 +298,7 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
return 0;
}
- sq = sleepq_lookup(cvp);
+ sleepq_lock(cvp);
/*
* Don't bother sleeping if we are exiting and not the exiting
@@ -319,7 +316,7 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
DROP_GIANT();
mtx_unlock(mp);
- sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR |
+ sleepq_add(cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR |
SLEEPQ_INTERRUPTIBLE);
sleepq_set_timeout(cvp, timo);
sig = sleepq_catch_signals(cvp);
@@ -349,10 +346,12 @@ void
cv_signal(struct cv *cvp)
{
+ sleepq_lock(cvp);
if (cvp->cv_waiters > 0) {
cvp->cv_waiters--;
sleepq_signal(cvp, SLEEPQ_CONDVAR, -1);
- }
+ } else
+ sleepq_release(cvp);
}
/*
@@ -363,8 +362,10 @@ void
cv_broadcastpri(struct cv *cvp, int pri)
{
+ sleepq_lock(cvp);
if (cvp->cv_waiters > 0) {
cvp->cv_waiters = 0;
sleepq_broadcast(cvp, SLEEPQ_CONDVAR, pri);
- }
+ } else
+ sleepq_release(cvp);
}
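
As a usage illustration of the last two hunks (a hypothetical consumer sketch,
not part of this commit; work_mtx, work_cv, work_ready and the function names
are made up): because cv_signal() and cv_broadcast() no longer require the
interlock, a producer may drop the mutex before waking waiters, while
cv_wait() still needs the mutex held:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

static struct mtx work_mtx;
static struct cv work_cv;
static int work_ready;

static void
work_produce(void)
{

	mtx_lock(&work_mtx);
	work_ready = 1;
	mtx_unlock(&work_mtx);	/* drop the interlock first ... */
	cv_signal(&work_cv);	/* ... then wake a waiter, avoiding an
				   immediate block on work_mtx */
}

static void
work_consume(void)
{

	mtx_lock(&work_mtx);
	while (work_ready == 0)
		cv_wait(&work_cv, &work_mtx);	/* interlock still required */
	work_ready = 0;
	mtx_unlock(&work_mtx);
}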