author		John Baldwin <jhb@FreeBSD.org>	2015-05-21 16:43:26 +0000
committer	John Baldwin <jhb@FreeBSD.org>	2015-05-21 16:43:26 +0000
commit	c636f94bd2ff15be5b904939872b4bce31456c18 (patch)
tree	e3d629fdffd1d096c7a4fe3aa0ad5da3f0a1ec47	/sys/kern/kern_condvar.c
parent	b6fb3fe0bdf0633561b09353e6dc425a82188b77 (diff)
Revert r282971.  It depends on condvar consumers not destroying condvars
until all threads sleeping on a condvar have resumed execution after
being awakened.  However, there are cases where that guarantee is very
hard to provide.
Notes:
    svn path=/head/; revision=283250
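For context: the reverted scheme had each waiter decrement cv_waiters
itself, with an atomic op, after sleepq_wait() returned.  That is only
safe if no consumer destroys the condvar before every awakened sleeper
has actually resumed.  A hypothetical consumer (struct foo, M_FOO, and
the helpers below are illustrative sketches, not code from this commit)
shows how that assumption can break:

/*
 * Hypothetical consumer illustrating the hazard; not from this commit.
 * Under r282971 a waiter still wrote to cvp->cv_waiters after
 * sleepq_wait() returned.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>

MALLOC_DECLARE(M_FOO);		/* hypothetical malloc type */

struct foo {
	struct mtx	f_lock;
	struct cv	f_cv;
	int		f_done;
};

static void
foo_wait(struct foo *fp)
{
	mtx_lock(&fp->f_lock);
	while (!fp->f_done)
		cv_wait(&fp->f_cv, &fp->f_lock);
	mtx_unlock(&fp->f_lock);
}

static void
foo_finish(struct foo *fp)
{
	mtx_lock(&fp->f_lock);
	fp->f_done = 1;
	cv_broadcast(&fp->f_cv);	/* wakes the sleeper...           */
	mtx_unlock(&fp->f_lock);
	cv_destroy(&fp->f_cv);		/* ...but under r282971 these two */
	free(fp, M_FOO);		/* could run before the woken     */
					/* thread's atomic decrement of   */
					/* cv_waiters, turning that       */
					/* decrement into a write to      */
					/* freed memory.                  */
}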
Diffstat (limited to 'sys/kern/kern_condvar.c')
-rw-r--r--	sys/kern/kern_condvar.c	23
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index 21807649296d..2700a25d477c 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -122,7 +122,7 @@ _cv_wait(struct cv *cvp, struct lock_object *lock)
sleepq_lock(cvp);
- atomic_add_int(&cvp->cv_waiters, 1);
+ cvp->cv_waiters++;
if (lock == &Giant.lock_object)
mtx_assert(&Giant, MA_OWNED);
DROP_GIANT();
@@ -137,7 +137,6 @@ _cv_wait(struct cv *cvp, struct lock_object *lock)
sleepq_lock(cvp);
}
sleepq_wait(cvp, 0);
- atomic_subtract_int(&cvp->cv_waiters, 1);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
@@ -185,7 +184,7 @@ _cv_wait_unlock(struct cv *cvp, struct lock_object *lock)
sleepq_lock(cvp);
- atomic_add_int(&cvp->cv_waiters, 1);
+ cvp->cv_waiters++;
DROP_GIANT();
sleepq_add(cvp, lock, cvp->cv_description, SLEEPQ_CONDVAR, 0);
@@ -195,7 +194,6 @@ _cv_wait_unlock(struct cv *cvp, struct lock_object *lock)
if (class->lc_flags & LC_SLEEPABLE)
sleepq_lock(cvp);
sleepq_wait(cvp, 0);
- atomic_subtract_int(&cvp->cv_waiters, 1);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
@@ -242,7 +240,7 @@ _cv_wait_sig(struct cv *cvp, struct lock_object *lock)
sleepq_lock(cvp);
- atomic_add_int(&cvp->cv_waiters, 1);
+ cvp->cv_waiters++;
if (lock == &Giant.lock_object)
mtx_assert(&Giant, MA_OWNED);
DROP_GIANT();
@@ -258,7 +256,6 @@ _cv_wait_sig(struct cv *cvp, struct lock_object *lock)
sleepq_lock(cvp);
}
rval = sleepq_wait_sig(cvp, 0);
- atomic_subtract_int(&cvp->cv_waiters, 1);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
@@ -310,7 +307,7 @@ _cv_timedwait_sbt(struct cv *cvp, struct lock_object *lock, sbintime_t sbt,
sleepq_lock(cvp);
- atomic_add_int(&cvp->cv_waiters, 1);
+ cvp->cv_waiters++;
if (lock == &Giant.lock_object)
mtx_assert(&Giant, MA_OWNED);
DROP_GIANT();
@@ -326,7 +323,6 @@ _cv_timedwait_sbt(struct cv *cvp, struct lock_object *lock, sbintime_t sbt,
sleepq_lock(cvp);
}
rval = sleepq_timedwait(cvp, 0);
- atomic_subtract_int(&cvp->cv_waiters, 1);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
@@ -380,7 +376,7 @@ _cv_timedwait_sig_sbt(struct cv *cvp, struct lock_object *lock,
sleepq_lock(cvp);
- atomic_add_int(&cvp->cv_waiters, 1);
+ cvp->cv_waiters++;
if (lock == &Giant.lock_object)
mtx_assert(&Giant, MA_OWNED);
DROP_GIANT();
@@ -397,7 +393,6 @@ _cv_timedwait_sig_sbt(struct cv *cvp, struct lock_object *lock,
sleepq_lock(cvp);
}
rval = sleepq_timedwait_sig(cvp, 0);
- atomic_subtract_int(&cvp->cv_waiters, 1);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
@@ -426,8 +421,10 @@ cv_signal(struct cv *cvp)
wakeup_swapper = 0;
sleepq_lock(cvp);
- if (cvp->cv_waiters > 0)
+ if (cvp->cv_waiters > 0) {
+ cvp->cv_waiters--;
wakeup_swapper = sleepq_signal(cvp, SLEEPQ_CONDVAR, 0, 0);
+ }
sleepq_release(cvp);
if (wakeup_swapper)
kick_proc0();
@@ -450,8 +447,10 @@ cv_broadcastpri(struct cv *cvp, int pri)
if (pri == -1)
pri = 0;
sleepq_lock(cvp);
- if (cvp->cv_waiters > 0)
+ if (cvp->cv_waiters > 0) {
+ cvp->cv_waiters = 0;
wakeup_swapper = sleepq_broadcast(cvp, SLEEPQ_CONDVAR, pri, 0);
+ }
sleepq_release(cvp);
if (wakeup_swapper)
kick_proc0();
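After the revert, cv_waiters is again touched only while the sleepqueue
chain lock is held: each waiter increments it before sleeping,
cv_signal() decrements it for the single thread it wakes, and
cv_broadcastpri() zeroes it.  A minimal userland model of that
discipline (pthreads standing in for the sleepqueue; all names here are
hypothetical, and spurious wakeups are ignored for brevity):

#include <pthread.h>

/*
 * Userland model of the restored cv_waiters accounting; not the kernel
 * code.  The pthread mutex plays the role of sleepq_lock()/
 * sleepq_release().
 */
struct model_cv {
	pthread_mutex_t	mc_sq;		/* plays the sleepqueue chain lock */
	pthread_cond_t	mc_cond;	/* plays the sleepqueue itself */
	int		mc_waiters;	/* plays cv_waiters */
};

static void
model_wait(struct model_cv *mc)
{
	pthread_mutex_lock(&mc->mc_sq);
	mc->mc_waiters++;		/* plain ++ is safe: lock is held */
	pthread_cond_wait(&mc->mc_cond, &mc->mc_sq);
	/*
	 * No decrement here: the waking side already adjusted
	 * mc_waiters, so a woken thread never writes to it again.
	 */
	pthread_mutex_unlock(&mc->mc_sq);
}

static void
model_signal(struct model_cv *mc)
{
	pthread_mutex_lock(&mc->mc_sq);
	if (mc->mc_waiters > 0) {
		mc->mc_waiters--;	/* the waker pays the decrement */
		pthread_cond_signal(&mc->mc_cond);
	}
	pthread_mutex_unlock(&mc->mc_sq);
}

static void
model_broadcast(struct model_cv *mc)
{
	pthread_mutex_lock(&mc->mc_sq);
	if (mc->mc_waiters > 0) {
		mc->mc_waiters = 0;	/* every sleeper is being woken */
		pthread_cond_broadcast(&mc->mc_cond);
	}
	pthread_mutex_unlock(&mc->mc_sq);
}

In the kernel, sleepq_wait() returns without the sleepqueue lock, so
with this accounting a woken thread touches the condvar no further;
that is what removes the dependency on consumers keeping the condvar
alive until every sleeper has resumed.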