summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorJohn Baldwin <jhb@FreeBSD.org>2008-09-12 21:00:11 +0000
committerJohn Baldwin <jhb@FreeBSD.org>2008-09-12 21:00:11 +0000
commitc0b37a7029152fb217fbeed1c345a4a79a72c5ad (patch)
tree9ee0fe8c0bafd8468303c00bfcb1d1c32a521160
parent53cdcac04cade8edf3c94bca680227cefa4f24cb (diff)
Notes
-rw-r--r--sys/kern/kern_condvar.c12
-rw-r--r--sys/kern/kern_sig.c12
-rw-r--r--sys/kern/kern_sx.c23
-rw-r--r--sys/kern/kern_synch.c32
-rw-r--r--sys/kern/kern_thread.c49
-rw-r--r--sys/kern/subr_sleepqueue.c67
-rw-r--r--sys/sys/proc.h4
-rw-r--r--sys/sys/sleepqueue.h6
-rw-r--r--sys/vm/vm_glue.c53
9 files changed, 159 insertions, 99 deletions
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index 5d3403acc5110..6757202e074b2 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -389,13 +389,17 @@ _cv_timedwait_sig(struct cv *cvp, struct lock_object *lock, int timo)
void
cv_signal(struct cv *cvp)
{
+ int wakeup_swapper;
+ wakeup_swapper = 0;
sleepq_lock(cvp);
if (cvp->cv_waiters > 0) {
cvp->cv_waiters--;
- sleepq_signal(cvp, SLEEPQ_CONDVAR, -1, 0);
+ wakeup_swapper = sleepq_signal(cvp, SLEEPQ_CONDVAR, -1, 0);
}
sleepq_release(cvp);
+ if (wakeup_swapper)
+ kick_proc0();
}
/*
@@ -405,11 +409,15 @@ cv_signal(struct cv *cvp)
void
cv_broadcastpri(struct cv *cvp, int pri)
{
+ int wakeup_swapper;
+ wakeup_swapper = 0;
sleepq_lock(cvp);
if (cvp->cv_waiters > 0) {
cvp->cv_waiters = 0;
- sleepq_broadcast(cvp, SLEEPQ_CONDVAR, pri, 0);
+ wakeup_swapper = sleepq_broadcast(cvp, SLEEPQ_CONDVAR, pri, 0);
} else
sleepq_release(cvp);
+ if (wakeup_swapper)
+ kick_proc0();
}
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index ebf50580d2e1e..f38eccb3e9d4f 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -2047,6 +2047,7 @@ do_tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
struct sigacts *ps;
int intrval;
int ret = 0;
+ int wakeup_swapper;
PROC_LOCK_ASSERT(p, MA_OWNED);
@@ -2272,11 +2273,14 @@ do_tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
* the PROCESS runnable, leave it stopped.
* It may run a bit until it hits a thread_suspend_check().
*/
+ wakeup_swapper = 0;
thread_lock(td);
if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
- sleepq_abort(td, intrval);
+ wakeup_swapper = sleepq_abort(td, intrval);
thread_unlock(td);
PROC_SUNLOCK(p);
+ if (wakeup_swapper)
+ kick_proc0();
goto out;
/*
* Mutexes are short lived. Threads waiting on them will
@@ -2353,7 +2357,9 @@ tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
{
struct proc *p = td->td_proc;
register int prop;
+ int wakeup_swapper;
+ wakeup_swapper = 0;
PROC_LOCK_ASSERT(p, MA_OWNED);
PROC_SLOCK_ASSERT(p, MA_OWNED);
THREAD_LOCK_ASSERT(td, MA_OWNED);
@@ -2400,7 +2406,7 @@ tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
if (td->td_priority > PUSER)
sched_prio(td, PUSER);
- sleepq_abort(td, intrval);
+ wakeup_swapper = sleepq_abort(td, intrval);
} else {
/*
* Other states do nothing with the signal immediately,
@@ -2412,6 +2418,8 @@ tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
forward_signal(td);
#endif
}
+ if (wakeup_swapper)
+ kick_proc0();
}
static void
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 23dd4b620ed2a..043a7cbf2036d 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -354,6 +354,7 @@ void
_sx_downgrade(struct sx *sx, const char *file, int line)
{
uintptr_t x;
+ int wakeup_swapper;
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_downgrade() of destroyed sx @ %s:%d", file, line));
@@ -394,16 +395,20 @@ _sx_downgrade(struct sx *sx, const char *file, int line)
* Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
* shared lock. If there are any shared waiters, wake them up.
*/
+ wakeup_swapper = 0;
x = sx->sx_lock;
atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
(x & SX_LOCK_EXCLUSIVE_WAITERS));
if (x & SX_LOCK_SHARED_WAITERS)
- sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1,
- SQ_SHARED_QUEUE);
+ wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
+ -1, SQ_SHARED_QUEUE);
else
sleepq_release(&sx->lock_object);
LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
+
+ if (wakeup_swapper)
+ kick_proc0();
}
/*
@@ -585,7 +590,7 @@ void
_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
{
uintptr_t x;
- int queue;
+ int queue, wakeup_swapper;
MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
@@ -623,7 +628,10 @@ _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
__func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
"exclusive");
atomic_store_rel_ptr(&sx->sx_lock, x);
- sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1, queue);
+ wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1,
+ queue);
+ if (wakeup_swapper)
+ kick_proc0();
}
/*
@@ -801,6 +809,7 @@ void
_sx_sunlock_hard(struct sx *sx, const char *file, int line)
{
uintptr_t x;
+ int wakeup_swapper;
for (;;) {
x = sx->sx_lock;
@@ -868,8 +877,10 @@ _sx_sunlock_hard(struct sx *sx, const char *file, int line)
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p waking up all thread on"
"exclusive queue", __func__, sx);
- sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1,
- SQ_EXCLUSIVE_QUEUE);
+ wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
+ -1, SQ_EXCLUSIVE_QUEUE);
+ if (wakeup_swapper)
+ kick_proc0();
break;
}
}
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index cde386fd4d01a..d63f4b9063903 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -336,9 +336,12 @@ pause(const char *wmesg, int timo)
void
wakeup(void *ident)
{
+ int wakeup_swapper;
sleepq_lock(ident);
- sleepq_broadcast(ident, SLEEPQ_SLEEP, -1, 0);
+ wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, -1, 0);
+ if (wakeup_swapper)
+ kick_proc0();
}
/*
@@ -349,10 +352,13 @@ wakeup(void *ident)
void
wakeup_one(void *ident)
{
+ int wakeup_swapper;
sleepq_lock(ident);
- sleepq_signal(ident, SLEEPQ_SLEEP, -1, 0);
+ wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP, -1, 0);
sleepq_release(ident);
+ if (wakeup_swapper)
+ kick_proc0();
}
/*
@@ -448,11 +454,11 @@ mi_switch(int flags, struct thread *newtd)
}
/*
- * Change process state to be runnable,
- * placing it on the run queue if it is in memory,
- * and awakening the swapper if it isn't in memory.
+ * Change thread state to be runnable, placing it on the run queue if
+ * it is in memory. If it is swapped out, return true so our caller
+ * will know to awaken the swapper.
*/
-void
+int
setrunnable(struct thread *td)
{
@@ -462,15 +468,15 @@ setrunnable(struct thread *td)
switch (td->td_state) {
case TDS_RUNNING:
case TDS_RUNQ:
- return;
+ return (0);
case TDS_INHIBITED:
/*
* If we are only inhibited because we are swapped out
* then arange to swap in this process. Otherwise just return.
*/
if (td->td_inhibitors != TDI_SWAPPED)
- return;
- /* XXX: intentional fall-through ? */
+ return (0);
+ /* FALLTHROUGH */
case TDS_CAN_RUN:
break;
default:
@@ -480,15 +486,11 @@ setrunnable(struct thread *td)
if ((td->td_flags & TDF_INMEM) == 0) {
if ((td->td_flags & TDF_SWAPINREQ) == 0) {
td->td_flags |= TDF_SWAPINREQ;
- /*
- * due to a LOR between the thread lock and
- * the sleepqueue chain locks, use
- * lower level scheduling functions.
- */
- kick_proc0();
+ return (1);
}
} else
sched_wakeup(td);
+ return (0);
}
/*
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index f305cfdcf369d..407d86c1a4e3b 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -391,6 +391,7 @@ thread_exit(void)
struct thread *td;
struct thread *td2;
struct proc *p;
+ int wakeup_swapper;
td = curthread;
p = td->td_proc;
@@ -465,8 +466,11 @@ thread_exit(void)
if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
if (p->p_numthreads == p->p_suspcount) {
thread_lock(p->p_singlethread);
- thread_unsuspend_one(p->p_singlethread);
+ wakeup_swapper = thread_unsuspend_one(
+ p->p_singlethread);
thread_unlock(p->p_singlethread);
+ if (wakeup_swapper)
+ kick_proc0();
}
}
@@ -631,7 +635,7 @@ thread_single(int mode)
struct thread *td;
struct thread *td2;
struct proc *p;
- int remaining;
+ int remaining, wakeup_swapper;
td = curthread;
p = td->td_proc;
@@ -668,6 +672,7 @@ thread_single(int mode)
while (remaining != 1) {
if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
goto stopme;
+ wakeup_swapper = 0;
FOREACH_THREAD_IN_PROC(p, td2) {
if (td2 == td)
continue;
@@ -679,18 +684,22 @@ thread_single(int mode)
if (td->td_flags & TDF_DBSUSPEND)
td->td_flags &= ~TDF_DBSUSPEND;
if (TD_IS_SUSPENDED(td2))
- thread_unsuspend_one(td2);
+ wakeup_swapper |=
+ thread_unsuspend_one(td2);
if (TD_ON_SLEEPQ(td2) &&
(td2->td_flags & TDF_SINTR))
- sleepq_abort(td2, EINTR);
+ wakeup_swapper |=
+ sleepq_abort(td2, EINTR);
break;
case SINGLE_BOUNDARY:
if (TD_IS_SUSPENDED(td2) &&
!(td2->td_flags & TDF_BOUNDARY))
- thread_unsuspend_one(td2);
+ wakeup_swapper |=
+ thread_unsuspend_one(td2);
if (TD_ON_SLEEPQ(td2) &&
(td2->td_flags & TDF_SINTR))
- sleepq_abort(td2, ERESTART);
+ wakeup_swapper |=
+ sleepq_abort(td2, ERESTART);
break;
default:
if (TD_IS_SUSPENDED(td2)) {
@@ -714,6 +723,8 @@ thread_single(int mode)
#endif
thread_unlock(td2);
}
+ if (wakeup_swapper)
+ kick_proc0();
if (mode == SINGLE_EXIT)
remaining = p->p_numthreads;
else if (mode == SINGLE_BOUNDARY)
@@ -793,6 +804,7 @@ thread_suspend_check(int return_instead)
{
struct thread *td;
struct proc *p;
+ int wakeup_swapper;
td = curthread;
p = td->td_proc;
@@ -836,8 +848,11 @@ thread_suspend_check(int return_instead)
if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
if (p->p_numthreads == p->p_suspcount + 1) {
thread_lock(p->p_singlethread);
- thread_unsuspend_one(p->p_singlethread);
+ wakeup_swapper =
+ thread_unsuspend_one(p->p_singlethread);
thread_unlock(p->p_singlethread);
+ if (wakeup_swapper)
+ kick_proc0();
}
}
PROC_UNLOCK(p);
@@ -904,7 +919,7 @@ thread_suspend_one(struct thread *td)
TD_SET_SUSPENDED(td);
}
-void
+int
thread_unsuspend_one(struct thread *td)
{
struct proc *p = td->td_proc;
@@ -914,7 +929,7 @@ thread_unsuspend_one(struct thread *td)
KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
TD_CLR_SUSPENDED(td);
p->p_suspcount--;
- setrunnable(td);
+ return (setrunnable(td));
}
/*
@@ -924,14 +939,16 @@ void
thread_unsuspend(struct proc *p)
{
struct thread *td;
+ int wakeup_swapper;
PROC_LOCK_ASSERT(p, MA_OWNED);
PROC_SLOCK_ASSERT(p, MA_OWNED);
+ wakeup_swapper = 0;
if (!P_SHOULDSTOP(p)) {
FOREACH_THREAD_IN_PROC(p, td) {
thread_lock(td);
if (TD_IS_SUSPENDED(td)) {
- thread_unsuspend_one(td);
+ wakeup_swapper |= thread_unsuspend_one(td);
}
thread_unlock(td);
}
@@ -943,9 +960,11 @@ thread_unsuspend(struct proc *p)
* let it continue.
*/
thread_lock(p->p_singlethread);
- thread_unsuspend_one(p->p_singlethread);
+ wakeup_swapper = thread_unsuspend_one(p->p_singlethread);
thread_unlock(p->p_singlethread);
}
+ if (wakeup_swapper)
+ kick_proc0();
}
/*
@@ -956,6 +975,7 @@ thread_single_end(void)
{
struct thread *td;
struct proc *p;
+ int wakeup_swapper;
td = curthread;
p = td->td_proc;
@@ -963,8 +983,9 @@ thread_single_end(void)
p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
PROC_SLOCK(p);
p->p_singlethread = NULL;
+ wakeup_swapper = 0;
/*
- * If there are other threads they mey now run,
+ * If there are other threads they may now run,
* unless of course there is a blanket 'stop order'
* on the process. The single threader must be allowed
* to continue however as this is a bad place to stop.
@@ -973,12 +994,14 @@ thread_single_end(void)
FOREACH_THREAD_IN_PROC(p, td) {
thread_lock(td);
if (TD_IS_SUSPENDED(td)) {
- thread_unsuspend_one(td);
+ wakeup_swapper |= thread_unsuspend_one(td);
}
thread_unlock(td);
}
}
PROC_SUNLOCK(p);
+ if (wakeup_swapper)
+ kick_proc0();
}
struct thread *
diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c
index 5e99d22184801..e4c6aa03368ba 100644
--- a/sys/kern/subr_sleepqueue.c
+++ b/sys/kern/subr_sleepqueue.c
@@ -156,7 +156,7 @@ static int sleepq_check_timeout(void);
static void sleepq_dtor(void *mem, int size, void *arg);
#endif
static int sleepq_init(void *mem, int size, int flags);
-static void sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
+static int sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
int pri);
static void sleepq_switch(void *wchan);
static void sleepq_timeout(void *arg);
@@ -427,7 +427,15 @@ sleepq_catch_signals(void *wchan)
*/
if (TD_ON_SLEEPQ(td)) {
sq = sleepq_lookup(wchan);
- sleepq_resume_thread(sq, td, -1);
+ if (sleepq_resume_thread(sq, td, -1)) {
+#ifdef INVARIANTS
+ /*
+ * This thread hasn't gone to sleep yet, so it
+ * should not be swapped out.
+ */
+ panic("not waking up swapper");
+#endif
+ }
}
mtx_unlock_spin(&sc->sc_lock);
MPASS(td->td_lock != &sc->sc_lock);
@@ -467,7 +475,15 @@ sleepq_switch(void *wchan)
if (td->td_flags & TDF_TIMEOUT) {
MPASS(TD_ON_SLEEPQ(td));
sq = sleepq_lookup(wchan);
- sleepq_resume_thread(sq, td, -1);
+ if (sleepq_resume_thread(sq, td, -1)) {
+#ifdef INVARIANTS
+ /*
+ * This thread hasn't gone to sleep yet, so it
+ * should not be swapped out.
+ */
+ panic("not waking up swapper");
+#endif
+ }
mtx_unlock_spin(&sc->sc_lock);
return;
}
@@ -627,7 +643,7 @@ sleepq_timedwait_sig(void *wchan)
* Removes a thread from a sleep queue and makes it
* runnable.
*/
-static void
+static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
struct sleepqueue_chain *sc;
@@ -679,7 +695,7 @@ sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
MPASS(pri == -1 || (pri >= PRI_MIN && pri <= PRI_MAX));
if (pri != -1 && td->td_priority > pri)
sched_prio(td, pri);
- setrunnable(td);
+ return (setrunnable(td));
}
#ifdef INVARIANTS
@@ -718,18 +734,19 @@ sleepq_init(void *mem, int size, int flags)
/*
* Find the highest priority thread sleeping on a wait channel and resume it.
*/
-void
+int
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
struct sleepqueue *sq;
struct thread *td, *besttd;
+ int wakeup_swapper;
CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
MPASS((queue >= 0) && (queue < NR_SLEEPQS));
sq = sleepq_lookup(wchan);
if (sq == NULL)
- return;
+ return (0);
KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
("%s: mismatch between sleep/wakeup and cv_*", __func__));
@@ -746,18 +763,20 @@ sleepq_signal(void *wchan, int flags, int pri, int queue)
}
MPASS(besttd != NULL);
thread_lock(besttd);
- sleepq_resume_thread(sq, besttd, pri);
+ wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
thread_unlock(besttd);
+ return (wakeup_swapper);
}
/*
* Resume all threads sleeping on a specified wait channel.
*/
-void
+int
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
struct sleepqueue *sq;
struct thread *td;
+ int wakeup_swapper;
CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
@@ -765,19 +784,22 @@ sleepq_broadcast(void *wchan, int flags, int pri, int queue)
sq = sleepq_lookup(wchan);
if (sq == NULL) {
sleepq_release(wchan);
- return;
+ return (0);
}
KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
("%s: mismatch between sleep/wakeup and cv_*", __func__));
/* Resume all blocked threads on the sleep queue. */
+ wakeup_swapper = 0;
while (!TAILQ_EMPTY(&sq->sq_blocked[queue])) {
td = TAILQ_FIRST(&sq->sq_blocked[queue]);
thread_lock(td);
- sleepq_resume_thread(sq, td, pri);
+ if (sleepq_resume_thread(sq, td, pri))
+ wakeup_swapper = 1;
thread_unlock(td);
}
sleepq_release(wchan);
+ return (wakeup_swapper);
}
/*
@@ -791,8 +813,10 @@ sleepq_timeout(void *arg)
struct sleepqueue *sq;
struct thread *td;
void *wchan;
+ int wakeup_swapper;
td = arg;
+ wakeup_swapper = 0;
CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
(void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
@@ -808,8 +832,10 @@ sleepq_timeout(void *arg)
sq = sleepq_lookup(wchan);
MPASS(sq != NULL);
td->td_flags |= TDF_TIMEOUT;
- sleepq_resume_thread(sq, td, -1);
+ wakeup_swapper = sleepq_resume_thread(sq, td, -1);
thread_unlock(td);
+ if (wakeup_swapper)
+ kick_proc0();
return;
}
@@ -838,10 +864,12 @@ sleepq_timeout(void *arg)
MPASS(TD_IS_SLEEPING(td));
td->td_flags &= ~TDF_TIMEOUT;
TD_CLR_SLEEPING(td);
- setrunnable(td);
+ wakeup_swapper = setrunnable(td);
} else
td->td_flags |= TDF_TIMOFAIL;
thread_unlock(td);
+ if (wakeup_swapper)
+ kick_proc0();
}
/*
@@ -852,6 +880,7 @@ void
sleepq_remove(struct thread *td, void *wchan)
{
struct sleepqueue *sq;
+ int wakeup_swapper;
/*
* Look up the sleep queue for this wait channel, then re-check
@@ -875,16 +904,18 @@ sleepq_remove(struct thread *td, void *wchan)
thread_lock(td);
MPASS(sq != NULL);
MPASS(td->td_wchan == wchan);
- sleepq_resume_thread(sq, td, -1);
+ wakeup_swapper = sleepq_resume_thread(sq, td, -1);
thread_unlock(td);
sleepq_release(wchan);
+ if (wakeup_swapper)
+ kick_proc0();
}
/*
* Abort a thread as if an interrupt had occurred. Only abort
* interruptible waits (unfortunately it isn't safe to abort others).
*/
-void
+int
sleepq_abort(struct thread *td, int intrval)
{
struct sleepqueue *sq;
@@ -900,7 +931,7 @@ sleepq_abort(struct thread *td, int intrval)
* timeout is scheduled anyhow.
*/
if (td->td_flags & TDF_TIMEOUT)
- return;
+ return (0);
CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
(void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
@@ -912,14 +943,14 @@ sleepq_abort(struct thread *td, int intrval)
* we have to do it here.
*/
if (!TD_IS_SLEEPING(td))
- return;
+ return (0);
wchan = td->td_wchan;
MPASS(wchan != NULL);
sq = sleepq_lookup(wchan);
MPASS(sq != NULL);
/* Thread is asleep on sleep queue sq, so wake it up. */
- sleepq_resume_thread(sq, td, -1);
+ return (sleepq_resume_thread(sq, td, -1));
}
#ifdef DDB
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index f66f68ad770a2..9104b41f115b6 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -857,7 +857,7 @@ void pstats_free(struct pstats *ps);
int securelevel_ge(struct ucred *cr, int level);
int securelevel_gt(struct ucred *cr, int level);
void sessrele(struct session *);
-void setrunnable(struct thread *);
+int setrunnable(struct thread *);
void setsugid(struct proc *p);
int sigonstack(size_t sp);
void sleepinit(void);
@@ -914,7 +914,7 @@ struct thread *thread_switchout(struct thread *td, int flags,
struct thread *newtd);
void thread_unlink(struct thread *td);
void thread_unsuspend(struct proc *p);
-void thread_unsuspend_one(struct thread *td);
+int thread_unsuspend_one(struct thread *td);
void thread_unthread(struct thread *td);
int thread_userret(struct thread *td, struct trapframe *frame);
void thread_user_enter(struct thread *td);
diff --git a/sys/sys/sleepqueue.h b/sys/sys/sleepqueue.h
index 58d667e3ddf7a..745136d48eb0f 100644
--- a/sys/sys/sleepqueue.h
+++ b/sys/sys/sleepqueue.h
@@ -90,17 +90,17 @@ struct thread;
#define SLEEPQ_INTERRUPTIBLE 0x100 /* Sleep is interruptible. */
void init_sleepqueues(void);
-void sleepq_abort(struct thread *td, int intrval);
+int sleepq_abort(struct thread *td, int intrval);
void sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg,
int flags, int queue);
struct sleepqueue *sleepq_alloc(void);
-void sleepq_broadcast(void *wchan, int flags, int pri, int queue);
+int sleepq_broadcast(void *wchan, int flags, int pri, int queue);
void sleepq_free(struct sleepqueue *sq);
void sleepq_lock(void *wchan);
struct sleepqueue *sleepq_lookup(void *wchan);
void sleepq_release(void *wchan);
void sleepq_remove(struct thread *td, void *wchan);
-void sleepq_signal(void *wchan, int flags, int pri, int queue);
+int sleepq_signal(void *wchan, int flags, int pri, int queue);
void sleepq_set_timeout(void *wchan, int timo);
int sleepq_timedwait(void *wchan);
int sleepq_timedwait_sig(void *wchan);
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 80d40e4df2091..b59cfb779c615 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -116,10 +116,6 @@ static int swapout(struct proc *);
static void swapclear(struct proc *);
#endif
-
-static volatile int proc0_rescan;
-
-
/*
* MPSAFE
*
@@ -686,9 +682,6 @@ scheduler(dummy)
loop:
if (vm_page_count_min()) {
VM_WAIT;
- thread_lock(&thread0);
- proc0_rescan = 0;
- thread_unlock(&thread0);
goto loop;
}
@@ -737,13 +730,7 @@ loop:
* Nothing to do, back to sleep.
*/
if ((p = pp) == NULL) {
- thread_lock(&thread0);
- if (!proc0_rescan) {
- TD_SET_IWAIT(&thread0);
- mi_switch(SW_VOL, NULL);
- }
- proc0_rescan = 0;
- thread_unlock(&thread0);
+ tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
goto loop;
}
PROC_LOCK(p);
@@ -755,9 +742,6 @@ loop:
*/
if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
PROC_UNLOCK(p);
- thread_lock(&thread0);
- proc0_rescan = 0;
- thread_unlock(&thread0);
goto loop;
}
@@ -767,31 +751,15 @@ loop:
*/
faultin(p);
PROC_UNLOCK(p);
- thread_lock(&thread0);
- proc0_rescan = 0;
- thread_unlock(&thread0);
goto loop;
}
-void kick_proc0(void)
+void
+kick_proc0(void)
{
- struct thread *td = &thread0;
-
- /* XXX This will probably cause a LOR in some cases */
- thread_lock(td);
- if (TD_AWAITING_INTR(td)) {
- CTR2(KTR_INTR, "%s: sched_add %d", __func__, 0);
- TD_CLR_IWAIT(td);
- sched_add(td, SRQ_INTR);
- } else {
- proc0_rescan = 1;
- CTR2(KTR_INTR, "%s: state %d",
- __func__, td->td_state);
- }
- thread_unlock(td);
-
-}
+ wakeup(&proc0);
+}
#ifndef NO_SWAPPING
@@ -991,7 +959,16 @@ swapclear(p)
td->td_flags &= ~TDF_SWAPINREQ;
TD_CLR_SWAPPED(td);
if (TD_CAN_RUN(td))
- setrunnable(td);
+ if (setrunnable(td)) {
+#ifdef INVARIANTS
+ /*
+ * XXX: We just cleared TDI_SWAPPED
+ * above and set TDF_INMEM, so this
+ * should never happen.
+ */
+ panic("not waking up swapper");
+#endif
+ }
thread_unlock(td);
}
p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);