author     Julian Elischer <julian@FreeBSD.org>    2003-01-03 20:41:49 +0000
committer  Julian Elischer <julian@FreeBSD.org>    2003-01-03 20:41:49 +0000
commit     03ea4720806504bfbc2832e7e804b732f93d8524 (patch)
tree       82efe41e6de266ff54e62533667813d5e0a9a49f
parent     3f5f24287f139ebf9a752dd4ef365ab5c04062eb (diff)
-rw-r--r--  sys/kern/kern_kse.c     41
-rw-r--r--  sys/kern/kern_thread.c  41
-rw-r--r--  sys/sys/proc.h          17
3 files changed, 53 insertions, 46 deletions
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 53a09af5cf38..492df0d159ce 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -362,20 +362,22 @@ kse_release(struct thread * td, struct kse_release_args * uap)
return (EINVAL);
PROC_LOCK(p);
+ mtx_lock_spin(&sched_lock);
/* Change OURSELF to become an upcall. */
td->td_flags = TDF_UPCALLING; /* BOUND */
- if (kg->kg_completed == NULL) {
+ if (!(td->td_kse->ke_flags & KEF_DOUPCALL) &&
+ (kg->kg_completed == NULL)) {
/* XXXKSE also look for waiting signals etc. */
/*
* The KSE will however be lendable.
*/
- mtx_lock_spin(&sched_lock);
TD_SET_IDLE(td);
PROC_UNLOCK(p);
p->p_stats->p_ru.ru_nvcsw++;
mi_switch();
mtx_unlock_spin(&sched_lock);
} else {
+ mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
}
return (0);
@@ -397,21 +399,17 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
/* KSE-enabled processes only, please. */
if (!(p->p_flag & P_KSES))
return EINVAL;
- PROC_LOCK(p);
+
+ mtx_lock_spin(&sched_lock);
if (uap->mbx) {
FOREACH_KSEGRP_IN_PROC(p, kg) {
FOREACH_KSE_IN_GROUP(kg, ke) {
if (ke->ke_mailbox != uap->mbx)
continue;
- td2 = ke->ke_owner ;
+ td2 = ke->ke_owner;
KASSERT((td2 != NULL),("KSE with no owner"));
- if (!TD_IS_IDLE(td2)) {
- /* Return silently if no longer idle */
- PROC_UNLOCK(p);
- return (0);
- }
break;
- }
+ }
if (td2) {
break;
}
@@ -421,24 +419,26 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
* look for any idle KSE to resurrect.
*/
kg = td->td_ksegrp;
- mtx_lock_spin(&sched_lock);
FOREACH_KSE_IN_GROUP(kg, ke) {
td2 = ke->ke_owner;
KASSERT((td2 != NULL),("KSE with no owner2"));
if (TD_IS_IDLE(td2))
break;
}
+ KASSERT((td2 != NULL), ("no thread(s)"));
}
if (td2) {
- mtx_lock_spin(&sched_lock);
- PROC_UNLOCK(p);
- TD_CLR_IDLE(td2);
- setrunnable(td2);
+ if (TD_IS_IDLE(td2)) {
+ TD_CLR_IDLE(td2);
+ setrunnable(td2);
+ } else if (td != td2) {
+ /* guarantee do an upcall ASAP */
+ td2->td_kse->ke_flags |= KEF_DOUPCALL;
+ }
mtx_unlock_spin(&sched_lock);
return (0);
- }
+ }
mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
return (ESRCH);
}
@@ -1368,7 +1368,8 @@ thread_userret(struct thread *td, struct trapframe *frame)
if (TD_CAN_UNBIND(td)) {
td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND);
- if (!worktodo && (kg->kg_completed == NULL)) {
+ if (!worktodo && (kg->kg_completed == NULL) &&
+ !(td->td_kse->ke_flags & KEF_DOUPCALL)) {
/*
* This thread has not started any upcall.
* If there is no work to report other than
@@ -1477,11 +1478,13 @@ justreturn:
* when there is no more work to do.
* kse_reassign() will do that for us.
*/
- TD_SET_LOAN(td); /* XXXKSE may not be needed */
+ TD_SET_LOAN(td);
p->p_stats->p_ru.ru_nvcsw++;
mi_switch(); /* kse_reassign() will (re)find worktodo */
}
td->td_flags &= ~TDF_UPCALLING;
+ if (ke->ke_flags & KEF_DOUPCALL)
+ ke->ke_flags &= ~KEF_DOUPCALL;
mtx_unlock_spin(&sched_lock);
/*
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 53a09af5cf38..492df0d159ce 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -362,20 +362,22 @@ kse_release(struct thread * td, struct kse_release_args * uap)
return (EINVAL);
PROC_LOCK(p);
+ mtx_lock_spin(&sched_lock);
/* Change OURSELF to become an upcall. */
td->td_flags = TDF_UPCALLING; /* BOUND */
- if (kg->kg_completed == NULL) {
+ if (!(td->td_kse->ke_flags & KEF_DOUPCALL) &&
+ (kg->kg_completed == NULL)) {
/* XXXKSE also look for waiting signals etc. */
/*
* The KSE will however be lendable.
*/
- mtx_lock_spin(&sched_lock);
TD_SET_IDLE(td);
PROC_UNLOCK(p);
p->p_stats->p_ru.ru_nvcsw++;
mi_switch();
mtx_unlock_spin(&sched_lock);
} else {
+ mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
}
return (0);
@@ -397,21 +399,17 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
/* KSE-enabled processes only, please. */
if (!(p->p_flag & P_KSES))
return EINVAL;
- PROC_LOCK(p);
+
+ mtx_lock_spin(&sched_lock);
if (uap->mbx) {
FOREACH_KSEGRP_IN_PROC(p, kg) {
FOREACH_KSE_IN_GROUP(kg, ke) {
if (ke->ke_mailbox != uap->mbx)
continue;
- td2 = ke->ke_owner ;
+ td2 = ke->ke_owner;
KASSERT((td2 != NULL),("KSE with no owner"));
- if (!TD_IS_IDLE(td2)) {
- /* Return silently if no longer idle */
- PROC_UNLOCK(p);
- return (0);
- }
break;
- }
+ }
if (td2) {
break;
}
@@ -421,24 +419,26 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
* look for any idle KSE to resurrect.
*/
kg = td->td_ksegrp;
- mtx_lock_spin(&sched_lock);
FOREACH_KSE_IN_GROUP(kg, ke) {
td2 = ke->ke_owner;
KASSERT((td2 != NULL),("KSE with no owner2"));
if (TD_IS_IDLE(td2))
break;
}
+ KASSERT((td2 != NULL), ("no thread(s)"));
}
if (td2) {
- mtx_lock_spin(&sched_lock);
- PROC_UNLOCK(p);
- TD_CLR_IDLE(td2);
- setrunnable(td2);
+ if (TD_IS_IDLE(td2)) {
+ TD_CLR_IDLE(td2);
+ setrunnable(td2);
+ } else if (td != td2) {
+ /* guarantee do an upcall ASAP */
+ td2->td_kse->ke_flags |= KEF_DOUPCALL;
+ }
mtx_unlock_spin(&sched_lock);
return (0);
- }
+ }
mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
return (ESRCH);
}
@@ -1368,7 +1368,8 @@ thread_userret(struct thread *td, struct trapframe *frame)
if (TD_CAN_UNBIND(td)) {
td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND);
- if (!worktodo && (kg->kg_completed == NULL)) {
+ if (!worktodo && (kg->kg_completed == NULL) &&
+ !(td->td_kse->ke_flags & KEF_DOUPCALL)) {
/*
* This thread has not started any upcall.
* If there is no work to report other than
@@ -1477,11 +1478,13 @@ justreturn:
* when there is no more work to do.
* kse_reassign() will do that for us.
*/
- TD_SET_LOAN(td); /* XXXKSE may not be needed */
+ TD_SET_LOAN(td);
p->p_stats->p_ru.ru_nvcsw++;
mi_switch(); /* kse_reassign() will (re)find worktodo */
}
td->td_flags &= ~TDF_UPCALLING;
+ if (ke->ke_flags & KEF_DOUPCALL)
+ ke->ke_flags &= ~KEF_DOUPCALL;
mtx_unlock_spin(&sched_lock);
/*
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 9f46569084be..82a491956ec5 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -347,14 +347,14 @@ struct thread {
#define TDF_INTERRUPT 0x002000 /* Thread is marked as interrupted. */
#define TDF_DEADLKTREAT 0x800000 /* Lock aquisition - deadlock treatment. */
-#define TDI_SUSPENDED 0x01 /* On suspension queue. */
-#define TDI_SLEEPING 0x02 /* Actually asleep! (tricky). */
-#define TDI_SWAPPED 0x04 /* Stack not in mem.. bad juju if run. */
-#define TDI_LOCK 0x08 /* Stopped on a lock. */
-#define TDI_IWAIT 0x10 /* Awaiting interrupt. */
-#define TDI_LOAN 0x20 /* bound thread's KSE is lent */
-#define TDI_IDLE 0x40 /* kse_release() made us surplus */
-#define TDI_EXITING 0x80 /* Thread is in exit processing */
+#define TDI_SUSPENDED 0x0001 /* On suspension queue. */
+#define TDI_SLEEPING 0x0002 /* Actually asleep! (tricky). */
+#define TDI_SWAPPED 0x0004 /* Stack not in mem.. bad juju if run. */
+#define TDI_LOCK 0x0008 /* Stopped on a lock. */
+#define TDI_IWAIT 0x0010 /* Awaiting interrupt. */
+#define TDI_LOAN 0x0020 /* bound thread's KSE is lent */
+#define TDI_IDLE 0x0040 /* kse_release() made us surplus */
+#define TDI_EXITING 0x0080 /* Thread is in exit processing */
#define TD_IS_UNBOUND(td) ((td)->td_flags & TDF_UNBOUND)
#define TD_IS_BOUND(td) (!TD_IS_UNBOUND(td))
@@ -478,6 +478,7 @@ struct kse {
#define KEF_ONLOANQ 0x01000 /* KSE is on loan queue. */
#define KEF_DIDRUN 0x02000 /* KSE actually ran. */
#define KEF_EXIT 0x04000 /* KSE is being killed. */
+#define KEF_DOUPCALL 0x08000 /* KSE should do upcall now. */
/*
* (*) A bound KSE with a bound thread in a KSE process may be lent to
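For reference, the mechanism this change introduces can be sketched in isolation: kse_wakeup() sets KEF_DOUPCALL on a busy owner's KSE so the wakeup is not lost, and thread_userret() later honors and clears the flag. The sketch below is illustrative only; the flag value and field names mirror the diff, but the structures, the idle state, and request_upcall()/consume_upcall() are reduced stand-ins for this example, not the kernel's actual locking or scheduler APIs.

    #include <stdio.h>

    /* Illustrative stand-ins for the kernel structures touched by this diff. */
    #define KEF_DOUPCALL 0x08000        /* KSE should do upcall now. */

    struct kse    { int ke_flags; };
    struct thread { struct kse *td_kse; int td_is_idle; };

    /*
     * kse_wakeup() side: if the owner thread is not idle, flag its KSE so
     * the wakeup is not lost; the owner will upcall on its next userret.
     */
    static void
    request_upcall(struct thread *td2, struct thread *td)
    {
        if (td2->td_is_idle) {
            /* In the kernel: TD_CLR_IDLE(td2); setrunnable(td2); */
            td2->td_is_idle = 0;
        } else if (td != td2) {
            /* Guarantee an upcall as soon as possible. */
            td2->td_kse->ke_flags |= KEF_DOUPCALL;
        }
    }

    /* thread_userret() side: honor the pending-upcall flag, then clear it. */
    static int
    consume_upcall(struct thread *td)
    {
        int doit = (td->td_kse->ke_flags & KEF_DOUPCALL) != 0;

        td->td_kse->ke_flags &= ~KEF_DOUPCALL;
        return (doit);
    }

    int
    main(void)
    {
        struct kse ke = { 0 };
        struct thread owner = { &ke, 0 }, caller = { NULL, 0 };

        request_upcall(&owner, &caller);                          /* owner busy: flag set */
        printf("upcall pending: %d\n", consume_upcall(&owner));   /* prints 1 */
        printf("upcall pending: %d\n", consume_upcall(&owner));   /* prints 0, flag cleared */
        return (0);
    }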