author    David Xu <davidxu@FreeBSD.org>  2006-06-15 06:37:39 +0000
committer David Xu <davidxu@FreeBSD.org>  2006-06-15 06:37:39 +0000
commit    36ec198bd57d045226736d721ca267bb211ba5d0 (patch)
tree      54e94fb49d889ce7133d0d6bc666318bb347cc77
parent    2053c127059ce623bbe6033b59aaa76ebe729d64 (diff)
-rw-r--r--  sys/kern/ksched.c       4
-rw-r--r--  sys/kern/sched_4bsd.c  15
-rw-r--r--  sys/kern/sched_core.c  15
-rw-r--r--  sys/kern/sched_ule.c   13
-rw-r--r--  sys/posix4/ksched.c     4
-rw-r--r--  sys/sys/sched.h         1
6 files changed, 46 insertions(+), 6 deletions(-)
diff --git a/sys/kern/ksched.c b/sys/kern/ksched.c
index c4a5f0b60ff3..7884d903b459 100644
--- a/sys/kern/ksched.c
+++ b/sys/kern/ksched.c
@@ -253,9 +253,7 @@ ksched_getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
int
ksched_yield(register_t *ret, struct ksched *ksched)
{
- mtx_lock_spin(&sched_lock);
- curthread->td_flags |= TDF_NEEDRESCHED;
- mtx_unlock_spin(&sched_lock);
+ sched_relinquish(curthread);
return 0;
}
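
This hunk rewires the kernel side of POSIX sched_yield(): rather than just setting TDF_NEEDRESCHED and waiting to be preempted, ksched_yield() now calls the new sched_relinquish() API, which lowers a timesharing thread's priority and context-switches immediately. A minimal userland view of the path being changed (standard POSIX API; the program itself is only illustrative):

/* sched_yield(2) is the userland entry point whose kernel backend,
 * ksched_yield(), now delegates to sched_relinquish(). */
#include <sched.h>
#include <stdio.h>

int
main(void)
{
	/* After this commit, a timesharing caller is dropped to
	 * PRI_MAX_TIMESHARE and switched out at once, instead of
	 * merely being marked for rescheduling. */
	if (sched_yield() != 0)
		perror("sched_yield");
	return (0);
}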
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 60f6df6a5d14..399c9e51052c 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -1354,6 +1354,19 @@ sched_is_bound(struct thread *td)
return (td->td_kse->ke_flags & KEF_BOUND);
}
+void
+sched_relinquish(struct thread *td)
+{
+ struct ksegrp *kg;
+
+ kg = td->td_ksegrp;
+ mtx_lock_spin(&sched_lock);
+ if (kg->kg_pri_class == PRI_TIMESHARE)
+ sched_prio(td, PRI_MAX_TIMESHARE);
+ mi_switch(SW_VOL, NULL);
+ mtx_unlock_spin(&sched_lock);
+}
+
int
sched_load(void)
{
@@ -1365,11 +1378,13 @@ sched_sizeof_ksegrp(void)
{
return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}
+
int
sched_sizeof_proc(void)
{
return (sizeof(struct proc));
}
+
int
sched_sizeof_thread(void)
{
diff --git a/sys/kern/sched_core.c b/sys/kern/sched_core.c
index 3f5554b00189..494e04fd05f3 100644
--- a/sys/kern/sched_core.c
+++ b/sys/kern/sched_core.c
@@ -2310,6 +2310,21 @@ sched_load(void)
#endif
}
+void
+sched_relinquish(struct thread *td)
+{
+ struct ksegrp *kg;
+
+ kg = td->td_ksegrp;
+ mtx_lock_spin(&sched_lock);
+ if (sched_is_timeshare(kg)) {
+ sched_prio(td, PRI_MAX_TIMESHARE);
+ td->td_kse->ke_flags |= KEF_NEXTRQ;
+ }
+ mi_switch(SW_VOL, NULL);
+ mtx_unlock_spin(&sched_lock);
+}
+
int
sched_sizeof_ksegrp(void)
{
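
Note the extra KEF_NEXTRQ flag here, which neither the 4BSD version above nor the ULE version below sets: the CORE scheduler keeps a pair of timesharing run-queues and swaps them when the active one drains, so a yielding KSE is parked on the inactive queue and every other runnable timesharing thread gets the CPU first. A toy sketch of that dual-queue idea (names such as toy_rq are hypothetical, not the kernel's structures):

#include <stddef.h>

/* Toy model of paired run-queues: a thread flagged "next" (analogous
 * to KEF_NEXTRQ) is enqueued on the inactive queue, which only runs
 * once the active queue is empty and the two are swapped. */
struct toy_thread {
	struct toy_thread *link;
	int next_rq;			/* park on the inactive queue? */
};

struct toy_rq {
	struct toy_thread *head;
};

static struct toy_rq rq[2];
static int cur;				/* index of the active queue */

static void
toy_enqueue(struct toy_thread *td)
{
	struct toy_rq *q = &rq[td->next_rq ? cur ^ 1 : cur];

	td->link = q->head;
	q->head = td;
}

static struct toy_thread *
toy_choose(void)
{
	struct toy_thread *td;

	if (rq[cur].head == NULL)
		cur ^= 1;		/* active queue drained: swap */
	td = rq[cur].head;
	if (td != NULL)
		rq[cur].head = td->link;
	return (td);
}

A yield in this toy maps to calling toy_enqueue() with next_rq set, mirroring what KEF_NEXTRQ requests of the real scheduler.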
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index a3cccfe89e52..a352445b209f 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1974,6 +1974,19 @@ sched_is_bound(struct thread *td)
return (td->td_kse->ke_flags & KEF_BOUND);
}
+void
+sched_relinquish(struct thread *td)
+{
+ struct ksegrp *kg;
+
+ kg = td->td_ksegrp;
+ mtx_lock_spin(&sched_lock);
+ if (kg->kg_pri_class == PRI_TIMESHARE)
+ sched_prio(td, PRI_MAX_TIMESHARE);
+ mi_switch(SW_VOL, NULL);
+ mtx_unlock_spin(&sched_lock);
+}
+
int
sched_load(void)
{
diff --git a/sys/posix4/ksched.c b/sys/posix4/ksched.c
index c4a5f0b60ff3..7884d903b459 100644
--- a/sys/posix4/ksched.c
+++ b/sys/posix4/ksched.c
@@ -253,9 +253,7 @@ ksched_getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
int
ksched_yield(register_t *ret, struct ksched *ksched)
{
- mtx_lock_spin(&sched_lock);
- curthread->td_flags |= TDF_NEEDRESCHED;
- mtx_unlock_spin(&sched_lock);
+ sched_relinquish(curthread);
return 0;
}
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 5c5825fff45f..b782c1f62079 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -79,6 +79,7 @@ void sched_add(struct thread *td, int flags);
void sched_clock(struct thread *td);
void sched_rem(struct thread *td);
void sched_tick(void);
+void sched_relinquish(struct thread *td);
/*
* Binding makes cpu affinity permanent while pinning is used to temporarily
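
The declaration above exports the new API to the rest of the kernel: any code path that wants to give up the CPU voluntarily can now delegate to the scheduler the way ksched_yield() does. A hypothetical caller might look like this (sys_yield_sketch() is illustrative and not part of this commit):

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/sched.h>

/* Hypothetical in-kernel caller, modeled on the ksched_yield() hunk
 * above; not part of this commit. */
int
sys_yield_sketch(struct thread *td)
{
	sched_relinquish(td);	/* lower prio if timesharing, then switch */
	td->td_retval[0] = 0;	/* report success to userland */
	return (0);
}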