From 36ec198bd57d045226736d721ca267bb211ba5d0 Mon Sep 17 00:00:00 2001
From: David Xu
Date: Thu, 15 Jun 2006 06:37:39 +0000
Subject: Add scheduler API sched_relinquish(); the API is used to implement
 the yield() and sched_yield() syscalls. Every scheduler has its own way to
 relinquish the CPU: the ULE and CORE schedulers have two internal run-queues,
 and a timesharing thread that calls the yield() syscall should be moved to
 the inactive queue.

---
 sys/kern/ksched.c     |  4 +---
 sys/kern/sched_4bsd.c | 15 +++++++++++++++
 sys/kern/sched_core.c | 15 +++++++++++++++
 sys/kern/sched_ule.c  | 13 +++++++++++++
 sys/posix4/ksched.c   |  4 +---
 sys/sys/sched.h       |  1 +
 6 files changed, 46 insertions(+), 6 deletions(-)

diff --git a/sys/kern/ksched.c b/sys/kern/ksched.c
index c4a5f0b60ff3..7884d903b459 100644
--- a/sys/kern/ksched.c
+++ b/sys/kern/ksched.c
@@ -253,9 +253,7 @@ ksched_getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
 int
 ksched_yield(register_t *ret, struct ksched *ksched)
 {
-	mtx_lock_spin(&sched_lock);
-	curthread->td_flags |= TDF_NEEDRESCHED;
-	mtx_unlock_spin(&sched_lock);
+	sched_relinquish(curthread);
 	return 0;
 }
 
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 60f6df6a5d14..399c9e51052c 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -1354,6 +1354,19 @@ sched_is_bound(struct thread *td)
 	return (td->td_kse->ke_flags & KEF_BOUND);
 }
 
+void
+sched_relinquish(struct thread *td)
+{
+	struct ksegrp *kg;
+
+	kg = td->td_ksegrp;
+	mtx_lock_spin(&sched_lock);
+	if (kg->kg_pri_class == PRI_TIMESHARE)
+		sched_prio(td, PRI_MAX_TIMESHARE);
+	mi_switch(SW_VOL, NULL);
+	mtx_unlock_spin(&sched_lock);
+}
+
 int
 sched_load(void)
 {
@@ -1365,11 +1378,13 @@ sched_sizeof_ksegrp(void)
 {
 	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
 }
+
 int
 sched_sizeof_proc(void)
 {
 	return (sizeof(struct proc));
 }
+
 int
 sched_sizeof_thread(void)
 {
diff --git a/sys/kern/sched_core.c b/sys/kern/sched_core.c
index 3f5554b00189..494e04fd05f3 100644
--- a/sys/kern/sched_core.c
+++ b/sys/kern/sched_core.c
@@ -2310,6 +2310,21 @@ sched_load(void)
 #endif
 }
 
+void
+sched_relinquish(struct thread *td)
+{
+	struct ksegrp *kg;
+
+	kg = td->td_ksegrp;
+	mtx_lock_spin(&sched_lock);
+	if (sched_is_timeshare(kg)) {
+		sched_prio(td, PRI_MAX_TIMESHARE);
+		td->td_kse->ke_flags |= KEF_NEXTRQ;
+	}
+	mi_switch(SW_VOL, NULL);
+	mtx_unlock_spin(&sched_lock);
+}
+
 int
 sched_sizeof_ksegrp(void)
 {
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index a3cccfe89e52..a352445b209f 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1974,6 +1974,19 @@ sched_is_bound(struct thread *td)
 	return (td->td_kse->ke_flags & KEF_BOUND);
 }
 
+void
+sched_relinquish(struct thread *td)
+{
+	struct ksegrp *kg;
+
+	kg = td->td_ksegrp;
+	mtx_lock_spin(&sched_lock);
+	if (kg->kg_pri_class == PRI_TIMESHARE)
+		sched_prio(td, PRI_MAX_TIMESHARE);
+	mi_switch(SW_VOL, NULL);
+	mtx_unlock_spin(&sched_lock);
+}
+
 int
 sched_load(void)
 {
diff --git a/sys/posix4/ksched.c b/sys/posix4/ksched.c
index c4a5f0b60ff3..7884d903b459 100644
--- a/sys/posix4/ksched.c
+++ b/sys/posix4/ksched.c
@@ -253,9 +253,7 @@ ksched_getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
 int
 ksched_yield(register_t *ret, struct ksched *ksched)
 {
-	mtx_lock_spin(&sched_lock);
-	curthread->td_flags |= TDF_NEEDRESCHED;
-	mtx_unlock_spin(&sched_lock);
+	sched_relinquish(curthread);
 	return 0;
 }
 
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 5c5825fff45f..b782c1f62079 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -79,6 +79,7 @@ void	sched_add(struct thread *td, int flags);
 void	sched_clock(struct thread *td);
 void	sched_rem(struct thread *td);
 void	sched_tick(void);
+void	sched_relinquish(struct thread *td);
 
 /*
  * Binding makes cpu affinity permanent while pinning is used to temporarily
--
cgit v1.2.3
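
For readers unfamiliar with the call path: a userland thread reaches the new
hook through the POSIX sched_yield(2) (or the FreeBSD yield(2)) syscall, which
the kernel dispatches to ksched_yield() and, after this change, to the
per-scheduler sched_relinquish(). The sketch below is illustrative only and is
not part of the patch; it assumes nothing beyond the standard <sched.h>
interface and simply shows a thread voluntarily giving up the processor in a
polling-style loop.

/*
 * Illustrative sketch only -- not part of the commit above.
 * A userland thread that has nothing useful to do can give up the
 * processor voluntarily with sched_yield(); with this change, a
 * timesharing thread doing so under the ULE or CORE scheduler is
 * moved to the inactive run-queue rather than merely being flagged
 * with TDF_NEEDRESCHED.
 */
#include <sched.h>
#include <stdio.h>

int
main(void)
{
	int i;

	for (i = 0; i < 5; i++) {
		printf("iteration %d: yielding the CPU\n", i);
		if (sched_yield() != 0)
			perror("sched_yield");
	}
	return (0);
}

Built with a plain cc yield.c, the loop runs back-to-back on an otherwise idle
system; when other runnable timesharing threads exist, each sched_yield() call
gives them a chance to run before the loop continues.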