diff options
 sys/kern/sched_4bsd.c | 5 +++--
 sys/kern/sched_ule.c  | 2 ++
 2 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 219405f97b2d..ee203841edbc 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -378,7 +378,6 @@ resetpriority(struct ksegrp *kg)
 	register unsigned int newpriority;
 	struct thread *td;
 
-	mtx_lock_spin(&sched_lock);
 	if (kg->kg_pri_class == PRI_TIMESHARE) {
 		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
 		    NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
@@ -389,7 +388,6 @@ resetpriority(struct ksegrp *kg)
 	FOREACH_THREAD_IN_GROUP(kg, td) {
 		maybe_resched(td);			/* XXXKSE silly */
 	}
-	mtx_unlock_spin(&sched_lock);
 }
 
 /* ARGSUSED */
@@ -514,6 +512,9 @@ sched_fork_thread(struct thread *td, struct thread *child)
 void
 sched_nice(struct ksegrp *kg, int nice)
 {
+
+	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
+	mtx_assert(&sched_lock, MA_OWNED);
 	kg->kg_nice = nice;
 	resetpriority(kg);
 }
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 130552762597..aa40de846d4e 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -693,6 +693,8 @@ sched_nice(struct ksegrp *kg, int nice)
 	struct thread *td;
 	struct kseq *kseq;
 
+	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
+	mtx_assert(&sched_lock, MA_OWNED);
 	/*
 	 * We need to adjust the nice counts for running KSEs.
 	 */
