Diffstat (limited to 'sys/kern/kern_synch.c'):

 -rw-r--r--  sys/kern/kern_synch.c | 211
 1 file changed, 26 insertions(+), 185 deletions(-)
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index f8baf85461cae..2d5105514a20f 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -36,7 +36,7 @@
  * SUCH DAMAGE.
  *
  *      @(#)kern_synch.c        8.9 (Berkeley) 5/19/95
- *      $Id: kern_synch.c,v 1.71 1999/01/08 17:31:10 eivind Exp $
+ *      $Id: kern_synch.c,v 1.61 1998/07/15 02:32:10 bde Exp $
  */
 
 #include "opt_ktrace.h"
@@ -138,9 +138,7 @@ static void
 roundrobin(arg)
         void *arg;
 {
-#ifndef SMP
         struct proc *p = curproc;       /* XXX */
-#endif
 
 #ifdef SMP
         need_resched();
@@ -225,8 +223,8 @@ roundrobin(arg)
 static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE;        /* exp(-1/20) */
 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
 
-/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
-static int      fscale __unused = FSCALE;
+/* kernel uses `FSCALE', user uses `fscale' */
+static int      fscale = FSCALE;
 SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");
 
 /*
@@ -253,10 +251,9 @@ schedcpu(arg)
 {
         register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
         register struct proc *p;
-        register int realstathz, s;
+        register int s;
         register unsigned int newcpu;
 
-        realstathz = stathz ? stathz : hz;
         for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
                 /*
                  * Increment time in/out of memory and sleep time
@@ -278,13 +275,13 @@ schedcpu(arg)
                  * p_pctcpu is only for ps.
                  */
 #if     (FSHIFT >= CCPU_SHIFT)
-                p->p_pctcpu += (realstathz == 100)?
+                p->p_pctcpu += (hz == 100)?
                         ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
                         100 * (((fixpt_t) p->p_cpticks)
-                                << (FSHIFT - CCPU_SHIFT)) / realstathz;
+                                << (FSHIFT - CCPU_SHIFT)) / hz;
 #else
                 p->p_pctcpu += ((FSCALE - ccpu) *
-                        (p->p_cpticks * FSCALE / realstathz)) >> FSHIFT;
+                        (p->p_cpticks * FSCALE / hz)) >> FSHIFT;
 #endif
                 p->p_cpticks = 0;
                 newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu) + p->p_nice;
@@ -401,15 +398,15 @@ tsleep(ident, priority, wmesg, timo)
                 splx(s);
                 return (0);
         }
-        KASSERT(p != NULL, ("tsleep1"));
-        KASSERT(ident != NULL && p->p_stat == SRUN, ("tsleep"));
-        /*
-         * Process may be sitting on a slpque if asleep() was called, remove
-         * it before re-adding.
-         */
-        if (p->p_wchan != NULL)
-                unsleep(p);
-
+#ifdef DIAGNOSTIC
+        if(p == NULL)
+                panic("tsleep1");
+        if (ident == NULL || p->p_stat != SRUN)
+                panic("tsleep");
+        /* XXX This is not exhaustive, just the most common case */
+        if ((p->p_procq.tqe_prev != NULL) && (*p->p_procq.tqe_prev == p))
+                panic("sleeping process already on another queue");
+#endif
         p->p_wchan = ident;
         p->p_wmesg = wmesg;
         p->p_slptime = 0;
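In the schedcpu() hunks above, p_cpticks counts the clock ticks a process was charged over the sampling interval, and p_pctcpu accumulates them as a fixed-point fraction scaled by FSCALE. The general form multiplies by 100 and divides by hz, so when hz is 100 the two cancel and only the shift remains, which is why the code special-cases hz == 100. A minimal user-space sketch of that arithmetic, assuming the historical FSHIFT and CCPU_SHIFT values of 11 (taken from sys/param.h and kern_synch.c, not from this diff):

#include <assert.h>
#include <stdio.h>

/*
 * Stand-alone sketch of the fixed-point %CPU increment computed in the
 * schedcpu() hunk above.  FSHIFT, FSCALE and CCPU_SHIFT are assumed values
 * matching the historical headers; they are not part of this diff.
 */
#define FSHIFT          11
#define FSCALE          (1 << FSHIFT)
#define CCPU_SHIFT      11

typedef unsigned int fixpt_t;

int
main(void)
{
        fixpt_t cpticks = 37;           /* ticks charged over the interval */
        int hz = 100;                   /* clock interrupts per second */

        /* General form: ticks as a percentage of hz, kept in fixed point. */
        fixpt_t general = 100 * (cpticks << (FSHIFT - CCPU_SHIFT)) / hz;

        /* hz == 100 shortcut: the multiply by 100 and divide by hz cancel. */
        fixpt_t shortcut = cpticks << (FSHIFT - CCPU_SHIFT);

        assert(general == shortcut);    /* identical when hz is 100 */
        printf("p_pctcpu increment: %u/%d\n", general, FSCALE);
        return (0);
}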
@@ -475,170 +472,7 @@ resume:
 }
 
 /*
- * asleep() - async sleep call. Place process on wait queue and return
- * immediately without blocking. The process stays runnable until await()
- * is called. If ident is NULL, remove process from wait queue if it is still
- * on one.
- *
- * Only the most recent sleep condition is effective when making successive
- * calls to asleep() or when calling tsleep().
- *
- * The timeout, if any, is not initiated until await() is called. The sleep
- * priority, signal, and timeout is specified in the asleep() call but may be
- * overriden in the await() call.
- *
- * <<<<<<<< EXPERIMENTAL, UNTESTED >>>>>>>>>>
- */
-
-int
-asleep(void *ident, int priority, const char *wmesg, int timo)
-{
-        struct proc *p = curproc;
-        int s;
-
-        /*
-         * splhigh() while manipulating sleep structures and slpque.
-         *
-         * Remove preexisting wait condition (if any) and place process
-         * on appropriate slpque, but do not put process to sleep.
-         */
-
-        s = splhigh();
-
-        if (p->p_wchan != NULL)
-                unsleep(p);
-
-        if (ident) {
-                p->p_wchan = ident;
-                p->p_wmesg = wmesg;
-                p->p_slptime = 0;
-                p->p_asleep.as_priority = priority;
-                p->p_asleep.as_timo = timo;
-                TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_procq);
-        }
-
-        splx(s);
-
-        return(0);
-}
-
-/*
- * await() - wait for async condition to occur. The process blocks until
- * wakeup() is called on the most recent asleep() address. If wakeup is called
- * priority to await(), await() winds up being a NOP.
- *
- * If await() is called more then once (without an intervening asleep() call),
- * await() is still effectively a NOP but it calls mi_switch() to give other
- * processes some cpu before returning. The process is left runnable.
- *
- * <<<<<<<< EXPERIMENTAL, UNTESTED >>>>>>>>>>
- */
-
-int
-await(int priority, int timo)
-{
-        struct proc *p = curproc;
-        int s;
-
-        s = splhigh();
-
-        if (p->p_wchan != NULL) {
-                struct callout_handle thandle;
-                int sig;
-                int catch;
-
-                /*
-                 * The call to await() can override defaults specified in
-                 * the original asleep().
-                 */
-                if (priority < 0)
-                        priority = p->p_asleep.as_priority;
-                if (timo < 0)
-                        timo = p->p_asleep.as_timo;
-
-                /*
-                 * Install timeout
-                 */
-
-                if (timo)
-                        thandle = timeout(endtsleep, (void *)p, timo);
-
-                sig = 0;
-                catch = priority & PCATCH;
-
-                if (catch) {
-                        p->p_flag |= P_SINTR;
-                        if ((sig = CURSIG(p))) {
-                                if (p->p_wchan)
-                                        unsleep(p);
-                                p->p_stat = SRUN;
-                                goto resume;
-                        }
-                        if (p->p_wchan == NULL) {
-                                catch = 0;
-                                goto resume;
-                        }
-                }
-                p->p_stat = SSLEEP;
-                p->p_stats->p_ru.ru_nvcsw++;
-                mi_switch();
-resume:
-                curpriority = p->p_usrpri;
-
-                splx(s);
-                p->p_flag &= ~P_SINTR;
-                if (p->p_flag & P_TIMEOUT) {
-                        p->p_flag &= ~P_TIMEOUT;
-                        if (sig == 0) {
-#ifdef KTRACE
-                                if (KTRPOINT(p, KTR_CSW))
-                                        ktrcsw(p->p_tracep, 0, 0);
-#endif
-                                return (EWOULDBLOCK);
-                        }
-                } else if (timo)
-                        untimeout(endtsleep, (void *)p, thandle);
-                if (catch && (sig != 0 || (sig = CURSIG(p)))) {
-#ifdef KTRACE
-                        if (KTRPOINT(p, KTR_CSW))
-                                ktrcsw(p->p_tracep, 0, 0);
-#endif
-                        if (p->p_sigacts->ps_sigintr & sigmask(sig))
-                                return (EINTR);
-                        return (ERESTART);
-                }
-#ifdef KTRACE
-                if (KTRPOINT(p, KTR_CSW))
-                        ktrcsw(p->p_tracep, 0, 0);
-#endif
-        } else {
-                /*
-                 * If as_priority is 0, await() has been called without an
-                 * intervening asleep(). We are still effectively a NOP,
-                 * but we call mi_switch() for safety.
-                 */
-
-                if (p->p_asleep.as_priority == 0) {
-                        p->p_stats->p_ru.ru_nvcsw++;
-                        mi_switch();
-                }
-                splx(s);
-        }
-
-        /*
-         * clear p_asleep.as_priority as an indication that await() has been
-         * called. If await() is called again without an intervening asleep(),
-         * await() is still effectively a NOP but the above mi_switch() code
-         * is triggered as a safety.
-         */
-        p->p_asleep.as_priority = 0;
-
-        return (0);
-}
-
-/*
- * Implement timeout for tsleep or asleep()/await()
- *
+ * Implement timeout for tsleep.
  * If process hasn't been awakened (wchan non-zero),
  * set timeout flag and undo the sleep. If proc
  * is stopped, just unsleep so it will remain stopped.
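The asleep()/await() pair deleted above split a blocking sleep into two steps: asleep() put the process on the hashed sleep queue for the wait channel and returned immediately, while await() blocked only if no wakeup() had arrived in the meantime; the removed comments flag the interface as experimental and untested. The same arm-then-block idea can be sketched in user space with POSIX threads. Everything below (the names, the fired flag, the sleep interval) is illustrative and is not taken from the kernel code:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/*
 * User-space analogue (pthreads, not kernel code) of the arm-then-block
 * pattern described in the removed asleep()/await() comments: note interest
 * in an event first, do other work, then block only if the event has not
 * fired yet.
 */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv   = PTHREAD_COND_INITIALIZER;
static int fired;                       /* analogue of wakeup() having run */

static void *
waker(void *arg)
{
        (void)arg;
        usleep(100 * 1000);             /* pretend some I/O completes */
        pthread_mutex_lock(&lock);
        fired = 1;                      /* analogue of wakeup(ident) */
        pthread_cond_broadcast(&cv);
        pthread_mutex_unlock(&lock);
        return (NULL);
}

int
main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, waker, NULL);

        /* "asleep": the condition is noted, but we keep running. */
        /* ... other work could go here without holding any lock ... */

        /* "await": block only if the wakeup has not already happened. */
        pthread_mutex_lock(&lock);
        while (!fired)
                pthread_cond_wait(&cv, &lock);
        pthread_mutex_unlock(&lock);

        printf("event observed\n");
        pthread_join(t, NULL);
        return (0);
}

As with the removed await(), a wakeup that arrives before the blocking step simply makes the wait fall through without sleeping.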
@@ -694,6 +528,10 @@ wakeup(ident)
         qp = &slpque[LOOKUP(ident)];
 restart:
         for (p = qp->tqh_first; p != NULL; p = p->p_procq.tqe_next) {
+#ifdef DIAGNOSTIC
+                if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
+                        panic("wakeup");
+#endif
                 if (p->p_wchan == ident) {
                         TAILQ_REMOVE(qp, p, p_procq);
                         p->p_wchan = 0;
@@ -735,6 +573,10 @@ wakeup_one(ident)
         qp = &slpque[LOOKUP(ident)];
 
         for (p = qp->tqh_first; p != NULL; p = p->p_procq.tqe_next) {
+#ifdef DIAGNOSTIC
+                if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
+                        panic("wakeup_one");
+#endif
                 if (p->p_wchan == ident) {
                         TAILQ_REMOVE(qp, p, p_procq);
                         p->p_wchan = 0;
@@ -805,8 +647,7 @@ mi_switch()
          * Check if the process exceeds its cpu resource allocation.
          * If over max, kill it.
          */
-        if (p->p_stat != SZOMB && p->p_limit->p_cpulimit != RLIM_INFINITY &&
-            p->p_runtime > p->p_limit->p_cpulimit) {
+        if (p->p_stat != SZOMB && p->p_runtime > p->p_limit->p_cpulimit) {
                 rlim = &p->p_rlimit[RLIMIT_CPU];
                 if (p->p_runtime / (rlim_t)1000000 >= rlim->rlim_max) {
                         killproc(p, "exceeded maximum CPU limit");
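In the final mi_switch() hunk, p_runtime is kept in microseconds while the RLIMIT_CPU soft and hard limits are expressed in seconds, hence the division by 1000000 before comparing against rlim_max. A user-space sketch of the same comparison, using getrlimit() and an invented runtime value (the soft-limit SIGXCPU behaviour is assumed from standard resource-limit semantics and is not shown in this hunk):

#include <stdio.h>
#include <sys/resource.h>

/*
 * User-space sketch (not kernel code) of the seconds-vs-microseconds
 * comparison in the mi_switch() hunk above.  The runtime value is invented
 * for illustration; a real process would obtain it from getrusage().
 */
int
main(void)
{
        struct rlimit rlim;
        long long runtime_us = 7500000LL;       /* pretend 7.5 s of CPU time */
        rlim_t runtime_sec;

        if (getrlimit(RLIMIT_CPU, &rlim) != 0) {
                perror("getrlimit");
                return (1);
        }
        runtime_sec = (rlim_t)(runtime_us / 1000000);

        if (rlim.rlim_max != RLIM_INFINITY && runtime_sec >= rlim.rlim_max)
                printf("over the hard limit: the kernel would kill the process\n");
        else if (rlim.rlim_cur != RLIM_INFINITY && runtime_sec >= rlim.rlim_cur)
                printf("over the soft limit: SIGXCPU would be delivered\n");
        else
                printf("within the CPU limits\n");
        return (0);
}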