author     Jake Burkholder <jake@FreeBSD.org>  2001-02-12 00:20:08 +0000
committer  Jake Burkholder <jake@FreeBSD.org>  2001-02-12 00:20:08 +0000
commit     d5a08a6065a153f519dc5ada150c981d37f91345
tree       22feb65a2c5c3166f680bb7f0e4b1a6798b976c7  /sys/kern/kern_switch.c
parent     216a89d6a40093f3db32638d4b73d9b72816823c
Diffstat (limited to 'sys/kern/kern_switch.c')
-rw-r--r--  sys/kern/kern_switch.c  344
1 file changed, 163 insertions, 181 deletions
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 7515ea8b576a..8374048b1d07 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -1,6 +1,8 @@
/*
* Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
* All rights reserved.
+ * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
+ * All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,225 +34,205 @@
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
-#include <sys/rtprio.h>
#include <sys/queue.h>
/*
- * We have NQS (32) run queues per scheduling class. For the normal
- * class, there are 128 priorities scaled onto these 32 queues. New
- * processes are added to the last entry in each queue, and processes
- * are selected for running by taking them from the head and maintaining
- * a simple FIFO arrangement.
- *
- * Interrupt, real time and idle priority processes have an explicit
- * 0-31 priority which maps directly onto their class queue index.
- * When a queue has something in it, the corresponding bit is set in
- * the queuebits variable, allowing a single read to determine the
- * state of all 32 queues and then a ffs() to find the first busy
- * queue.
- *
- * XXX This needs fixing. First, we only have one idle process, so we
- * hardly need 32 queues for it. Secondly, the number of classes
- * makes things unwieldy. We should be able to merge them into a
- * single 96 or 128 entry queue.
+ * Global run queue.
*/
-struct rq itqueues[NQS]; /* interrupt threads */
-struct rq rtqueues[NQS]; /* real time processes */
-struct rq queues[NQS]; /* time sharing processes */
-struct rq idqueues[NQS]; /* idle process */
-u_int32_t itqueuebits;
-u_int32_t rtqueuebits;
-u_int32_t queuebits;
-u_int32_t idqueuebits;
+static struct runq runq;
+SYSINIT(runq, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, runq_init, &runq)
/*
- * Initialize the run queues at boot time.
+ * Wrappers which implement old interface; act on global run queue.
*/
-static void
-rqinit(void *dummy)
+
+struct proc *
+chooseproc(void)
{
- int i;
+ return runq_choose(&runq);
+}
- for (i = 0; i < NQS; i++) {
- TAILQ_INIT(&itqueues[i]);
- TAILQ_INIT(&rtqueues[i]);
- TAILQ_INIT(&queues[i]);
- TAILQ_INIT(&idqueues[i]);
- }
+int
+procrunnable(void)
+{
+ return runq_check(&runq);
+}
+
+void
+remrunqueue(struct proc *p)
+{
+ runq_remove(&runq, p);
}
-SYSINIT(runqueue, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, rqinit, NULL)
-/*
- * setrunqueue() examines a process priority and class and inserts it on
- * the tail of its appropriate run queue (based on class and priority).
- * This sets the queue busy bit.
- * The process must be runnable.
- * This must be called at splhigh().
- */
void
setrunqueue(struct proc *p)
{
- struct rq *q;
- u_int8_t pri;
+ runq_add(&runq, p);
+}
- mtx_assert(&sched_lock, MA_OWNED);
- KASSERT(p->p_stat == SRUN, ("setrunqueue: proc %p (%s) not SRUN", p, \
- p->p_comm));
+/*
+ * Clear the status bit of the queue corresponding to priority level pri,
+ * indicating that it is empty.
+ */
+static __inline void
+runq_clrbit(struct runq *rq, int pri)
+{
+ struct rqbits *rqb;
- /*
- * Decide which class we want to run. We now have four
- * queues, and this is becoming ugly. We should be able to
- * collapse the first three classes into a single contiguous
- * queue. XXX FIXME.
- */
- CTR4(KTR_PROC, "setrunqueue: proc %p (pid %d, %s), schedlock %lx",
- p, p->p_pid, p->p_comm, (long)sched_lock.mtx_lock);
- if (p->p_rtprio.type == RTP_PRIO_ITHREAD) { /* interrupt thread */
- pri = p->p_rtprio.prio;
- q = &itqueues[pri];
- itqueuebits |= 1 << pri;
- } else if (p->p_rtprio.type == RTP_PRIO_REALTIME || /* real time */
- p->p_rtprio.type == RTP_PRIO_FIFO) {
- pri = p->p_rtprio.prio;
- q = &rtqueues[pri];
- rtqueuebits |= 1 << pri;
- } else if (p->p_rtprio.type == RTP_PRIO_NORMAL) { /* time sharing */
- pri = p->p_priority >> 2;
- q = &queues[pri];
- queuebits |= 1 << pri;
- } else if (p->p_rtprio.type == RTP_PRIO_IDLE) { /* idle proc */
- pri = p->p_rtprio.prio;
- q = &idqueues[pri];
- idqueuebits |= 1 << pri;
- } else {
- panic("setrunqueue: invalid rtprio type %d", p->p_rtprio.type);
- }
- p->p_rqindex = pri; /* remember the queue index */
- TAILQ_INSERT_TAIL(q, p, p_procq);
+ rqb = &rq->rq_status;
+ CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
+ rqb->rqb_bits[RQB_WORD(pri)],
+ rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
+ RQB_BIT(pri), RQB_WORD(pri));
+ rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
+}
+
+/*
+ * Find the index of the first non-empty run queue. This is done by
+ * scanning the status bits; a set bit indicates a non-empty queue.
+ */
+static __inline int
+runq_findbit(struct runq *rq)
+{
+ struct rqbits *rqb;
+ int pri;
+ int i;
+
+ rqb = &rq->rq_status;
+ for (i = 0; i < RQB_LEN; i++)
+ if (rqb->rqb_bits[i]) {
+ pri = (RQB_FFS(rqb->rqb_bits[i]) - 1) +
+ (i << RQB_L2BPW);
+ CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
+ rqb->rqb_bits[i], i, pri);
+ return (pri);
+ }
+
+ return (-1);
+}
+
+/*
+ * Set the status bit of the queue corresponding to priority level pri,
+ * indicating that it is non-empty.
+ */
+static __inline void
+runq_setbit(struct runq *rq, int pri)
+{
+ struct rqbits *rqb;
+
+ rqb = &rq->rq_status;
+ CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
+ rqb->rqb_bits[RQB_WORD(pri)],
+ rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
+ RQB_BIT(pri), RQB_WORD(pri));
+ rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}
/*
- * remrunqueue() removes a given process from the run queue that it is on,
- * clearing the queue busy bit if it becomes empty.
- * This must be called at splhigh().
+ * Add the process to the queue specified by its priority, and set the
+ * corresponding status bit.
*/
void
-remrunqueue(struct proc *p)
+runq_add(struct runq *rq, struct proc *p)
{
- struct rq *q;
- u_int32_t *which;
- u_int8_t pri;
+ struct rqhead *rqh;
+ int pri;
- CTR4(KTR_PROC, "remrunqueue: proc %p (pid %d, %s), schedlock %lx",
- p, p->p_pid, p->p_comm, (long)sched_lock.mtx_lock);
mtx_assert(&sched_lock, MA_OWNED);
- pri = p->p_rqindex;
- if (p->p_rtprio.type == RTP_PRIO_ITHREAD) {
- q = &itqueues[pri];
- which = &itqueuebits;
- } else if (p->p_rtprio.type == RTP_PRIO_REALTIME ||
- p->p_rtprio.type == RTP_PRIO_FIFO) {
- q = &rtqueues[pri];
- which = &rtqueuebits;
- } else if (p->p_rtprio.type == RTP_PRIO_NORMAL) {
- q = &queues[pri];
- which = &queuebits;
- } else if (p->p_rtprio.type == RTP_PRIO_IDLE) {
- q = &idqueues[pri];
- which = &idqueuebits;
- } else {
- panic("remrunqueue: invalid rtprio type");
- }
- TAILQ_REMOVE(q, p, p_procq);
- if (TAILQ_EMPTY(q)) {
- KASSERT((*which & (1 << pri)) != 0,
- ("remrunqueue: remove from empty queue"));
- *which &= ~(1 << pri);
- }
+ KASSERT(p->p_stat == SRUN, ("runq_add: proc %p (%s) not SRUN",
+ p, p->p_comm));
+ pri = p->p_pri.pri_level / RQ_PPQ;
+ p->p_rqindex = pri;
+ runq_setbit(rq, pri);
+ rqh = &rq->rq_queues[pri];
+ CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
+ p, p->p_pri.pri_level, pri, rqh);
+ TAILQ_INSERT_TAIL(rqh, p, p_procq);
}
/*
- * procrunnable() returns a boolean true (non-zero) value if there are
- * any runnable processes. This is intended to be called from the idle
- * loop to avoid the more expensive (and destructive) chooseproc().
- *
- * MP SAFE. CALLED WITHOUT THE MP LOCK
- *
- * XXX I doubt this. It's possibly fail-safe, but there's obviously
- * the case here where one of the bits words gets loaded, the
- * processor gets preempted, and by the time it returns from this
- * function, some other processor has picked the runnable process.
- * What am I missing? (grog, 23 July 2000).
+ * Return true if there are runnable processes of any priority on the run
+ * queue, false otherwise. Has no side effects, does not modify the run
+ * queue structure.
*/
-u_int32_t
-procrunnable(void)
+int
+runq_check(struct runq *rq)
{
- return (itqueuebits || rtqueuebits || queuebits || idqueuebits);
+ struct rqbits *rqb;
+ int i;
+
+ rqb = &rq->rq_status;
+ for (i = 0; i < RQB_LEN; i++)
+ if (rqb->rqb_bits[i]) {
+ CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
+ rqb->rqb_bits[i], i);
+ return (1);
+ }
+ CTR0(KTR_RUNQ, "runq_check: empty");
+
+ return (0);
}
/*
- * chooseproc() selects the next process to run. Ideally, cpu_switch()
- * would have determined that there is a process available before calling
- * this, but it is not a requirement. The selected process is removed
- * from its queue, and the queue busy bit is cleared if it becomes empty.
- * This must be called at splhigh().
- *
- * For SMP, trivial affinity is implemented by locating the first process
- * on the queue that has a matching lastcpu id. Since normal priorities
- * are mapped four priority levels per queue, this may allow the cpu to
- * choose a slightly lower priority process in order to preserve the cpu
- * caches.
+ * Find and remove the highest priority process from the run queue.
+ * If there are no runnable processes, the per-cpu idle process is
+ * returned. Will not return NULL under any circumstances.
*/
struct proc *
-chooseproc(void)
+runq_choose(struct runq *rq)
{
+ struct rqhead *rqh;
struct proc *p;
- struct rq *q;
- u_int32_t *which;
- u_int32_t pri;
-#ifdef SMP
- u_char id;
-#endif
+ int pri;
mtx_assert(&sched_lock, MA_OWNED);
- if (itqueuebits) {
- pri = ffs(itqueuebits) - 1;
- q = &itqueues[pri];
- which = &itqueuebits;
- } else if (rtqueuebits) {
- pri = ffs(rtqueuebits) - 1;
- q = &rtqueues[pri];
- which = &rtqueuebits;
- } else if (queuebits) {
- pri = ffs(queuebits) - 1;
- q = &queues[pri];
- which = &queuebits;
- } else if (idqueuebits) {
- pri = ffs(idqueuebits) - 1;
- q = &idqueues[pri];
- which = &idqueuebits;
- } else {
- CTR1(KTR_PROC, "chooseproc: idleproc, schedlock %lx",
- (long)sched_lock.mtx_lock);
- return PCPU_GET(idleproc);
- }
- p = TAILQ_FIRST(q);
-#ifdef SMP
- /* wander down the current run queue for this pri level for a match */
- id = PCPU_GET(cpuid);
- while (p->p_lastcpu != id) {
- p = TAILQ_NEXT(p, p_procq);
- if (p == NULL) {
- p = TAILQ_FIRST(q);
- break;
+ if ((pri = runq_findbit(rq)) != -1) {
+ rqh = &rq->rq_queues[pri];
+ p = TAILQ_FIRST(rqh);
+ CTR3(KTR_RUNQ, "runq_choose: pri=%d p=%p rqh=%p", pri, p, rqh);
+ TAILQ_REMOVE(rqh, p, p_procq);
+ if (TAILQ_EMPTY(rqh)) {
+ CTR0(KTR_RUNQ, "runq_choose: empty");
+ runq_clrbit(rq, pri);
}
+ return (p);
+ }
+ CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
+
+ return (PCPU_GET(idleproc));
+}
+
+/*
+ * Initialize a run structure.
+ */
+void
+runq_init(struct runq *rq)
+{
+ int i;
+
+ for (i = 0; i < RQ_NQS; i++)
+ TAILQ_INIT(&rq->rq_queues[i]);
+}
+
+/*
+ * Remove the process from the queue specified by its priority, and clear the
+ * corresponding status bit if the queue becomes empty.
+ */
+void
+runq_remove(struct runq *rq, struct proc *p)
+{
+ struct rqhead *rqh;
+ int pri;
+
+ mtx_assert(&sched_lock, MA_OWNED);
+ pri = p->p_rqindex;
+ rqh = &rq->rq_queues[pri];
+ CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
+ p, p->p_pri.pri_level, pri, rqh);
+ KASSERT(p != NULL, ("runq_remove: no proc on busy queue"));
+ TAILQ_REMOVE(rqh, p, p_procq);
+ if (TAILQ_EMPTY(rqh)) {
+ CTR0(KTR_RUNQ, "runq_remove: empty");
+ runq_clrbit(rq, pri);
}
-#endif
- CTR4(KTR_PROC, "chooseproc: proc %p (pid %d, %s), schedlock %lx",
- p, p->p_pid, p->p_comm, (long)sched_lock.mtx_lock);
- KASSERT(p, ("chooseproc: no proc on busy queue"));
- TAILQ_REMOVE(q, p, p_procq);
- if (TAILQ_EMPTY(q))
- *which &= ~(1 << pri);
- return p;
}
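
For readers unfamiliar with the bitmap trick the new code relies on, the sketch below reproduces it in a standalone userspace program. It is only an illustration: the constant values (RQ_NQS, RQ_PPQ, RQB_BPW, RQB_L2BPW) are assumed to match what the sys/runq.h header introduced by this commit defines, and the *_sketch helpers are hypothetical stand-ins for runq_setbit(), runq_clrbit() and runq_findbit() with the KTR tracing and sched_lock assertions left out. Each of the RQ_NQS queues owns one bit in an array of status words; runq_add() sets the bit for queue p_pri.pri_level / RQ_PPQ, and runq_findbit() locates the highest-priority non-empty queue with one ffs() per non-zero word.

/*
 * Standalone sketch of the status-bitmap lookup used by the runq code
 * above.  Constants are assumptions mirroring sys/runq.h at the time of
 * this commit (64 queues, 32-bit status words, 4 priority levels per
 * queue); they are illustrative, not authoritative.
 */
#include <assert.h>
#include <stdio.h>
#include <strings.h>			/* ffs() */

#define	RQ_NQS		64		/* number of run queues */
#define	RQ_PPQ		4		/* priority levels per queue */
#define	RQB_BPW		32		/* bits per status word */
#define	RQB_L2BPW	5		/* log2(RQB_BPW) */
#define	RQB_LEN		(RQ_NQS / RQB_BPW)	/* words in the bitmap */
#define	RQB_WORD(pri)	((pri) >> RQB_L2BPW)
#define	RQB_BIT(pri)	(1u << ((pri) & (RQB_BPW - 1)))

struct rqbits {
	unsigned int rqb_bits[RQB_LEN];
};

/* Mark the queue for priority level pri as non-empty. */
static void
setbit_sketch(struct rqbits *rqb, int pri)
{
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/* Mark the queue for priority level pri as empty. */
static void
clrbit_sketch(struct rqbits *rqb, int pri)
{
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Return the lowest-numbered (highest priority) non-empty queue, or -1
 * if every status word is zero.  ffs() finds the first set bit within a
 * word; the word index supplies the high bits of the queue number.
 */
static int
findbit_sketch(struct rqbits *rqb)
{
	int i;

	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i] != 0)
			return ((ffs(rqb->rqb_bits[i]) - 1) + (i << RQB_L2BPW));
	return (-1);
}

int
main(void)
{
	struct rqbits rqb = { { 0 } };

	/*
	 * A process at priority level 130 lands in queue 130 / RQ_PPQ = 32,
	 * which is bit 0 of status word 1; level 200 lands in queue 50.
	 */
	setbit_sketch(&rqb, 130 / RQ_PPQ);
	setbit_sketch(&rqb, 200 / RQ_PPQ);

	assert(findbit_sketch(&rqb) == 32);	/* queue 32 is found first */
	clrbit_sketch(&rqb, 32);
	assert(findbit_sketch(&rqb) == 50);	/* then queue 200 / 4 = 50 */
	clrbit_sketch(&rqb, 50);
	assert(findbit_sketch(&rqb) == -1);	/* all queues empty */

	printf("bitmap lookup behaves as expected\n");
	return (0);
}

Built with a stock C compiler the asserts pass, which is the invariant runq_choose() depends on: as long as the status bits are kept in sync with the queue heads, a couple of word scans are enough to find the next process to run or to fall back to the per-cpu idle process.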