Diffstat (limited to 'sys/kern/kern_intr.c')
-rw-r--r--  sys/kern/kern_intr.c  242
1 file changed, 225 insertions(+), 17 deletions(-)
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index c55b294bc5e7..649e4ccb4239 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -32,11 +32,18 @@
#include <sys/bus.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
-#include <sys/malloc.h>
-
-#include <machine/ipl.h>
-
+#include <sys/ipl.h>
#include <sys/interrupt.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/ktr.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/unistd.h>
+#include <sys/vmmeter.h>
+#include <machine/atomic.h>
+#include <machine/cpu.h>
+#include <machine/mutex.h>
struct swilist {
swihand_t *sl_handler;
@@ -44,6 +51,13 @@ struct swilist {
};
static struct swilist swilists[NSWI];
+u_long softintr_count[NSWI];
+static struct proc *softithd;
+volatile u_int sdelayed;
+volatile u_int spending;
+
+static void start_softintr(void *);
+static void intr_soft(void *);
void
register_swi(intr, handler)
@@ -53,18 +67,18 @@ register_swi(intr, handler)
struct swilist *slp, *slq;
int s;
- if (intr < NHWI || intr >= NHWI + NSWI)
+ if (intr < 0 || intr >= NSWI)
panic("register_swi: bad intr %d", intr);
if (handler == swi_generic || handler == swi_null)
panic("register_swi: bad handler %p", (void *)handler);
- slp = &swilists[intr - NHWI];
+ slp = &swilists[intr];
s = splhigh();
- if (ihandlers[intr] == swi_null)
- ihandlers[intr] = handler;
+ if (shandlers[intr] == swi_null)
+ shandlers[intr] = handler;
else {
if (slp->sl_next == NULL) {
- slp->sl_handler = ihandlers[intr];
- ihandlers[intr] = swi_generic;
+ slp->sl_handler = shandlers[intr];
+ shandlers[intr] = swi_generic;
}
slq = malloc(sizeof(*slq), M_DEVBUF, M_NOWAIT);
if (slq == NULL)
@@ -84,7 +98,7 @@ swi_dispatcher(intr)
{
struct swilist *slp;
- slp = &swilists[intr - NHWI];
+ slp = &swilists[intr];
do {
(*slp->sl_handler)();
slp = slp->sl_next;
@@ -99,21 +113,21 @@ unregister_swi(intr, handler)
struct swilist *slfoundpred, *slp, *slq;
int s;
- if (intr < NHWI || intr >= NHWI + NSWI)
+ if (intr < 0 || intr >= NSWI)
panic("unregister_swi: bad intr %d", intr);
if (handler == swi_generic || handler == swi_null)
panic("unregister_swi: bad handler %p", (void *)handler);
- slp = &swilists[intr - NHWI];
+ slp = &swilists[intr];
s = splhigh();
- if (ihandlers[intr] == handler)
- ihandlers[intr] = swi_null;
+ if (shandlers[intr] == handler)
+ shandlers[intr] = swi_null;
else if (slp->sl_next != NULL) {
slfoundpred = NULL;
for (slq = slp->sl_next; slq != NULL;
slp = slq, slq = slp->sl_next)
if (slq->sl_handler == handler)
slfoundpred = slp;
- slp = &swilists[intr - NHWI];
+ slp = &swilists[intr];
if (slfoundpred != NULL) {
slq = slfoundpred->sl_next;
slfoundpred->sl_next = slq->sl_next;
@@ -125,7 +139,7 @@ unregister_swi(intr, handler)
free(slq, M_DEVBUF);
}
if (slp->sl_next == NULL)
- ihandlers[intr] = slp->sl_handler;
+ shandlers[intr] = slp->sl_handler;
}
splx(s);
}
@@ -167,3 +181,197 @@ ithread_priority(flags)
return pri;
}
+/*
+ * Schedule the soft interrupt handler thread.
+ */
+void
+sched_softintr(void)
+{
+ atomic_add_int(&cnt.v_intr, 1); /* one more global interrupt */
+
+ /*
+ * If the soft interrupt thread has not been created yet, there is
+ * nothing to schedule; treat this as a fatal startup-ordering bug.
+ */
+ if (softithd == NULL)
+ panic("soft interrupt scheduled too early");
+
+ CTR3(KTR_INTR, "sched_softintr pid %d(%s) spending=0x%x",
+ softithd->p_pid, softithd->p_comm, spending);
+
+ /*
+ * Get the sched lock and see if the thread is on whichkqs yet.
+ * If not, put it on there. In any case, kick everyone so that if
+ * the new thread is higher priority than their current thread, it
+ * gets run now.
+ */
+ mtx_enter(&sched_lock, MTX_SPIN);
+ if (softithd->p_stat == SWAIT) { /* not on run queue */
+ CTR1(KTR_INTR, "sched_softintr: setrunqueue %d",
+ softithd->p_pid);
+/* membar_lock(); */
+ softithd->p_stat = SRUN;
+ setrunqueue(softithd);
+ aston();
+ }
+ mtx_exit(&sched_lock, MTX_SPIN);
+#if 0
+ aston(); /* ??? check priorities first? */
+#else
+ need_resched();
+#endif
+}
+
+SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)
+
+/*
+ * Start soft interrupt thread.
+ */
+static void
+start_softintr(dummy)
+ void *dummy;
+{
+ int error;
+
+ if (softithd != NULL) { /* we already have a thread */
+ printf("start_softintr: already running");
+ return;
+ }
+
+ error = kthread_create(intr_soft, NULL, &softithd,
+ RFSTOPPED | RFHIGHPID, "softinterrupt");
+ if (error)
+ panic("start_softintr: kthread_create error %d\n", error);
+
+ softithd->p_rtprio.type = RTP_PRIO_ITHREAD;
+ softithd->p_rtprio.prio = PI_SOFT; /* soft interrupt */
+ softithd->p_stat = SWAIT; /* we're idle */
+ softithd->p_flag |= P_NOLOAD;
+}
+
+/*
+ * Software interrupt process code.
+ */
+static void
+intr_soft(dummy)
+ void *dummy;
+{
+ int i;
+ u_int pend;
+
+ /* Main loop */
+ for (;;) {
+ CTR3(KTR_INTR, "intr_soft pid %d(%s) spending=0x%x",
+ curproc->p_pid, curproc->p_comm, spending);
+
+ /*
+ * Service interrupts. If another interrupt arrives
+ * while we are running, it will set spending again to
+ * denote that we should make another pass.
+ */
+ pend = atomic_readandclear_int(&spending);
+ while ((i = ffs(pend))) {
+ i--;
+ atomic_add_long(&softintr_count[i], 1);
+ pend &= ~ (1 << i);
+ mtx_enter(&Giant, MTX_DEF);
+ if (shandlers[i] == swi_generic)
+ swi_dispatcher(i);
+ else
+ (shandlers[i])();
+ mtx_exit(&Giant, MTX_DEF);
+ }
+ /*
+ * Processed all our interrupts. Now get the sched
+ * lock. This may take a while and spending may get
+ * set again, so we have to check it again.
+ */
+ mtx_enter(&sched_lock, MTX_SPIN);
+ if (spending == 0) {
+ CTR1(KTR_INTR, "intr_soft pid %d: done",
+ curproc->p_pid);
+ curproc->p_stat = SWAIT; /* we're idle */
+ mi_switch();
+ CTR1(KTR_INTR, "intr_soft pid %d: resumed",
+ curproc->p_pid);
+ }
+ mtx_exit(&sched_lock, MTX_SPIN);
+ }
+}
+
+/*
+ * Bits in the spending bitmap variable must be set atomically because
+ * spending may be manipulated by interrupts or other CPUs without holding
+ * any locks.
+ *
+ * Note: setbits uses a locked or instruction, making simple cases MP safe.
+ */
+#define DO_SETBITS(name, var, bits) \
+void name(void) \
+{ \
+ atomic_set_int(var, bits); \
+ sched_softintr(); \
+}
+
+#define DO_SETBITS_AND_NO_MORE(name, var, bits) \
+void name(void) \
+{ \
+ atomic_set_int(var, bits); \
+}
+
+DO_SETBITS(setsoftcamnet,&spending, SWI_CAMNET_PENDING)
+DO_SETBITS(setsoftcambio,&spending, SWI_CAMBIO_PENDING)
+DO_SETBITS(setsoftclock, &spending, SWI_CLOCK_PENDING)
+DO_SETBITS(setsoftnet, &spending, SWI_NET_PENDING)
+DO_SETBITS(setsofttty, &spending, SWI_TTY_PENDING)
+DO_SETBITS(setsoftvm, &spending, SWI_VM_PENDING)
+DO_SETBITS(setsofttq, &spending, SWI_TQ_PENDING)
+
+DO_SETBITS_AND_NO_MORE(schedsoftcamnet, &sdelayed, SWI_CAMNET_PENDING)
+DO_SETBITS_AND_NO_MORE(schedsoftcambio, &sdelayed, SWI_CAMBIO_PENDING)
+DO_SETBITS_AND_NO_MORE(schedsoftnet, &sdelayed, SWI_NET_PENDING)
+DO_SETBITS_AND_NO_MORE(schedsofttty, &sdelayed, SWI_TTY_PENDING)
+DO_SETBITS_AND_NO_MORE(schedsoftvm, &sdelayed, SWI_VM_PENDING)
+DO_SETBITS_AND_NO_MORE(schedsofttq, &sdelayed, SWI_TQ_PENDING)
+
+void
+setdelayed(void)
+{
+ int pend;
+
+ pend = atomic_readandclear_int(&sdelayed);
+ if (pend != 0) {
+ atomic_set_int(&spending, pend);
+ sched_softintr();
+ }
+}
+
+intrmask_t
+softclockpending(void)
+{
+ return (spending & SWI_CLOCK_PENDING);
+}
+
+/*
+ * Dummy spl calls. The only reason for these is to not break
+ * all the code which expects to call them.
+ */
+void spl0 (void) {}
+void splx (intrmask_t x) {}
+intrmask_t splq(intrmask_t mask) { return 0; }
+intrmask_t splbio(void) { return 0; }
+intrmask_t splcam(void) { return 0; }
+intrmask_t splclock(void) { return 0; }
+intrmask_t splhigh(void) { return 0; }
+intrmask_t splimp(void) { return 0; }
+intrmask_t splnet(void) { return 0; }
+intrmask_t splsoftcam(void) { return 0; }
+intrmask_t splsoftcambio(void) { return 0; }
+intrmask_t splsoftcamnet(void) { return 0; }
+intrmask_t splsoftclock(void) { return 0; }
+intrmask_t splsofttty(void) { return 0; }
+intrmask_t splsoftvm(void) { return 0; }
+intrmask_t splsofttq(void) { return 0; }
+intrmask_t splstatclock(void) { return 0; }
+intrmask_t spltty(void) { return 0; }
+intrmask_t splvm(void) { return 0; }
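
For reference, each DO_SETBITS() invocation above expands into a trivial function that marks its bit in spending and kicks the soft interrupt thread. The setsoftclock() case, for example, expands to roughly the following (preprocessor output paraphrased, with comments added; this is not additional code in the commit):

/* Approximate expansion of DO_SETBITS(setsoftclock, &spending, SWI_CLOCK_PENDING). */
void
setsoftclock(void)
{
	/* Atomically set the clock bit; the locked or makes this MP safe. */
	atomic_set_int(&spending, SWI_CLOCK_PENDING);
	/* Put the softinterrupt kthread on the run queue if it is idle. */
	sched_softintr();
}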
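
With this change the SWI numbers passed to register_swi()/unregister_swi() are plain indices in [0, NSWI) rather than NHWI-offset values. A minimal usage sketch follows, assuming a hypothetical handler foo_swi_handler and the existing SWI_TQ index from <sys/interrupt.h>; only register_swi(), unregister_swi() and the swihand_t calling convention come from this file:

/*
 * Hypothetical consumer of the SWI API, shown for illustration only.
 * Handlers take no arguments and return void, matching swihand_t.
 */
static void
foo_swi_handler(void)
{
	/* Runs in the softinterrupt kthread; intr_soft() holds Giant here. */
}

static void
foo_init(void)
{
	/* The index is now 0-based; the caller no longer adds NHWI. */
	register_swi(SWI_TQ, foo_swi_handler);
}

static void
foo_uninit(void)
{
	unregister_swi(SWI_TQ, foo_swi_handler);
}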