author     Gleb Smirnoff <glebius@FreeBSD.org>  2020-01-23 01:24:47 +0000
committer  Gleb Smirnoff <glebius@FreeBSD.org>  2020-01-23 01:24:47 +0000
commit     511d1afb6bfe5f0366692c419c7d76c55076f39f (patch)
tree       058d81067f0fd16cfe9fe838b0a524ac64bfa165
parent     c4eb66309fb2d27bb1051f5a5aa326f9a8067d3f (diff)
-rw-r--r--  sys/kern/kern_intr.c  | 24
-rw-r--r--  sys/sys/interrupt.h   |  1
2 files changed, 23 insertions, 2 deletions
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index 96ae0023debe..523811f38da6 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -48,6 +48,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/mutex.h>
 #include <sys/priv.h>
 #include <sys/proc.h>
+#include <sys/epoch.h>
 #include <sys/random.h>
 #include <sys/resourcevar.h>
 #include <sys/sched.h>
@@ -94,6 +95,9 @@ static int intr_storm_threshold = 0;
 SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
     &intr_storm_threshold, 0,
     "Number of consecutive interrupts before storm protection is enabled");
+static int intr_epoch_batch = 1000;
+SYSCTL_INT(_hw, OID_AUTO, intr_epoch_batch, CTLFLAG_RWTUN, &intr_epoch_batch,
+    0, "Maximum interrupt handler executions without re-entering epoch(9)");
 static TAILQ_HEAD(, intr_event) event_list =
         TAILQ_HEAD_INITIALIZER(event_list);
 static struct mtx event_lock;
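
Because the new knob is created with CTLFLAG_RWTUN, it can be preset as a loader tunable or adjusted at run time under the name hw.intr_epoch_batch. A minimal userland sketch of reading it and, with sufficient privilege, changing it through sysctlbyname(3); the value 2000 below is an arbitrary example, not a recommendation:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        int batch, newbatch = 2000;
        size_t len = sizeof(batch);

        /* Read the current batching threshold (default 1000). */
        if (sysctlbyname("hw.intr_epoch_batch", &batch, &len, NULL, 0) == -1) {
                perror("sysctlbyname");
                return (1);
        }
        printf("hw.intr_epoch_batch = %d\n", batch);

        /* Raising it needs root; otherwise this fails with EPERM. */
        if (sysctlbyname("hw.intr_epoch_batch", NULL, NULL, &newbatch,
            sizeof(newbatch)) == -1)
                perror("sysctlbyname(set)");
        return (0);
}
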
@@ -587,6 +591,8 @@ intr_event_add_handler(struct intr_event *ie, const char *name,
                 ih->ih_flags |= IH_MPSAFE;
         if (flags & INTR_ENTROPY)
                 ih->ih_flags |= IH_ENTROPY;
+        if (flags & INTR_TYPE_NET)
+                ih->ih_flags |= IH_NET;
 
         /* We can only have one exclusive handler in a event. */
         mtx_lock(&ie->ie_lock);
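
For context on where INTR_TYPE_NET comes from: a network driver passes it when hooking its interrupt with bus_setup_intr(9), and those flags are what intr_event_add_handler() inspects above. A hypothetical driver fragment (all foo_* names are made up for illustration) looks roughly like this:

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/rman.h>

struct foo_softc {
        device_t         foo_dev;
        struct resource *foo_irq;       /* from bus_alloc_resource_any() */
        void            *foo_intrhand;
};

/*
 * Threaded interrupt handler.  With this change the interrupt thread has
 * already entered the network epoch before calling it, because the handler
 * was registered with INTR_TYPE_NET and therefore carries IH_NET.
 */
static void
foo_intr(void *arg)
{
        struct foo_softc *sc = arg;

        (void)sc;       /* rx/tx processing would go here */
}

static int
foo_setup_intr(struct foo_softc *sc)
{

        /* INTR_TYPE_NET is what becomes IH_NET in intr_event_add_handler(). */
        return (bus_setup_intr(sc->foo_dev, sc->foo_irq,
            INTR_TYPE_NET | INTR_MPSAFE, NULL, foo_intr, sc,
            &sc->foo_intrhand));
}
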
@@ -1196,11 +1202,12 @@ ithread_execute_handlers(struct proc *p, struct intr_event *ie)
 static void
 ithread_loop(void *arg)
 {
+        struct epoch_tracker et;
         struct intr_thread *ithd;
         struct intr_event *ie;
         struct thread *td;
         struct proc *p;
-        int wake;
+        int wake, epoch_count;
 
         td = curthread;
         p = td->td_proc;
@@ -1235,8 +1242,21 @@ ithread_loop(void *arg)
                  * that the load of ih_need in ithread_execute_handlers()
                  * is ordered after the load of it_need here.
                  */
-                while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0)
+                if (ie->ie_hflags & IH_NET) {
+                        epoch_count = 0;
+                        NET_EPOCH_ENTER(et);
+                }
+                while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
                         ithread_execute_handlers(p, ie);
+                        if ((ie->ie_hflags & IH_NET) &&
+                            ++epoch_count >= intr_epoch_batch) {
+                                NET_EPOCH_EXIT(et);
+                                epoch_count = 0;
+                                NET_EPOCH_ENTER(et);
+                        }
+                }
+                if (ie->ie_hflags & IH_NET)
+                        NET_EPOCH_EXIT(et);
 
                 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
                 mtx_assert(&Giant, MA_NOTOWNED);
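
The point of the batching is to amortize the cost of entering and leaving the network epoch over many handler executions while still guaranteeing that a continuously busy interrupt thread periodically closes its epoch section, so that epoch(9) grace periods, and with them deferred frees queued by other threads, can complete. A condensed paraphrase of the new loop body, with that reasoning spelled out in comments (illustration only; in the kernel this code sits inline in ithread_loop() and struct intr_thread is private to kern_intr.c):

static void
run_pending_handlers(struct intr_event *ie, struct intr_thread *ithd,
    struct proc *p)
{
        struct epoch_tracker et;
        int epoch_count = 0;

        /* Enter the network epoch once, not once per handler execution. */
        if (ie->ie_hflags & IH_NET)
                NET_EPOCH_ENTER(et);

        while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
                ithread_execute_handlers(p, ie);

                /*
                 * Under sustained load this loop may not sleep for a long
                 * time, so exit and immediately re-enter the epoch every
                 * intr_epoch_batch (default 1000) executions.  That bounds
                 * how long a single epoch section stays open.
                 */
                if ((ie->ie_hflags & IH_NET) &&
                    ++epoch_count >= intr_epoch_batch) {
                        NET_EPOCH_EXIT(et);
                        epoch_count = 0;
                        NET_EPOCH_ENTER(et);
                }
        }

        if (ie->ie_hflags & IH_NET)
                NET_EPOCH_EXIT(et);
}
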
diff --git a/sys/sys/interrupt.h b/sys/sys/interrupt.h
index cbbb26176bcf..3492b17bf8d4 100644
--- a/sys/sys/interrupt.h
+++ b/sys/sys/interrupt.h
@@ -58,6 +58,7 @@ struct intr_handler {
 };
 
 /* Interrupt handle flags kept in ih_flags */
+#define IH_NET          0x00000001      /* Network. */
 #define IH_EXCLUSIVE    0x00000002      /* Exclusive interrupt. */
 #define IH_ENTROPY      0x00000004      /* Device is a good entropy source. */
 #define IH_DEAD         0x00000008      /* Handler should be removed. */
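
Note that ithread_loop() tests ie->ie_hflags, the event's cumulative mask of its handlers' ih_flags, so a single IH_NET handler is enough to put the whole interrupt thread into the network epoch. Conceptually (illustration only, not the kernel's exact code) the new bit propagates like this:

/* In intr_event_add_handler(), per the hunk above: */
ih->ih_flags |= IH_NET;
/* ...and the handler's flags are folded into the event-wide mask: */
ie->ie_hflags |= ih->ih_flags;
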