Diffstat (limited to 'sys/kern/subr_smp.c')
 sys/kern/subr_smp.c | 117 +++++++++++++++++++++++++++++++++++---------------
 1 file changed, 90 insertions(+), 27 deletions(-)
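
This change lets a rendezvous target an arbitrary set of CPUs via the new
smp_rendezvous_cpus(), keeps smp_rendezvous() as an all_cpus wrapper, and adds
smp_no_rendevous_barrier(), a sentinel that callers pass as the setup or
teardown function to skip the corresponding barrier. As a rough illustration
of the new interface only (not part of this commit; the counter, callback, and
run_on_cpus() helper are hypothetical, and the matching declarations are
assumed to be in <sys/smp.h>):

	/*
	 * Hypothetical caller sketch, not part of this commit.
	 */
	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/smp.h>

	#include <machine/atomic.h>

	static volatile u_int example_hits;	/* hypothetical counter */

	static void
	example_action(void *arg)
	{
		/* Runs on each CPU in the mask, in IPI context. */
		atomic_add_int(&example_hits, 1);
	}

	static void
	run_on_cpus(cpumask_t map)		/* hypothetical helper */
	{
		/*
		 * Passing smp_no_rendevous_barrier as setup skips the setup
		 * call and the entry barrier; passing it as teardown lets
		 * each CPU leave without waiting for the others (the
		 * initiator instead spins on smp_rv_waiters[2] before
		 * dropping the rendezvous lock).
		 */
		smp_rendezvous_cpus(map, smp_no_rendevous_barrier,
		    example_action, smp_no_rendevous_barrier, NULL);
	}

The sentinel keeps the existing NULL convention intact: NULL still means "no
function, but still rendezvous", while the sentinel skips the barrier itself.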
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index 75378f8dad36..375aed7eb4ab 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -74,6 +74,9 @@ u_int mp_maxid;
SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD, NULL, "Kernel SMP");
+SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD, &mp_maxid, 0,
+ "Max CPU ID.");
+
SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD, &mp_maxcpus, 0,
"Max number of CPUs that the system was compiled for.");
@@ -104,10 +107,11 @@ SYSCTL_INT(_kern_smp, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
"Forwarding of roundrobin to all other CPUs");
/* Variables needed for SMP rendezvous. */
-static void (*smp_rv_setup_func)(void *arg);
-static void (*smp_rv_action_func)(void *arg);
-static void (*smp_rv_teardown_func)(void *arg);
-static void *smp_rv_func_arg;
+static volatile cpumask_t smp_rv_cpumask;
+static void (*volatile smp_rv_setup_func)(void *arg);
+static void (*volatile smp_rv_action_func)(void *arg);
+static void (*volatile smp_rv_teardown_func)(void *arg);
+static void * volatile smp_rv_func_arg;
static volatile int smp_rv_waiters[3];
/*
@@ -286,6 +290,14 @@ restart_cpus(cpumask_t map)
return 1;
}
+void
+smp_no_rendevous_barrier(void *dummy)
+{
+#ifdef SMP
+ KASSERT((!smp_started),("smp_no_rendevous called and smp is started"));
+#endif
+}
+
/*
* All-CPU rendezvous. CPUs are signalled, all execute the setup function
* (if specified), rendezvous, execute the action function (if specified),
@@ -298,41 +310,57 @@ restart_cpus(cpumask_t map)
void
smp_rendezvous_action(void)
{
+ cpumask_t map = smp_rv_cpumask;
+ int i, ncpus = 0;
+ void* local_func_arg = smp_rv_func_arg;
+ void (*local_setup_func)(void*) = smp_rv_setup_func;
+ void (*local_action_func)(void*) = smp_rv_action_func;
+ void (*local_teardown_func)(void*) = smp_rv_teardown_func;
+
+ for (i = 0; i < MAXCPU; i++)
+ if (((1 << i) & map) != 0 && pcpu_find(i) != NULL)
+ ncpus++;
/* Ensure we have up-to-date values. */
atomic_add_acq_int(&smp_rv_waiters[0], 1);
- while (smp_rv_waiters[0] < mp_ncpus)
+ while (smp_rv_waiters[0] < ncpus)
cpu_spinwait();
/* setup function */
- if (smp_rv_setup_func != NULL)
- smp_rv_setup_func(smp_rv_func_arg);
-
- /* spin on entry rendezvous */
- atomic_add_int(&smp_rv_waiters[1], 1);
- while (smp_rv_waiters[1] < mp_ncpus)
- cpu_spinwait();
+ if (local_setup_func != smp_no_rendevous_barrier) {
+ if (smp_rv_setup_func != NULL)
+ smp_rv_setup_func(smp_rv_func_arg);
+
+ /* spin on entry rendezvous */
+ atomic_add_int(&smp_rv_waiters[1], 1);
+ while (smp_rv_waiters[1] < ncpus)
+ cpu_spinwait();
+ }
/* action function */
- if (smp_rv_action_func != NULL)
- smp_rv_action_func(smp_rv_func_arg);
+ if (local_action_func != NULL)
+ local_action_func(local_func_arg);
/* spin on exit rendezvous */
atomic_add_int(&smp_rv_waiters[2], 1);
- while (smp_rv_waiters[2] < mp_ncpus)
+ if (local_teardown_func == smp_no_rendevous_barrier)
+ return;
+ while (smp_rv_waiters[2] < ncpus)
cpu_spinwait();
/* teardown function */
- if (smp_rv_teardown_func != NULL)
- smp_rv_teardown_func(smp_rv_func_arg);
+ if (local_teardown_func != NULL)
+ local_teardown_func(local_func_arg);
}
void
-smp_rendezvous(void (* setup_func)(void *),
- void (* action_func)(void *),
- void (* teardown_func)(void *),
- void *arg)
+smp_rendezvous_cpus(cpumask_t map,
+ void (* setup_func)(void *),
+ void (* action_func)(void *),
+ void (* teardown_func)(void *),
+ void *arg)
{
+ int i, ncpus = 0;
if (!smp_started) {
if (setup_func != NULL)
@@ -343,11 +371,16 @@ smp_rendezvous(void (* setup_func)(void *),
teardown_func(arg);
return;
}
+
+ for (i = 0; i < MAXCPU; i++)
+ if (((1 << i) & map) != 0 && pcpu_find(i) != NULL)
+ ncpus++;
/* obtain rendezvous lock */
mtx_lock_spin(&smp_ipi_mtx);
/* set static function pointers */
+ smp_rv_cpumask = map & ~(1 << curcpu);
smp_rv_setup_func = setup_func;
smp_rv_action_func = action_func;
smp_rv_teardown_func = teardown_func;
@@ -357,14 +390,29 @@ smp_rendezvous(void (* setup_func)(void *),
atomic_store_rel_int(&smp_rv_waiters[0], 0);
/* signal other processors, which will enter the IPI with interrupts off */
- ipi_all_but_self(IPI_RENDEZVOUS);
+ ipi_selected(map, IPI_RENDEZVOUS);
+
+ /* Check if the current CPU is in the map */
+ if ((map & (1 << curcpu)) != 0)
+ /* call executor function for the current CPU */
+ smp_rendezvous_action();
- /* call executor function */
- smp_rendezvous_action();
+ if (teardown_func == smp_no_rendevous_barrier)
+ while (atomic_load_acq_int(&smp_rv_waiters[2]) < ncpus)
+ cpu_spinwait();
/* release lock */
mtx_unlock_spin(&smp_ipi_mtx);
}
+
+void
+smp_rendezvous(void (* setup_func)(void *),
+ void (* action_func)(void *),
+ void (* teardown_func)(void *),
+ void *arg)
+{
+ smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg);
+}
#else /* !SMP */
/*
@@ -383,9 +431,24 @@ SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
mp_setvariables_for_up, NULL);
void
-smp_rendezvous(void (* setup_func)(void *),
- void (* action_func)(void *),
- void (* teardown_func)(void *),
+smp_rendezvous_cpus(cpumask_t map,
+ void (*setup_func)(void *),
+ void (*action_func)(void *),
+ void (*teardown_func)(void *),
+ void *arg)
+{
+ if (setup_func != NULL)
+ setup_func(arg);
+ if (action_func != NULL)
+ action_func(arg);
+ if (teardown_func != NULL)
+ teardown_func(arg);
+}
+
+void
+smp_rendezvous(void (*setup_func)(void *),
+ void (*action_func)(void *),
+ void (*teardown_func)(void *),
void *arg)
{