 sys/kern/subr_smp.c | 41
 sys/sys/smp.h       | 13
 2 files changed, 54 insertions(+), 0 deletions(-)
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index 818858909d717..93df59f32ee09 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -884,6 +884,47 @@ smp_no_rendezvous_barrier(void *dummy)
 #endif
 }
 
+void
+smp_rendezvous_cpus_retry(cpuset_t map,
+	void (* setup_func)(void *),
+	void (* action_func)(void *),
+	void (* teardown_func)(void *),
+	void (* wait_func)(void *, int),
+	struct smp_rendezvous_cpus_retry_arg *arg)
+{
+	int cpu;
+
+	/*
+	 * Execute an action on all specified CPUs while retrying until they
+	 * all acknowledge completion.
+	 */
+	CPU_COPY(&map, &arg->cpus);
+	for (;;) {
+		smp_rendezvous_cpus(
+		    arg->cpus,
+		    setup_func,
+		    action_func,
+		    teardown_func,
+		    arg);
+
+		if (CPU_EMPTY(&arg->cpus))
+			break;
+
+		CPU_FOREACH(cpu) {
+			if (!CPU_ISSET(cpu, &arg->cpus))
+				continue;
+			wait_func(arg, cpu);
+		}
+	}
+}
+
+void
+smp_rendezvous_cpus_done(struct smp_rendezvous_cpus_retry_arg *arg)
+{
+
+	CPU_CLR_ATOMIC(curcpu, &arg->cpus);
+}
+
 /*
  * Wait for specified idle threads to switch once. This ensures that even
  * preempted threads have cycled through the switch function once,
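
Not part of the commit: a minimal usage sketch of the new API, under the following assumptions. The flush_* functions, struct flush_ctx and flush_local_cache() are hypothetical placeholders; smp_no_rendezvous_barrier(), __containerof() and cpu_spinwait() are existing kernel facilities. Because smp_rendezvous_cpus_retry() hands the struct smp_rendezvous_cpus_retry_arg pointer to the callbacks, the sketch embeds it in a caller-defined context and recovers the enclosing structure with __containerof().

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/smp.h>
#include <machine/cpu.h>

/* Hypothetical per-request context; the retry arg is embedded. */
struct flush_ctx {
	struct smp_rendezvous_cpus_retry_arg srcra;
	int	gen;			/* example payload */
};

/* Placeholder for per-CPU work that may fail transiently. */
static bool
flush_local_cache(int gen __unused)
{

	return (true);
}

static void
flush_action(void *arg)
{
	struct flush_ctx *ctx;

	ctx = __containerof(arg, struct flush_ctx, srcra);
	/*
	 * Acknowledge only on success; otherwise this CPU's bit stays
	 * set in ctx->srcra.cpus and the rendezvous is retried for it.
	 */
	if (flush_local_cache(ctx->gen))
		smp_rendezvous_cpus_done(&ctx->srcra);
}

static void
flush_wait(void *arg __unused, int cpu __unused)
{

	/* Give the still-pending CPU some time before the next round. */
	cpu_spinwait();
}

static void
flush_on_cpus(cpuset_t cpus, int gen)
{
	struct flush_ctx ctx;

	ctx.gen = gen;
	smp_rendezvous_cpus_retry(cpus,
	    smp_no_rendezvous_barrier,		/* setup */
	    flush_action,			/* action */
	    smp_no_rendezvous_barrier,		/* teardown */
	    flush_wait,
	    &ctx.srcra);
}
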
diff --git a/sys/sys/smp.h b/sys/sys/smp.h
index 212ae6c35e56c..a7ca84e92bc77 100644
--- a/sys/sys/smp.h
+++ b/sys/sys/smp.h
@@ -276,6 +276,19 @@ void smp_rendezvous_cpus(cpuset_t,
 	    void (*)(void *),
 	    void (*)(void *),
 	    void *arg);
+
+struct smp_rendezvous_cpus_retry_arg {
+	cpuset_t cpus;
+};
+void	smp_rendezvous_cpus_retry(cpuset_t,
+	    void (*)(void *),
+	    void (*)(void *),
+	    void (*)(void *),
+	    void (*)(void *, int),
+	    struct smp_rendezvous_cpus_retry_arg *);
+
+void	smp_rendezvous_cpus_done(struct smp_rendezvous_cpus_retry_arg *);
+
 #endif /* !LOCORE */
 #endif /* _KERNEL */
 #endif /* _SYS_SMP_H_ */
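
A few notes on the contract, as it follows from the implementation above: smp_rendezvous_cpus_retry() copies the requested map into arg->cpus and keeps re-issuing smp_rendezvous_cpus() for the CPUs still set there, invoking wait_func(arg, cpu) for each pending CPU between rounds, and it returns only once the set is empty. smp_rendezvous_cpus_done() clears curcpu's bit with CPU_CLR_ATOMIC(), so it has to run on the acknowledging CPU itself (typically from the action callback); the initiator cannot acknowledge on another CPU's behalf. The argument handed to the setup/action/teardown callbacks is the struct smp_rendezvous_cpus_retry_arg pointer itself, which is why the sketch above embeds it in a larger context structure.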