Diffstat (limited to 'sys/dev/random/random_harvestq.c')
-rw-r--r--  sys/dev/random/random_harvestq.c  335
1 file changed, 277 insertions(+), 58 deletions(-)
diff --git a/sys/dev/random/random_harvestq.c b/sys/dev/random/random_harvestq.c
index ee37bda36496..c7762967c4fb 100644
--- a/sys/dev/random/random_harvestq.c
+++ b/sys/dev/random/random_harvestq.c
@@ -88,6 +88,8 @@ static void random_sources_feed(void);
static __read_mostly bool epoch_inited;
static __read_mostly epoch_t rs_epoch;
+static const char *random_source_descr[ENTROPYSOURCE];
+
/*
* How many events to queue up. We create this many items in
* an 'empty' queue, then transfer them to the 'harvest' queue with
@@ -131,30 +133,14 @@ static struct harvest_context {
/* The context of the kernel thread processing harvested entropy */
struct proc *hc_kthread_proc;
/*
- * Lockless ring buffer holding entropy events
- * If ring.in == ring.out,
- * the buffer is empty.
- * If ring.in != ring.out,
- * the buffer contains harvested entropy.
- * If (ring.in + 1) == ring.out (mod RANDOM_RING_MAX),
- * the buffer is full.
- *
- * NOTE: ring.in points to the last added element,
- * and ring.out points to the last consumed element.
- *
- * The ring.in variable needs locking as there are multiple
- * sources to the ring. Only the sources may change ring.in,
- * but the consumer may examine it.
- *
- * The ring.out variable does not need locking as there is
- * only one consumer. Only the consumer may change ring.out,
- * but the sources may examine it.
+ * A pair of buffers for queued events. New events are added to the
+ * active queue while the kthread processes the other one in parallel.
*/
- struct entropy_ring {
+ struct entropy_buffer {
struct harvest_event ring[RANDOM_RING_MAX];
- volatile u_int in;
- volatile u_int out;
- } hc_entropy_ring;
+ u_int pos;
+ } hc_entropy_buf[2];
+ u_int hc_active_buf;
struct fast_entropy_accumulator {
volatile u_int pos;
uint32_t buf[RANDOM_ACCUM_MAX];
@@ -183,37 +169,41 @@ random_harvestq_fast_process_event(struct harvest_event *event)
static void
random_kthread(void)
{
- u_int maxloop, ring_out, i;
+ struct harvest_context *hc;
- /*
- * Locking is not needed as this is the only place we modify ring.out, and
- * we only examine ring.in without changing it. Both of these are volatile,
- * and this is a unique thread.
- */
+ hc = &harvest_context;
for (random_kthread_control = 1; random_kthread_control;) {
- /* Deal with events, if any. Restrict the number we do in one go. */
- maxloop = RANDOM_RING_MAX;
- while (harvest_context.hc_entropy_ring.out != harvest_context.hc_entropy_ring.in) {
- ring_out = (harvest_context.hc_entropy_ring.out + 1)%RANDOM_RING_MAX;
- random_harvestq_fast_process_event(harvest_context.hc_entropy_ring.ring + ring_out);
- harvest_context.hc_entropy_ring.out = ring_out;
- if (!--maxloop)
- break;
- }
+ struct entropy_buffer *buf;
+ u_int entries;
+
+ /* Deal with queued events. */
+ RANDOM_HARVEST_LOCK();
+ buf = &hc->hc_entropy_buf[hc->hc_active_buf];
+ entries = buf->pos;
+ buf->pos = 0;
+ hc->hc_active_buf = (hc->hc_active_buf + 1) %
+ nitems(hc->hc_entropy_buf);
+ RANDOM_HARVEST_UNLOCK();
+ for (u_int i = 0; i < entries; i++)
+ random_harvestq_fast_process_event(&buf->ring[i]);
+
+ /* Poll sources of noise. */
random_sources_feed();
+
/* XXX: FIX!! Increase the high-performance data rate? Need some measurements first. */
- for (i = 0; i < RANDOM_ACCUM_MAX; i++) {
- if (harvest_context.hc_entropy_fast_accumulator.buf[i]) {
- random_harvest_direct(harvest_context.hc_entropy_fast_accumulator.buf + i, sizeof(harvest_context.hc_entropy_fast_accumulator.buf[0]), RANDOM_UMA);
- harvest_context.hc_entropy_fast_accumulator.buf[i] = 0;
+ for (u_int i = 0; i < RANDOM_ACCUM_MAX; i++) {
+ if (hc->hc_entropy_fast_accumulator.buf[i]) {
+ random_harvest_direct(&hc->hc_entropy_fast_accumulator.buf[i],
+ sizeof(hc->hc_entropy_fast_accumulator.buf[0]), RANDOM_UMA);
+ hc->hc_entropy_fast_accumulator.buf[i] = 0;
}
}
/* XXX: FIX!! This is a *great* place to pass hardware/live entropy to random(9) */
- tsleep_sbt(&harvest_context.hc_kthread_proc, 0, "-",
+ tsleep_sbt(&hc->hc_kthread_proc, 0, "-",
SBT_1S/RANDOM_KTHREAD_HZ, 0, C_PREL(1));
}
random_kthread_control = -1;
- wakeup(&harvest_context.hc_kthread_proc);
+ wakeup(&hc->hc_kthread_proc);
kproc_exit(0);
/* NOTREACHED */
}
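
The new scheme trades the lockless ring for a mutex-protected pair of buffers: producers append to the active buffer under the harvest lock, while the kthread swaps the buffers under that same lock and then drains the now-inactive one with the lock dropped, so per-event processing never blocks producers. A minimal userspace model of the handoff, with invented names (produce, consume, NEVENTS) and a pthread mutex standing in for RANDOM_HARVEST_LOCK():

    #include <pthread.h>
    #include <stdio.h>

    #define NEVENTS 128

    static struct buffer {
    	unsigned ring[NEVENTS];
    	unsigned pos;
    } bufs[2];
    static unsigned active;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Producer: append to the active buffer; drop events once full. */
    static void
    produce(unsigned v)
    {
    	pthread_mutex_lock(&lock);
    	struct buffer *b = &bufs[active];
    	if (b->pos < NEVENTS)
    		b->ring[b->pos++] = v;
    	pthread_mutex_unlock(&lock);
    }

    /* Consumer: swap buffers under the lock, then drain without it. */
    static void
    consume(void)
    {
    	pthread_mutex_lock(&lock);
    	struct buffer *b = &bufs[active];
    	unsigned n = b->pos;
    	b->pos = 0;
    	active = (active + 1) % 2;
    	pthread_mutex_unlock(&lock);
    	for (unsigned i = 0; i < n; i++)	/* off-lock processing */
    		printf("event %u\n", b->ring[i]);
    }

Once the swap is done, producers can only touch the other buffer, so the drain loop needs no synchronization at all.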
@@ -311,6 +301,230 @@ random_sources_feed(void)
explicit_bzero(entropy, sizeof(entropy));
}
+/*
+ * State used for conducting NIST SP 800-90B health tests on entropy sources.
+ */
+static struct health_test_softc {
+ uint32_t ht_rct_value[HARVESTSIZE + 1];
+ u_int ht_rct_count; /* number of samples with the same value */
+ u_int ht_rct_limit; /* constant after init */
+
+ uint32_t ht_apt_value[HARVESTSIZE + 1];
+ u_int ht_apt_count; /* number of samples with the same value */
+ u_int ht_apt_seq; /* sequence number of the last sample */
+ u_int ht_apt_cutoff; /* constant after init */
+
+ uint64_t ht_total_samples;
+ bool ondemand; /* Set to true to restart the state machine */
+ enum {
+ INIT = 0, /* initial state */
+ DISABLED, /* health checking is disabled */
+ STARTUP, /* doing startup tests, samples are discarded */
+ STEADY, /* steady-state operation */
+ FAILED, /* health check failed, discard samples */
+ } ht_state;
+} healthtest[ENTROPYSOURCE];
+
+#define RANDOM_SELFTEST_STARTUP_SAMPLES 1024 /* 4.3, requirement 4 */
+#define RANDOM_SELFTEST_APT_WINDOW 512 /* 4.4.2 */
+
+static void
+copy_event(uint32_t dst[static HARVESTSIZE + 1],
+ const struct harvest_event *event)
+{
+ memset(dst, 0, sizeof(uint32_t) * (HARVESTSIZE + 1));
+ memcpy(dst, event->he_entropy, event->he_size);
+ dst[HARVESTSIZE] = event->he_somecounter;
+}
+
+static void
+random_healthtest_rct_init(struct health_test_softc *ht,
+ const struct harvest_event *event)
+{
+ ht->ht_rct_count = 1;
+ copy_event(ht->ht_rct_value, event);
+}
+
+/*
+ * Apply the repetition count test to a sample.
+ *
+ * Return false if the test failed, i.e., we observed >= C consecutive samples
+ * with the same value, and true otherwise.
+ */
+static bool
+random_healthtest_rct_next(struct health_test_softc *ht,
+ const struct harvest_event *event)
+{
+ uint32_t val[HARVESTSIZE + 1];
+
+ copy_event(val, event);
+ if (memcmp(val, ht->ht_rct_value, sizeof(ht->ht_rct_value)) != 0) {
+ ht->ht_rct_count = 1;
+ memcpy(ht->ht_rct_value, val, sizeof(ht->ht_rct_value));
+ return (true);
+ } else {
+ ht->ht_rct_count++;
+ return (ht->ht_rct_count < ht->ht_rct_limit);
+ }
+}
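
The cutoff semantics are worth spelling out: with a limit of C, the test tolerates C - 1 consecutive repeats of a value and fails on the C-th. A self-contained sketch of the same logic over scalar samples (simplified from the array comparison above; rct_next and the sample value are invented for illustration, and the limit of 35 anticipates the value chosen in random_healthtest_init() below):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rct_value;
    static unsigned rct_count;
    static const unsigned rct_limit = 35;

    /* Return false once rct_limit consecutive identical samples are seen. */
    static bool
    rct_next(uint32_t sample)
    {
    	if (sample != rct_value) {
    		rct_value = sample;
    		rct_count = 1;
    		return (true);
    	}
    	return (++rct_count < rct_limit);
    }

    int
    main(void)
    {
    	rct_next(7);			/* first observation of the value */
    	for (int i = 2; i <= 35; i++)
    		if (!rct_next(7)) {
    			printf("RCT failed at repeat %d\n", i);
    			break;		/* fires at i == 35 */
    		}
    	return (0);
    }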
+
+static void
+random_healthtest_apt_init(struct health_test_softc *ht,
+ const struct harvest_event *event)
+{
+ ht->ht_apt_count = 1;
+ ht->ht_apt_seq = 1;
+ copy_event(ht->ht_apt_value, event);
+}
+
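+/*
+ * Apply the adaptive proportion test to a sample.
+ *
+ * Count how many samples within a window of RANDOM_SELFTEST_APT_WINDOW
+ * samples match the first sample in the window, counting that first
+ * sample itself.  Return false if the count reaches the cutoff, and
+ * true otherwise.  When the window ends, the next sample begins a new
+ * window.
+ */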
+static bool
+random_healthtest_apt_next(struct health_test_softc *ht,
+ const struct harvest_event *event)
+{
+ uint32_t val[HARVESTSIZE + 1];
+
+ if (ht->ht_apt_seq == 0) {
+ random_healthtest_apt_init(ht, event);
+ return (true);
+ }
+
+ copy_event(val, event);
+ if (memcmp(val, ht->ht_apt_value, sizeof(ht->ht_apt_value)) == 0) {
+ ht->ht_apt_count++;
+ if (ht->ht_apt_count >= ht->ht_apt_cutoff)
+ return (false);
+ }
+
+ ht->ht_apt_seq++;
+ if (ht->ht_apt_seq == RANDOM_SELFTEST_APT_WINDOW)
+ ht->ht_apt_seq = 0;
+
+ return (true);
+}
+
+/*
+ * Run the health tests for the given event. This is assumed to be called from
+ * a serialized context.
+ */
+bool
+random_harvest_healthtest(const struct harvest_event *event)
+{
+ struct health_test_softc *ht;
+
+ ht = &healthtest[event->he_source];
+
+ /*
+ * Was on-demand testing requested? Restart the state machine if so,
+ * restarting the startup tests.
+ */
+ if (atomic_load_bool(&ht->ondemand)) {
+ atomic_store_bool(&ht->ondemand, false);
+ ht->ht_state = INIT;
+ }
+
+ switch (ht->ht_state) {
+ case __predict_false(INIT):
+ /* Store the first sample and initialize test state. */
+ random_healthtest_rct_init(ht, event);
+ random_healthtest_apt_init(ht, event);
+ ht->ht_total_samples = 0;
+ ht->ht_state = STARTUP;
+ return (false);
+ case DISABLED:
+ /* No health testing for this source. */
+ return (true);
+ case STEADY:
+ case STARTUP:
+ ht->ht_total_samples++;
+ if (random_healthtest_rct_next(ht, event) &&
+ random_healthtest_apt_next(ht, event)) {
+ if (ht->ht_state == STARTUP &&
+ ht->ht_total_samples >=
+ RANDOM_SELFTEST_STARTUP_SAMPLES) {
+ printf(
+ "random: health test passed for source %s\n",
+ random_source_descr[event->he_source]);
+ ht->ht_state = STEADY;
+ }
+ return (ht->ht_state == STEADY);
+ }
+ ht->ht_state = FAILED;
+ printf(
+ "random: health test failed for source %s, discarding samples\n",
+ random_source_descr[event->he_source]);
+ /* FALLTHROUGH */
+ case FAILED:
+ return (false);
+ }
+}
+
+static bool nist_healthtest_enabled = false;
+SYSCTL_BOOL(_kern_random, OID_AUTO, nist_healthtest_enabled,
+ CTLFLAG_RDTUN, &nist_healthtest_enabled, 0,
+ "Enable NIST SP 800-90B health tests for noise sources");
+
+static void
+random_healthtest_init(enum random_entropy_source source)
+{
+ struct health_test_softc *ht;
+
+ ht = &healthtest[source];
+ KASSERT(ht->ht_state == INIT,
+ ("%s: health test state is %d for source %d",
+ __func__, ht->ht_state, source));
+
+ /*
+ * If health-testing is enabled, validate all sources except CACHED and
+ * VMGENID: they are deterministic sources used only a small, fixed
+ * number of times, so statistical testing is not applicable.
+ */
+ if (!nist_healthtest_enabled ||
+ source == RANDOM_CACHED || source == RANDOM_PURE_VMGENID) {
+ ht->ht_state = DISABLED;
+ return;
+ }
+
+ /*
+ * Set cutoff values for the two tests, assuming that each sample has
+ * min-entropy of 1 bit and allowing for an error rate of 1 in 2^{34}.
+ * With a sample rate of RANDOM_KTHREAD_HZ, we expect to see a false
+ * positive once in ~54.5 years.
+ *
+ * The RCT limit comes from the formula in section 4.4.1.
+ *
+ * The APT cutoff is calculated using the formula in section 4.4.2
+ * footnote 10 with the window size changed from 512 to 511, since the
+ * test as written counts the number of samples equal to the first
+ * sample in the window, and thus tests W-1 samples.
+ */
+ ht->ht_rct_limit = 35;
+ ht->ht_apt_cutoff = 330;
+}
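
For concreteness, the arithmetic behind these constants, with alpha = 2^{-34} and H = 1 (the APT cutoff is quoted from the code rather than recomputed here, and the false-positive interval assumes RANDOM_KTHREAD_HZ is 10, its value elsewhere in this file):

    C_{RCT} = 1 + \lceil -\log_2(\alpha) / H \rceil = 1 + \lceil 34 / 1 \rceil = 35

    C_{APT} = 1 + \mathrm{CRITBINOM}(W, 2^{-H}, 1 - \alpha) = 330 \quad (W = 511)

    T_{FP} \approx \frac{2^{34}}{10 \cdot 31{,}557{,}600\ \mathrm{s/yr}} \approx 54.5\ \mathrm{years}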
+
+static int
+random_healthtest_ondemand(SYSCTL_HANDLER_ARGS)
+{
+ u_int mask, source;
+ int error;
+
+ mask = 0;
+ error = sysctl_handle_int(oidp, &mask, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ while (mask != 0) {
+ source = ffs(mask) - 1;
+ if (source < nitems(healthtest))
+ atomic_store_bool(&healthtest[source].ondemand, true);
+ mask &= ~(1u << source);
+ }
+ return (0);
+}
+SYSCTL_PROC(_kern_random, OID_AUTO, nist_healthtest_ondemand,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
+ random_healthtest_ondemand, "I",
+ "Re-run NIST SP 800-90B startup health tests for a noise source");
+
static int
random_check_uint_harvestmask(SYSCTL_HANDLER_ARGS)
{
@@ -374,7 +588,8 @@ static const char *random_source_descr[ENTROPYSOURCE] = {
[RANDOM_SWI] = "SWI",
[RANDOM_FS_ATIME] = "FS_ATIME",
[RANDOM_UMA] = "UMA",
- [RANDOM_CALLOUT] = "CALLOUT", /* ENVIRONMENTAL_END */
+ [RANDOM_CALLOUT] = "CALLOUT",
+ [RANDOM_RANDOMDEV] = "RANDOMDEV", /* ENVIRONMENTAL_END */
[RANDOM_PURE_OCTEON] = "PURE_OCTEON", /* PURE_START */
[RANDOM_PURE_SAFE] = "PURE_SAFE",
[RANDOM_PURE_GLXSB] = "PURE_GLXSB",
@@ -435,7 +650,10 @@ random_harvestq_init(void *unused __unused)
hc_source_mask = almost_everything_mask;
RANDOM_HARVEST_INIT_LOCK();
- harvest_context.hc_entropy_ring.in = harvest_context.hc_entropy_ring.out = 0;
+ harvest_context.hc_active_buf = 0;
+
+ for (int i = 0; i < ENTROPYSOURCE; i++)
+ random_healthtest_init(i);
}
SYSINIT(random_device_h_init, SI_SUB_RANDOM, SI_ORDER_THIRD, random_harvestq_init, NULL);
@@ -540,9 +758,9 @@ SYSUNINIT(random_device_h_init, SI_SUB_RANDOM, SI_ORDER_THIRD, random_harvestq_d
* This is supposed to be fast; do not do anything slow in here!
* It is also illegal (and morally reprehensible) to insert any
* high-rate data here. "High-rate" is defined as a data source
- * that will usually cause lots of failures of the "Lockless read"
- * check a few lines below. This includes the "always-on" sources
- * like the Intel "rdrand" or the VIA Nehamiah "xstore" sources.
+ * that is likely to fill up the buffer in much less than 100ms.
+ * This includes the "always-on" sources like the Intel "rdrand"
+ * or the VIA Nehamiah "xstore" sources.
*/
/* XXXRW: get_cyclecount() is cheap on most modern hardware, where cycle
* counters are built in, but on older hardware it will do a real time clock
@@ -551,28 +769,29 @@ SYSUNINIT(random_device_h_init, SI_SUB_RANDOM, SI_ORDER_THIRD, random_harvestq_d
void
random_harvest_queue_(const void *entropy, u_int size, enum random_entropy_source origin)
{
+ struct harvest_context *hc;
+ struct entropy_buffer *buf;
struct harvest_event *event;
- u_int ring_in;
- KASSERT(origin >= RANDOM_START && origin < ENTROPYSOURCE, ("%s: origin %d invalid\n", __func__, origin));
+ KASSERT(origin >= RANDOM_START && origin < ENTROPYSOURCE,
+ ("%s: origin %d invalid", __func__, origin));
+
+ hc = &harvest_context;
RANDOM_HARVEST_LOCK();
- ring_in = (harvest_context.hc_entropy_ring.in + 1)%RANDOM_RING_MAX;
- if (ring_in != harvest_context.hc_entropy_ring.out) {
- /* The ring is not full */
- event = harvest_context.hc_entropy_ring.ring + ring_in;
+ buf = &hc->hc_entropy_buf[hc->hc_active_buf];
+ if (buf->pos < RANDOM_RING_MAX) {
+ event = &buf->ring[buf->pos++];
event->he_somecounter = random_get_cyclecount();
event->he_source = origin;
- event->he_destination = harvest_context.hc_destination[origin]++;
+ event->he_destination = hc->hc_destination[origin]++;
if (size <= sizeof(event->he_entropy)) {
event->he_size = size;
memcpy(event->he_entropy, entropy, size);
- }
- else {
+ } else {
/* Big event, so squash it */
event->he_size = sizeof(event->he_entropy[0]);
event->he_entropy[0] = jenkins_hash(entropy, size, (uint32_t)(uintptr_t)event);
}
- harvest_context.hc_entropy_ring.in = ring_in;
}
RANDOM_HARVEST_UNLOCK();
}
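
For reference, harvest callers are not expected to invoke random_harvest_queue_() directly; they go through the random_harvest_queue() wrapper declared in sys/random.h, which in the usual configuration checks the harvest source mask and skips disabled sources. A sketch of a hypothetical caller (example_isr is invented; RANDOM_INTERRUPT is an existing member of enum random_entropy_source):

    #include <sys/param.h>
    #include <sys/random.h>

    /* Hypothetical interrupt path: mix a cheap pointer-sized value into
     * the harvest queue, attributed to the RANDOM_INTERRUPT source. */
    static void
    example_isr(void *arg)
    {
    	random_harvest_queue(&arg, sizeof(arg), RANDOM_INTERRUPT);
    }

Events wider than he_entropy are squashed to a single jenkins_hash() word, as the else branch above shows, so callers need not pre-hash large inputs.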