author		Theo de Raadt <deraadt@cvs.openbsd.org>	2020-05-26 14:27:25 +0000
committer	Theo de Raadt <deraadt@cvs.openbsd.org>	2020-05-26 14:27:25 +0000
commit		251ec88e8e0c79792066e10548ef4fd68aad0aac
tree		39ea00b8f3aaa883702e2cfbc0025c49ad795d86	/sys/dev/rnd.c
parent		9ab2a0bb4bc558ed94f64176a61dc82caddf4528
Rewrite the entropy enqueue ring to collect damage asynchronously, and adapt
the dequeue to mix a selection of the "best" ring entries. Change the dequeue
timeout to back off exponentially, because excessive pool buffer generation
is pointless: rekeys generally happen at 1.6MB or after a long timeout, so a
lot of CPU cycles were being wasted. During boot-up (before timeouts work),
aggressively consume enqueued damage and rekey every time, to accelerate
entropy injection into the ChaCha ring. The goal is to compensate rapidly for
weak seeding in unidentifiable conditions, and to ensure the quality of
arc4random() output early in boot.
ok kettenis visa
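To make the backoff concrete, here is a minimal userland model of the schedule
the diff below implements, assuming only what the diff shows: the
rnd_slowextract multiplier doubles each time the dequeue timeout is armed, is
capped at 5000, is scaled by 10 into milliseconds, and is reset to 1 by
_rs_stir() after a rekey. Apart from rnd_slowextract, all names are
hypothetical stand-ins, not kernel code.

#include <stdio.h>

static int rnd_slowextract = 1;

/* mirrors min(rnd_slowextract * 2, 5000) and the *10 msec scaling */
static int
next_timeout_msec(void)
{
	rnd_slowextract = rnd_slowextract * 2 < 5000 ?
	    rnd_slowextract * 2 : 5000;
	return rnd_slowextract * 10;
}

int
main(void)
{
	int i;

	for (i = 0; i < 12; i++)
		printf("timeout %2d: %6d msec\n", i, next_timeout_msec());

	rnd_slowextract = 1;	/* what _rs_stir() does after a rekey */
	printf("after rekey: %d msec\n", next_timeout_msec());
	return 0;
}

The interval climbs 20, 40, 80, ... msec toward a 50-second ceiling, then
snaps back to a fast cadence once a rekey has actually consumed pool output.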
Diffstat (limited to 'sys/dev/rnd.c')
-rw-r--r--	sys/dev/rnd.c	132
1 file changed, 60 insertions, 72 deletions
diff --git a/sys/dev/rnd.c b/sys/dev/rnd.c
index ec2c507b15b..a3b40900ef2 100644
--- a/sys/dev/rnd.c
+++ b/sys/dev/rnd.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rnd.c,v 1.215 2020/05/25 17:52:57 naddy Exp $ */
+/* $OpenBSD: rnd.c,v 1.216 2020/05/26 14:27:24 deraadt Exp $ */
/*
* Copyright (c) 2011 Theo de Raadt.
@@ -68,6 +68,7 @@
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/timeout.h>
+#include <sys/atomic.h>
#include <sys/mutex.h>
#include <sys/task.h>
#include <sys/msgbuf.h>
@@ -119,7 +120,7 @@
*/
#define QEVLEN 128 /* must be a power of 2 */
-#define QEVSLOW (QEVLEN * 3 / 4) /* yet another 0.75 for 60-minutes hour /-; */
+#define QEVCONSUME 8 /* how many events to consume at a time */
#define KEYSZ 32
#define IVSZ 8
@@ -128,17 +129,17 @@
#define EBUFSIZE KEYSZ + IVSZ
struct rand_event {
- u_int re_time;
- u_int re_val;
+ u_int re_time;
+ u_int re_val;
} rnd_event_space[QEVLEN];
-u_int rnd_event_cons;
-u_int rnd_event_prod;
+u_int rnd_event_cons;
+u_int rnd_event_prod;
+int rnd_cold = 1;
+int rnd_slowextract = 1;
-struct mutex rnd_enqlck = MUTEX_INITIALIZER(IPL_HIGH);
-struct mutex rnd_deqlck = MUTEX_INITIALIZER(IPL_HIGH);
-
-struct timeout rnd_timeout;
+void rnd_reinit(void *v); /* timeout to start reinit */
+void rnd_init(void *); /* actually do the reinit */
static u_int32_t entropy_pool[POOLWORDS];
u_int32_t entropy_pool0[POOLWORDS] __attribute__((section(".openbsd.randomdata")));
@@ -148,6 +149,8 @@ void add_entropy_words(const u_int32_t *, u_int);
void extract_entropy(u_int8_t *)
__attribute__((__bounded__(__minbytes__,1,EBUFSIZE)));
+struct timeout rnd_timeout = TIMEOUT_INITIALIZER(dequeue_randomness, NULL);
+
int filt_randomread(struct knote *, long);
void filt_randomdetach(struct knote *);
int filt_randomwrite(struct knote *, long);
@@ -169,36 +172,6 @@ const struct filterops randomwrite_filtops = {
.f_event = filt_randomwrite,
};
-static inline struct rand_event *
-rnd_get(void)
-{
- u_int idx;
-
- /* nothing to do if queue is empty */
- if (rnd_event_prod == rnd_event_cons)
- return NULL;
-
- if (rnd_event_prod - rnd_event_cons > QEVLEN)
- rnd_event_cons = rnd_event_prod - QEVLEN;
- idx = rnd_event_cons++;
- return &rnd_event_space[idx & (QEVLEN - 1)];
-}
-
-static inline struct rand_event *
-rnd_put(void)
-{
- u_int idx = rnd_event_prod++;
-
- /* allow wrapping. caller will mix it in. */
- return &rnd_event_space[idx & (QEVLEN - 1)];
-}
-
-static inline u_int
-rnd_qlen(void)
-{
- return rnd_event_prod - rnd_event_cons;
-}
-
/*
* This function mixes entropy and timing into the entropy input ring.
*/
@@ -207,22 +180,24 @@ enqueue_randomness(u_int val)
{
struct rand_event *rep;
struct timespec ts;
- u_int qlen;
+ int e;
- timespecclear(&ts);
- if (timeout_initialized(&rnd_timeout))
- nanotime(&ts);
-
- mtx_enter(&rnd_enqlck);
- rep = rnd_put();
+ nanotime(&ts);
+ e = (atomic_inc_int_nv(&rnd_event_prod) - 1) & (QEVLEN-1);
+ rep = &rnd_event_space[e];
rep->re_time += ts.tv_nsec ^ (ts.tv_sec << 20);
rep->re_val += val;
- qlen = rnd_qlen();
- mtx_leave(&rnd_enqlck);
- if (qlen > QEVSLOW/2 && timeout_initialized(&rnd_timeout) &&
- !timeout_pending(&rnd_timeout))
- timeout_add(&rnd_timeout, 1);
+ if (rnd_cold) {
+ dequeue_randomness(NULL);
+ rnd_init(NULL);
+ if (!cold)
+ rnd_cold = 0;
+ } else if (!timeout_pending(&rnd_timeout) &&
+ (rnd_event_prod - rnd_event_cons) > QEVCONSUME) {
+ rnd_slowextract = min(rnd_slowextract * 2, 5000);
+ timeout_add_msec(&rnd_timeout, rnd_slowextract * 10);
+ }
}
/*
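The enqueue path above is now lock-free: a single atomic increment claims a
ring slot, and the power-of-two mask lets the counter wrap. Below is a
simplified standalone model using C11 atomics; note that C11
atomic_fetch_add() returns the old value, so it needs no adjustment, whereas
the kernel's atomic_inc_int_nv() returns the new value, hence the "- 1" in
the diff. This is a sketch under those assumptions, not the driver itself.

#include <stdatomic.h>

#define QEVLEN	128			/* must be a power of 2 */

struct rand_event {
	unsigned int re_time;
	unsigned int re_val;
} rnd_event_space[QEVLEN];

static atomic_uint rnd_event_prod;

void
enqueue(unsigned int time, unsigned int val)
{
	/* claim a slot; masking makes counter wrap-around harmless */
	unsigned int e = atomic_fetch_add(&rnd_event_prod, 1) & (QEVLEN - 1);

	/* += rather than =: writers colliding on a slot only add damage */
	rnd_event_space[e].re_time += time;
	rnd_event_space[e].re_val += val;
}

Colliding writers are tolerated by design: losing a race never loses the
slot, it just mixes both contributions into the same entry.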
@@ -267,28 +242,40 @@ add_entropy_words(const u_int32_t *buf, u_int n)
}
/*
- * Pulls entropy out of the queue and merges it into the pool
- * with the CRC.
+ * Pulls entropy out of the queue and merges it into the pool with the
+ * CRC. This takes a mix of fresh entries from the producer end of the
+ * queue and entries from the consumer end of the queue which are
+ * likely to have collected more damage.
*/
/* ARGSUSED */
void
dequeue_randomness(void *v)
{
- struct rand_event *rep;
u_int32_t buf[2];
+ u_int startp, startc, i;
- if (timeout_initialized(&rnd_timeout))
+ if (!rnd_cold)
timeout_del(&rnd_timeout);
- mtx_enter(&rnd_deqlck);
- while ((rep = rnd_get())) {
- buf[0] = rep->re_time;
- buf[1] = rep->re_val;
- mtx_leave(&rnd_deqlck);
+ /* Some very new damage */
+ startp = rnd_event_prod - QEVCONSUME;
+ for (i = 0; i < QEVCONSUME; i++) {
+ u_int e = (startp + i) & (QEVLEN-1);
+
+ buf[0] = rnd_event_space[e].re_time;
+ buf[1] = rnd_event_space[e].re_val;
add_entropy_words(buf, 2);
- mtx_enter(&rnd_deqlck);
}
- mtx_leave(&rnd_deqlck);
+ /* and some probably more damaged */
+ startc = rnd_event_cons;
+ for (i = 0; i < QEVCONSUME; i++) {
+ u_int e = (startc + i) & (QEVLEN-1);
+
+ buf[0] = rnd_event_space[e].re_time;
+ buf[1] = rnd_event_space[e].re_val;
+ add_entropy_words(buf, 2);
+ }
+ rnd_event_cons = startp + QEVCONSUME;
}
/*
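The dequeue now reads two windows of QEVCONSUME slots each: the freshest
entries just behind the producer, and the oldest entries at the consumer
index, which have had the longest time to accumulate damage. A small model
of the index arithmetic with hypothetical counter values; observe that
startp + QEVCONSUME equals the producer position sampled at entry, so the
final store declares everything older consumed.

#include <stdio.h>

#define QEVLEN		128	/* must be a power of 2 */
#define QEVCONSUME	8	/* how many events to consume at a time */

int
main(void)
{
	unsigned int prod = 300, cons = 150, startp, startc, i;

	startp = prod - QEVCONSUME;	/* freshest window */
	for (i = 0; i < QEVCONSUME; i++)
		printf("fresh slot %u\n", (startp + i) & (QEVLEN - 1));

	startc = cons;			/* most-damaged window */
	for (i = 0; i < QEVCONSUME; i++)
		printf("aged slot %u\n", (startc + i) & (QEVLEN - 1));

	cons = startp + QEVCONSUME;	/* == prod as sampled above */
	printf("new cons %u (prod was %u)\n", cons, prod);
	return 0;
}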
@@ -322,8 +309,12 @@ extract_entropy(u_int8_t *buf)
/* Copy data to destination buffer */
memcpy(buf, digest, EBUFSIZE);
- /* Modify pool so next hash will produce different results */
- enqueue_randomness(extract_pool[0]);
+ /*
+ * Modify pool so next hash will produce different results.
+ * During boot-time enqueue/dequeue stage, avoid recursion.
+ */
+ if (!rnd_cold)
+ enqueue_randomness(extract_pool[0]);
dequeue_randomness(NULL);
/* Wipe data from memory */
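The rnd_cold test above breaks a boot-time cycle: extract_entropy() feeds its
own output back through enqueue_randomness(), and while rnd_cold is set the
enqueue path calls rnd_init() directly, which would re-enter
extract_entropy() (via _rs_stir(), assuming the usual call chain in this
file). The guard in miniature, with all names hypothetical:

#include <stdio.h>

static int cold = 1;

static void extract(void);

static void
enqueue(void)
{
	/* the cold path calls straight back into the extractor */
	if (cold)
		extract();
}

static void
extract(void)
{
	/* skip the feedback while cold, or enqueue() would recurse */
	if (!cold)
		enqueue();
	puts("extracted");
}

int
main(void)
{
	extract();	/* cold boot: no feedback, no recursion */
	cold = 0;
	extract();	/* normal operation: feedback is safe */
	return 0;
}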
@@ -333,11 +324,8 @@ extract_entropy(u_int8_t *buf)
/* random keystream by ChaCha */
-void rnd_reinit(void *v); /* timeout to start reinit */
-void rnd_init(void *); /* actually do the reinit */
-
struct mutex rndlock = MUTEX_INITIALIZER(IPL_HIGH);
-struct timeout rndreinit_timeout;
+struct timeout rndreinit_timeout = TIMEOUT_INITIALIZER(rnd_reinit, NULL);
struct task rnd_task = TASK_INITIALIZER(rnd_init, NULL);
static chacha_ctx rs; /* chacha context for random keystream */
@@ -422,8 +410,10 @@ _rs_stir(int do_lock)
_rs_seed(buf, sizeof(buf));
if (do_lock)
mtx_leave(&rndlock);
-
explicit_bzero(buf, sizeof(buf));
+
+ /* encourage fast-dequeue again */
+ rnd_slowextract = 1;
}
static inline void
@@ -677,9 +667,7 @@ random_start(int goodseed)
dequeue_randomness(NULL);
rnd_init(NULL);
- timeout_set(&rndreinit_timeout, rnd_reinit, NULL);
rnd_reinit(NULL);
- timeout_set(&rnd_timeout, dequeue_randomness, NULL);
if (goodseed)
printf("random: good seed from bootblocks\n");