Diffstat (limited to 'sys/dev/rnd.c')
-rw-r--r--  sys/dev/rnd.c  71
1 file changed, 33 insertions(+), 38 deletions(-)
diff --git a/sys/dev/rnd.c b/sys/dev/rnd.c
index cec2e1a2292..613bc93d421 100644
--- a/sys/dev/rnd.c
+++ b/sys/dev/rnd.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rnd.c,v 1.193 2017/07/30 21:40:14 deraadt Exp $ */
+/* $OpenBSD: rnd.c,v 1.194 2017/11/19 13:43:06 mikeb Exp $ */
/*
* Copyright (c) 2011 Theo de Raadt.
@@ -197,17 +197,13 @@
#define POOL_TAP3 819
#define POOL_TAP4 411
-struct mutex entropylock = MUTEX_INITIALIZER(IPL_HIGH);
-
/*
* Raw entropy collection from device drivers; at interrupt context or not.
* add_*_randomness() provide data which is put into the entropy queue.
- * Almost completely under the entropylock.
*/
-#define QEVLEN (1024 / sizeof(struct rand_event))
+#define QEVLEN 128 /* must be a power of 2 */
#define QEVSLOW (QEVLEN * 3 / 4) /* yet another 0.75 for 60-minutes hour /-; */
-#define QEVSBITS 10
#define KEYSZ 32
#define IVSZ 8
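
With QEVLEN now a fixed power of two, the ring indices introduced below can be reduced with a mask, idx & (QEVLEN - 1), rather than a modulo, and the free-running unsigned counters remain correct across wraparound. A minimal userspace sketch of both properties (illustrative only, not kernel code):

#include <assert.h>
#include <stdint.h>

#define QEVLEN	128		/* must be a power of 2 */

int
main(void)
{
	uint32_t idx, cons, prod;

	/* For a 2^n size, masking and modulo agree for every index. */
	for (idx = 0; idx < 1000; idx++)
		assert((idx & (QEVLEN - 1)) == idx % QEVLEN);

	/*
	 * Free-running unsigned counters survive wraparound: even after
	 * prod wraps past UINT32_MAX, prod - cons is still the queue
	 * length (mod 2^32).
	 */
	cons = UINT32_MAX - 2;
	prod = cons + 5;	/* wraps around to 2 */
	assert(prod - cons == 5);
	return 0;
}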
@@ -219,8 +215,12 @@ struct rand_event {
u_int re_time;
u_int re_val;
} rnd_event_space[QEVLEN];
-/* index of next free slot */
-u_int rnd_event_idx;
+
+u_int rnd_event_cons;
+u_int rnd_event_prod;
+
+struct mutex rnd_enqlck = MUTEX_INITIALIZER(IPL_HIGH);
+struct mutex rnd_deqlck = MUTEX_INITIALIZER(IPL_HIGH);
struct timeout rnd_timeout;
@@ -246,39 +246,38 @@ struct filterops randomread_filtops =
struct filterops randomwrite_filtops =
{ 1, NULL, filt_randomdetach, filt_randomwrite };
-static __inline struct rand_event *
+static inline struct rand_event *
rnd_get(void)
{
- if (rnd_event_idx == 0)
+ u_int idx;
+
+ /* nothing to do if queue is empty */
+ if (rnd_event_prod == rnd_event_cons)
return NULL;
- /* if it wrapped around, start dequeuing at the end */
- if (rnd_event_idx > QEVLEN)
- rnd_event_idx = QEVLEN;
- return &rnd_event_space[--rnd_event_idx];
+ if (rnd_event_prod - rnd_event_cons > QEVLEN)
+ rnd_event_cons = rnd_event_prod - QEVLEN;
+ idx = rnd_event_cons++;
+ return &rnd_event_space[idx & (QEVLEN - 1)];
}
-static __inline struct rand_event *
+static inline struct rand_event *
rnd_put(void)
{
- u_int idx = rnd_event_idx++;
+ u_int idx = rnd_event_prod++;
/* allow wrapping. caller will use xor. */
- idx = idx % QEVLEN;
-
- return &rnd_event_space[idx];
+ return &rnd_event_space[idx & (QEVLEN - 1)];
}
-static __inline u_int
+static inline u_int
rnd_qlen(void)
{
- return rnd_event_idx;
+ return rnd_event_prod - rnd_event_cons;
}
/*
- * This function adds entropy to the entropy pool by using timing
- * delays. It uses the timer_rand_state structure to make an estimate
- * of how many bits of entropy this call has added to the pool.
+ * This function adds entropy to the entropy pool by using timing delays.
*
* The number "val" is also added to the pool - it should somehow describe
* the type of event which just happened. Currently the values of 0-255
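
Taken together, the new rnd_put()/rnd_get() form a fixed-size ring in which the producer never waits: it keeps storing into slots, and the consumer discards the stale tail whenever it has fallen more than QEVLEN behind. A single-threaded userspace sketch of the same index discipline (ring_put/ring_get are illustrative stand-ins; in the kernel each side is serialized by its own mutex, and cross-side races are tolerated because slots are only accumulated into):

#include <stdio.h>

#define QEVLEN	128		/* must be a power of 2 */

struct rand_event { unsigned re_time, re_val; };

static struct rand_event ring[QEVLEN];
static unsigned cons, prod;	/* free-running indices */

/* Producer: always succeeds; stale slots are simply reused. */
static struct rand_event *
ring_put(void)
{
	return &ring[prod++ & (QEVLEN - 1)];
}

/* Consumer: skips entries already overtaken by the producer. */
static struct rand_event *
ring_get(void)
{
	if (prod == cons)
		return NULL;			/* queue is empty */
	if (prod - cons > QEVLEN)		/* overrun: drop stale tail */
		cons = prod - QEVLEN;
	return &ring[cons++ & (QEVLEN - 1)];
}

int
main(void)
{
	unsigned i, n = 0;

	for (i = 0; i < 300; i++)		/* overrun the ring */
		ring_put()->re_val = i;
	while (ring_get() != NULL)
		n++;
	printf("drained %u of 300 events\n", n);	/* prints 128 */
	return 0;
}

Overrunning the ring by design loses the oldest events instead of ever blocking an interrupt-context producer.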
@@ -289,6 +288,7 @@ enqueue_randomness(u_int state, u_int val)
{
struct rand_event *rep;
struct timespec ts;
+ u_int qlen;
#ifdef DIAGNOSTIC
if (state >= RND_SRC_NUM)
@@ -300,18 +300,16 @@ enqueue_randomness(u_int state, u_int val)
val += state << 13;
- mtx_enter(&entropylock);
-
+ mtx_enter(&rnd_enqlck);
rep = rnd_put();
-
rep->re_time += ts.tv_nsec ^ (ts.tv_sec << 20);
rep->re_val += val;
+ qlen = rnd_qlen();
+ mtx_leave(&rnd_enqlck);
- if (rnd_qlen() > QEVSLOW/2 && timeout_initialized(&rnd_timeout) &&
+ if (qlen > QEVSLOW/2 && timeout_initialized(&rnd_timeout) &&
!timeout_pending(&rnd_timeout))
timeout_add(&rnd_timeout, 1);
-
- mtx_leave(&entropylock);
}
/*
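
Note how the reworked enqueue path narrows the critical section: the queue length is snapshotted while rnd_enqlck is held, and the timeout check runs on that snapshot after the lock is dropped. The same shape in userspace pthread terms (kick_drain() and THRESHOLD are stand-ins for timeout_add() and QEVSLOW/2):

#include <pthread.h>
#include <stdio.h>

#define THRESHOLD 48		/* analogue of QEVSLOW/2 */

static pthread_mutex_t enq_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned cons, prod;	/* free-running ring indices */

static void
kick_drain(unsigned qlen)	/* stand-in for timeout_add() */
{
	printf("scheduling drain at qlen %u\n", qlen);
}

static void
enqueue(void)
{
	unsigned qlen;

	pthread_mutex_lock(&enq_lock);
	prod++;			/* slot store elided for brevity */
	qlen = prod - cons;	/* snapshot while locked */
	pthread_mutex_unlock(&enq_lock);

	/* Act on the snapshot; never hold the lock across the check. */
	if (qlen > THRESHOLD)
		kick_drain(qlen);
}

int
main(void)
{
	int i;

	for (i = 0; i < 50; i++)
		enqueue();
	return 0;
}

A slightly stale snapshot is harmless here: at worst the drain is kicked one event early or late.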
@@ -373,21 +371,18 @@ dequeue_randomness(void *v)
struct rand_event *rep;
u_int32_t buf[2];
- mtx_enter(&entropylock);
-
if (timeout_initialized(&rnd_timeout))
timeout_del(&rnd_timeout);
+ mtx_enter(&rnd_deqlck);
while ((rep = rnd_get())) {
buf[0] = rep->re_time;
buf[1] = rep->re_val;
- mtx_leave(&entropylock);
-
+ mtx_leave(&rnd_deqlck);
add_entropy_words(buf, 2);
-
- mtx_enter(&entropylock);
+ mtx_enter(&rnd_deqlck);
}
- mtx_leave(&entropylock);
+ mtx_leave(&rnd_deqlck);
}
/*
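
dequeue_randomness() keeps its old shape under the narrower rnd_deqlck: the lock covers only the consumer-index manipulation inside rnd_get(), and is released around the comparatively expensive add_entropy_words() call so producers are never stalled behind pool mixing. A self-contained sketch of that lock-juggling loop (ring_get() is the stand-in from above, mix() stands in for add_entropy_words()):

#include <pthread.h>
#include <stdio.h>

#define QEVLEN	128

struct rand_event { unsigned re_time, re_val; };

static struct rand_event ring[QEVLEN];
static unsigned cons, prod;
static pthread_mutex_t deq_lock = PTHREAD_MUTEX_INITIALIZER;

static struct rand_event *
ring_get(void)			/* same catch-up logic as rnd_get() */
{
	if (prod == cons)
		return NULL;
	if (prod - cons > QEVLEN)
		cons = prod - QEVLEN;
	return &ring[cons++ & (QEVLEN - 1)];
}

static void
mix(unsigned *buf, int n)	/* stand-in for add_entropy_words() */
{
	printf("mixing %d words: %u %u\n", n, buf[0], buf[1]);
}

static void
drain(void)
{
	struct rand_event *rep;
	unsigned buf[2];

	pthread_mutex_lock(&deq_lock);
	while ((rep = ring_get()) != NULL) {
		buf[0] = rep->re_time;
		buf[1] = rep->re_val;
		/* Copy out, then drop the lock around the mixing step. */
		pthread_mutex_unlock(&deq_lock);
		mix(buf, 2);
		pthread_mutex_lock(&deq_lock);
	}
	pthread_mutex_unlock(&deq_lock);
}

int
main(void)
{
	prod = 3;		/* pretend three events were queued */
	drain();
	return 0;
}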
@@ -406,7 +401,7 @@ extract_entropy(u_int8_t *buf)
#endif
/*
- * INTENTIONALLY not protected by entropylock. Races during
+ * INTENTIONALLY not protected by any lock. Races during
* memcpy() result in acceptable input data; races during
* SHA512Update() would create nasty data dependencies. We
* do not rely on this as a benefit, but if it happens, cool.