author    Markus Friedl <markus@cvs.openbsd.org>    2013-11-02 19:37:26 +0000
committer Markus Friedl <markus@cvs.openbsd.org>    2013-11-02 19:37:26 +0000
commit    e702e1d98c0356fff8759bff1fb36dc391c6291d (patch)
tree      dd8b95f2464d648da1039fe9bd07a8799d81ee01 /sys
parent    df92201897a9af983f1d34094291c8170d5ca966 (diff)
replace rc4 with ChaCha20 here, too; ok djm, tedu, deraadt
Diffstat (limited to 'sys')
-rw-r--r--  sys/crypto/chacha_private.h    220
-rw-r--r--  sys/dev/rnd.c                  224
2 files changed, 374 insertions, 70 deletions
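
The new sys/crypto/chacha_private.h is D. J. Bernstein's public-domain
chacha-merged.c. Its core primitive is the QUARTERROUND macro; as
orientation, here is a minimal sketch (ours, not part of the commit) that
writes the same quarter round as a plain function and checks it against the
quarter-round test vector from RFC 7539, section 2.1.1:

/* Illustration only: the ChaCha quarter round from chacha_private.h,
 * unrolled from the QUARTERROUND macro into a standalone function. */
#include <stdint.h>
#include <stdio.h>

static uint32_t
rotl32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

/* Mix four 32-bit state words in place (add, xor, rotate). */
static void
quarter_round(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
	*a += *b; *d = rotl32(*d ^ *a, 16);
	*c += *d; *b = rotl32(*b ^ *c, 12);
	*a += *b; *d = rotl32(*d ^ *a, 8);
	*c += *d; *b = rotl32(*b ^ *c, 7);
}

int
main(void)
{
	/* Test vector from RFC 7539, section 2.1.1. */
	uint32_t a = 0x11111111, b = 0x01020304, c = 0x9b8d6f43, d = 0x01234567;

	quarter_round(&a, &b, &c, &d);
	printf("%08x %08x %08x %08x\n", a, b, c, d);
	/* Expected: ea2a92f4 cb1cf8ce 4581472e 5881c4bb */
	return 0;
}

The block function in the header runs 20 rounds of this mixing, alternating
column and diagonal rounds, over a 16-word state of constants, key, block
counter and nonce, then adds the original state back into the result.
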
diff --git a/sys/crypto/chacha_private.h b/sys/crypto/chacha_private.h
new file mode 100644
index 00000000000..66b57c59d7b
--- /dev/null
+++ b/sys/crypto/chacha_private.h
@@ -0,0 +1,220 @@
+/*
+chacha-merged.c version 20080118
+D. J. Bernstein
+Public domain.
+*/
+
+typedef unsigned char u8;
+typedef unsigned int u32;
+
+typedef struct
+{
+ u32 input[16]; /* could be compressed */
+} chacha_ctx;
+
+#define U8C(v) (v##U)
+#define U32C(v) (v##U)
+
+#define U8V(v) ((u8)(v) & U8C(0xFF))
+#define U32V(v) ((u32)(v) & U32C(0xFFFFFFFF))
+
+#define ROTL32(v, n) \
+ (U32V((v) << (n)) | ((v) >> (32 - (n))))
+
+#define U8TO32_LITTLE(p) \
+ (((u32)((p)[0]) ) | \
+ ((u32)((p)[1]) << 8) | \
+ ((u32)((p)[2]) << 16) | \
+ ((u32)((p)[3]) << 24))
+
+#define U32TO8_LITTLE(p, v) \
+ do { \
+ (p)[0] = U8V((v) ); \
+ (p)[1] = U8V((v) >> 8); \
+ (p)[2] = U8V((v) >> 16); \
+ (p)[3] = U8V((v) >> 24); \
+ } while (0)
+
+#define ROTATE(v,c) (ROTL32(v,c))
+#define XOR(v,w) ((v) ^ (w))
+#define PLUS(v,w) (U32V((v) + (w)))
+#define PLUSONE(v) (PLUS((v),1))
+
+#define QUARTERROUND(a,b,c,d) \
+ a = PLUS(a,b); d = ROTATE(XOR(d,a),16); \
+ c = PLUS(c,d); b = ROTATE(XOR(b,c),12); \
+ a = PLUS(a,b); d = ROTATE(XOR(d,a), 8); \
+ c = PLUS(c,d); b = ROTATE(XOR(b,c), 7);
+
+static const char sigma[16] = "expand 32-byte k";
+static const char tau[16] = "expand 16-byte k";
+
+static void
+chacha_keysetup(chacha_ctx *x,const u8 *k,u32 kbits,u32 ivbits)
+{
+ const char *constants;
+
+ x->input[4] = U8TO32_LITTLE(k + 0);
+ x->input[5] = U8TO32_LITTLE(k + 4);
+ x->input[6] = U8TO32_LITTLE(k + 8);
+ x->input[7] = U8TO32_LITTLE(k + 12);
+ if (kbits == 256) { /* recommended */
+ k += 16;
+ constants = sigma;
+ } else { /* kbits == 128 */
+ constants = tau;
+ }
+ x->input[8] = U8TO32_LITTLE(k + 0);
+ x->input[9] = U8TO32_LITTLE(k + 4);
+ x->input[10] = U8TO32_LITTLE(k + 8);
+ x->input[11] = U8TO32_LITTLE(k + 12);
+ x->input[0] = U8TO32_LITTLE(constants + 0);
+ x->input[1] = U8TO32_LITTLE(constants + 4);
+ x->input[2] = U8TO32_LITTLE(constants + 8);
+ x->input[3] = U8TO32_LITTLE(constants + 12);
+}
+
+static void
+chacha_ivsetup(chacha_ctx *x,const u8 *iv)
+{
+ x->input[12] = 0;
+ x->input[13] = 0;
+ x->input[14] = U8TO32_LITTLE(iv + 0);
+ x->input[15] = U8TO32_LITTLE(iv + 4);
+}
+
+static void
+chacha_encrypt_bytes(chacha_ctx *x,const u8 *m,u8 *c,u32 bytes)
+{
+ u32 x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
+ u32 j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
+ u8 *ctarget = NULL;
+ u8 tmp[64];
+ u_int i;
+
+ if (!bytes) return;
+
+ j0 = x->input[0];
+ j1 = x->input[1];
+ j2 = x->input[2];
+ j3 = x->input[3];
+ j4 = x->input[4];
+ j5 = x->input[5];
+ j6 = x->input[6];
+ j7 = x->input[7];
+ j8 = x->input[8];
+ j9 = x->input[9];
+ j10 = x->input[10];
+ j11 = x->input[11];
+ j12 = x->input[12];
+ j13 = x->input[13];
+ j14 = x->input[14];
+ j15 = x->input[15];
+
+ for (;;) {
+ if (bytes < 64) {
+ for (i = 0;i < bytes;++i) tmp[i] = m[i];
+ m = tmp;
+ ctarget = c;
+ c = tmp;
+ }
+ x0 = j0;
+ x1 = j1;
+ x2 = j2;
+ x3 = j3;
+ x4 = j4;
+ x5 = j5;
+ x6 = j6;
+ x7 = j7;
+ x8 = j8;
+ x9 = j9;
+ x10 = j10;
+ x11 = j11;
+ x12 = j12;
+ x13 = j13;
+ x14 = j14;
+ x15 = j15;
+ for (i = 20;i > 0;i -= 2) {
+ QUARTERROUND( x0, x4, x8,x12)
+ QUARTERROUND( x1, x5, x9,x13)
+ QUARTERROUND( x2, x6,x10,x14)
+ QUARTERROUND( x3, x7,x11,x15)
+ QUARTERROUND( x0, x5,x10,x15)
+ QUARTERROUND( x1, x6,x11,x12)
+ QUARTERROUND( x2, x7, x8,x13)
+ QUARTERROUND( x3, x4, x9,x14)
+ }
+ x0 = PLUS(x0,j0);
+ x1 = PLUS(x1,j1);
+ x2 = PLUS(x2,j2);
+ x3 = PLUS(x3,j3);
+ x4 = PLUS(x4,j4);
+ x5 = PLUS(x5,j5);
+ x6 = PLUS(x6,j6);
+ x7 = PLUS(x7,j7);
+ x8 = PLUS(x8,j8);
+ x9 = PLUS(x9,j9);
+ x10 = PLUS(x10,j10);
+ x11 = PLUS(x11,j11);
+ x12 = PLUS(x12,j12);
+ x13 = PLUS(x13,j13);
+ x14 = PLUS(x14,j14);
+ x15 = PLUS(x15,j15);
+
+#ifndef KEYSTREAM_ONLY
+ x0 = XOR(x0,U8TO32_LITTLE(m + 0));
+ x1 = XOR(x1,U8TO32_LITTLE(m + 4));
+ x2 = XOR(x2,U8TO32_LITTLE(m + 8));
+ x3 = XOR(x3,U8TO32_LITTLE(m + 12));
+ x4 = XOR(x4,U8TO32_LITTLE(m + 16));
+ x5 = XOR(x5,U8TO32_LITTLE(m + 20));
+ x6 = XOR(x6,U8TO32_LITTLE(m + 24));
+ x7 = XOR(x7,U8TO32_LITTLE(m + 28));
+ x8 = XOR(x8,U8TO32_LITTLE(m + 32));
+ x9 = XOR(x9,U8TO32_LITTLE(m + 36));
+ x10 = XOR(x10,U8TO32_LITTLE(m + 40));
+ x11 = XOR(x11,U8TO32_LITTLE(m + 44));
+ x12 = XOR(x12,U8TO32_LITTLE(m + 48));
+ x13 = XOR(x13,U8TO32_LITTLE(m + 52));
+ x14 = XOR(x14,U8TO32_LITTLE(m + 56));
+ x15 = XOR(x15,U8TO32_LITTLE(m + 60));
+#endif
+
+ j12 = PLUSONE(j12);
+ if (!j12) {
+ j13 = PLUSONE(j13);
+ /* stopping at 2^70 bytes per nonce is user's responsibility */
+ }
+
+ U32TO8_LITTLE(c + 0,x0);
+ U32TO8_LITTLE(c + 4,x1);
+ U32TO8_LITTLE(c + 8,x2);
+ U32TO8_LITTLE(c + 12,x3);
+ U32TO8_LITTLE(c + 16,x4);
+ U32TO8_LITTLE(c + 20,x5);
+ U32TO8_LITTLE(c + 24,x6);
+ U32TO8_LITTLE(c + 28,x7);
+ U32TO8_LITTLE(c + 32,x8);
+ U32TO8_LITTLE(c + 36,x9);
+ U32TO8_LITTLE(c + 40,x10);
+ U32TO8_LITTLE(c + 44,x11);
+ U32TO8_LITTLE(c + 48,x12);
+ U32TO8_LITTLE(c + 52,x13);
+ U32TO8_LITTLE(c + 56,x14);
+ U32TO8_LITTLE(c + 60,x15);
+
+ if (bytes <= 64) {
+ if (bytes < 64) {
+ for (i = 0;i < bytes;++i) ctarget[i] = c[i];
+ }
+ x->input[12] = j12;
+ x->input[13] = j13;
+ return;
+ }
+ bytes -= 64;
+ c += 64;
+#ifndef KEYSTREAM_ONLY
+ m += 64;
+#endif
+ }
+}
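
Taken by itself, the new header is a self-contained keystream generator:
define KEYSTREAM_ONLY before including it, as rnd.c does below, and
chacha_encrypt_bytes() emits raw keystream instead of XORing a message
through. A minimal userland sketch of that usage, assuming chacha_private.h
from this diff is on the include path and using an obviously non-random
placeholder key:

#include <sys/types.h>		/* u_int, used inside the header */
#include <stdio.h>
#include <string.h>

#define KEYSTREAM_ONLY		/* keystream output only, no plaintext XOR */
#include "chacha_private.h"

int
main(void)
{
	chacha_ctx ctx;
	u8 key[32], iv[8], ks[64];
	int i;

	memset(key, 'K', sizeof(key));	/* placeholder, NOT a real key */
	memset(iv, 0, sizeof(iv));

	chacha_keysetup(&ctx, key, 256, 0);	/* 256-bit key */
	chacha_ivsetup(&ctx, iv);		/* 64-bit nonce, counter = 0 */

	/* With KEYSTREAM_ONLY the input contents do not affect the output;
	 * rnd.c passes rs_buf as both input and output in exactly this way. */
	chacha_encrypt_bytes(&ctx, ks, ks, sizeof(ks));

	for (i = 0; i < (int)sizeof(ks); i++)
		printf("%02x", ks[i]);
	printf("\n");
	return 0;
}
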
diff --git a/sys/dev/rnd.c b/sys/dev/rnd.c
index 4c20155f8fe..f1e1a27c719 100644
--- a/sys/dev/rnd.c
+++ b/sys/dev/rnd.c
@@ -1,9 +1,10 @@
-/* $OpenBSD: rnd.c,v 1.144 2013/10/30 02:13:16 dlg Exp $ */
+/* $OpenBSD: rnd.c,v 1.145 2013/11/02 19:37:25 markus Exp $ */
/*
* Copyright (c) 2011 Theo de Raadt.
* Copyright (c) 2008 Damien Miller.
* Copyright (c) 1996, 1997, 2000-2002 Michael Shalayeff.
+ * Copyright (c) 2013 Markus Friedl.
* Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.
* All rights reserved.
*
@@ -124,7 +125,9 @@
#include <sys/msgbuf.h>
#include <crypto/md5.h>
-#include <crypto/arc4.h>
+
+#define KEYSTREAM_ONLY
+#include <crypto/chacha_private.h>
#include <dev/rndvar.h>
@@ -522,28 +525,7 @@ extract_entropy(u_int8_t *buf, int nbytes)
explicit_bzero(buffer, sizeof(buffer));
}
-/*
- * Bytes of key material for each rc4 instance.
- */
-#define ARC4_KEY_BYTES 64
-
-/*
- * Throw away a multiple of the first N words of output, as suggested
- * in the paper "Weaknesses in the Key Scheduling Algorithm of RC4"
- * by Fluher, Mantin, and Shamir. (N = 256 in our case.) If the start
- * of a new RC stream is an event that a consumer could spot, we drop
- * the strictly recommended amount (ceil(n/log e) = 6). If consumers
- * only see random sub-streams, we cheat and do less computation.
- */
-#define ARC4_STATE 256
-#define ARC4_DISCARD_SAFE 6
-#define ARC4_DISCARD_CHEAP 4
-
-/*
- * Start with an unstable state so that rc4_getbytes() can
- * operate (poorly) before rc4_keysetup().
- */
-struct rc4_ctx arc4random_state = { 0, 0, { 1, 2, 3, 4, 5, 6 } };
+/* random keystream by ChaCha */
struct mutex rndlock = MUTEX_INITIALIZER(IPL_HIGH);
struct timeout arc4_timeout;
@@ -552,6 +534,135 @@ struct task arc4_task;
void arc4_reinit(void *v); /* timeout to start reinit */
void arc4_init(void *, void *); /* actually do the reinit */
+#define KEYSZ 32
+#define IVSZ 8
+#define BLOCKSZ 64
+#define RSBUFSZ (16*BLOCKSZ)
+static int rs_initialized;
+static chacha_ctx rs; /* chacha context for random keystream */
+static u_char rs_buf[RSBUFSZ]; /* keystream blocks */
+static size_t rs_have; /* valid bytes at end of rs_buf */
+static size_t rs_count; /* bytes till reseed */
+
+static inline void _rs_rekey(u_char *dat, size_t datlen);
+
+static inline void
+_rs_init(u_char *buf, size_t n)
+{
+ KASSERT(n >= KEYSZ + IVSZ);
+ chacha_keysetup(&rs, buf, KEYSZ * 8, 0);
+ chacha_ivsetup(&rs, buf + KEYSZ);
+}
+
+static void
+_rs_seed(u_char *buf, size_t n)
+{
+ if (!rs_initialized) {
+ rs_initialized = 1;
+ _rs_init(buf, n);
+ } else
+ _rs_rekey(buf, n);
+
+ /* invalidate rs_buf */
+ rs_have = 0;
+ memset(rs_buf, 0, RSBUFSZ);
+
+ rs_count = 1600000;
+}
+
+static void
+_rs_stir(int do_lock)
+{
+ struct timespec ts;
+ u_int8_t buf[KEYSZ + IVSZ], *p;
+ int i;
+
+ /*
+ * Use MD5 PRNG data and a system timespec; early in the boot
+ * process this is the best we can do -- some architectures do
+ * not collect entropy very well during this time, but may have
+ * clock information which is better than nothing.
+ */
+ extract_entropy((u_int8_t *)buf, sizeof buf);
+
+ nanotime(&ts);
+ for (p = (u_int8_t *)&ts, i = 0; i < sizeof(ts); i++)
+ buf[i] ^= p[i];
+
+ if (do_lock)
+ mtx_enter(&rndlock);
+ _rs_seed(buf, sizeof(buf));
+ rndstats.arc4_nstirs++;
+ if (do_lock)
+ mtx_leave(&rndlock);
+
+ explicit_bzero(buf, sizeof(buf));
+}
+
+static inline void
+_rs_stir_if_needed(size_t len)
+{
+ if (rs_count <= len || !rs_initialized)
+ _rs_stir(0);
+ else
+ rs_count -= len;
+}
+
+static inline void
+_rs_rekey(u_char *dat, size_t datlen)
+{
+#ifndef KEYSTREAM_ONLY
+ memset(rs_buf, 0,RSBUFSZ);
+#endif
+ /* fill rs_buf with the keystream */
+ chacha_encrypt_bytes(&rs, rs_buf, rs_buf, RSBUFSZ);
+ /* mix in optional user provided data */
+ if (dat) {
+ size_t i, m;
+
+ m = MIN(datlen, KEYSZ + IVSZ);
+ for (i = 0; i < m; i++)
+ rs_buf[i] ^= dat[i];
+ }
+ /* immediately reinit for backtracking resistance */
+ _rs_init(rs_buf, KEYSZ + IVSZ);
+ memset(rs_buf, 0, KEYSZ + IVSZ);
+ rs_have = RSBUFSZ - KEYSZ - IVSZ;
+}
+
+static inline void
+_rs_random_buf(void *_buf, size_t n)
+{
+ u_char *buf = (u_char *)_buf;
+ size_t m;
+
+ _rs_stir_if_needed(n);
+ while (n > 0) {
+ if (rs_have > 0) {
+ m = MIN(n, rs_have);
+ memcpy(buf, rs_buf + RSBUFSZ - rs_have, m);
+ memset(rs_buf + RSBUFSZ - rs_have, 0, m);
+ buf += m;
+ n -= m;
+ rs_have -= m;
+ }
+ if (rs_have == 0)
+ _rs_rekey(NULL, 0);
+ }
+}
+
+static inline void
+_rs_random_u32(u_int32_t *val)
+{
+ _rs_stir_if_needed(sizeof(*val));
+ if (rs_have < sizeof(*val))
+ _rs_rekey(NULL, 0);
+ memcpy(val, rs_buf + RSBUFSZ - rs_have, sizeof(*val));
+ memset(rs_buf + RSBUFSZ - rs_have, 0, sizeof(*val));
+ rs_have -= sizeof(*val);
+ return;
+}
+
/* Return one word of randomness from an RC4 generator */
u_int32_t
arc4random(void)
@@ -559,7 +670,7 @@ arc4random(void)
u_int32_t ret;
mtx_enter(&rndlock);
- rc4_getbytes(&arc4random_state, (u_char *)&ret, sizeof(ret));
+ _rs_random_u32(&ret);
rndstats.arc4_reads += sizeof(ret);
mtx_leave(&rndlock);
return ret;
@@ -572,7 +683,7 @@ void
arc4random_buf(void *buf, size_t n)
{
mtx_enter(&rndlock);
- rc4_getbytes(&arc4random_state, (u_char *)buf, n);
+ _rs_random_buf(buf, n);
rndstats.arc4_reads += n;
mtx_leave(&rndlock);
}
@@ -617,40 +728,7 @@ arc4random_uniform(u_int32_t upper_bound)
void
arc4_init(void *v, void *w)
{
- struct rc4_ctx new_ctx;
- struct timespec ts;
- u_int8_t buf[ARC4_KEY_BYTES], *p;
- int i;
-
- /*
- * Use MD5 PRNG data and a system timespec; early in the boot
- * process this is the best we can do -- some architectures do
- * not collect entropy very well during this time, but may have
- * clock information which is better than nothing.
- */
- extract_entropy((u_int8_t *)buf, sizeof buf);
- if (timeout_initialized(&rnd_timeout))
- nanotime(&ts);
- for (p = (u_int8_t *)&ts, i = 0; i < sizeof(ts); i++)
- buf[i] ^= p[i];
-
- /* Carry over some state from the previous PRNG instance */
- mtx_enter(&rndlock);
- if (rndstats.arc4_nstirs > 0)
- rc4_crypt(&arc4random_state, buf, buf, sizeof(buf));
- mtx_leave(&rndlock);
-
- rc4_keysetup(&new_ctx, buf, sizeof(buf));
- rc4_skip(&new_ctx, ARC4_STATE * ARC4_DISCARD_CHEAP);
-
- mtx_enter(&rndlock);
- bcopy(&new_ctx, &arc4random_state, sizeof(new_ctx));
- rndstats.rnd_used += sizeof(buf) * 8;
- rndstats.arc4_nstirs++;
- mtx_leave(&rndlock);
-
- explicit_bzero(buf, sizeof(buf));
- explicit_bzero(&new_ctx, sizeof(new_ctx));
+ _rs_stir(1);
}
/*
@@ -667,6 +745,8 @@ arc4_reinit(void *v)
void
random_init(void)
{
+ int off;
+
rnd_states[RND_SRC_TIMER].dont_count_entropy = 1;
rnd_states[RND_SRC_TRUE].dont_count_entropy = 1;
rnd_states[RND_SRC_TRUE].max_entropy = 1;
@@ -676,7 +756,8 @@ random_init(void)
* NOTE: We assume there are at 8192 bytes mapped after version,
* because we want to pull some "code" in as well.
*/
- rc4_keysetup(&arc4random_state, (u_int8_t *)&version, 8192);
+ for (off = 0; off < 8192 - KEYSZ - IVSZ; off += KEYSZ + IVSZ)
+ _rs_seed((u_int8_t *)version + off, KEYSZ + IVSZ);
}
void
@@ -723,8 +804,8 @@ randomclose(dev_t dev, int flag, int mode, struct proc *p)
}
/*
- * Maximum number of bytes to serve directly from the main arc4random
- * pool. Larger requests are served from a discrete arc4 instance keyed
+ * Maximum number of bytes to serve directly from the main ChaCha
+ * pool. Larger requests are served from a discrete ChaCha instance keyed
* from the main pool.
*/
#define ARC4_MAIN_MAX_BYTES 2048
@@ -732,8 +813,8 @@ randomclose(dev_t dev, int flag, int mode, struct proc *p)
int
randomread(dev_t dev, struct uio *uio, int ioflag)
{
- u_char lbuf[ARC4_KEY_BYTES];
- struct rc4_ctx lctx;
+ u_char lbuf[KEYSZ+IVSZ];
+ chacha_ctx lctx;
size_t total = uio->uio_resid;
u_char *buf;
int myctx = 0, ret = 0;
@@ -744,8 +825,8 @@ randomread(dev_t dev, struct uio *uio, int ioflag)
buf = malloc(POOLBYTES, M_TEMP, M_WAITOK);
if (total > ARC4_MAIN_MAX_BYTES) {
arc4random_buf(lbuf, sizeof(lbuf));
- rc4_keysetup(&lctx, lbuf, sizeof(lbuf));
- rc4_skip(&lctx, ARC4_STATE * ARC4_DISCARD_SAFE);
+ chacha_keysetup(&lctx, lbuf, KEYSZ * 8, 0);
+ chacha_ivsetup(&lctx, lbuf + KEYSZ);
explicit_bzero(lbuf, sizeof(lbuf));
myctx = 1;
}
@@ -753,9 +834,12 @@ randomread(dev_t dev, struct uio *uio, int ioflag)
while (ret == 0 && uio->uio_resid > 0) {
int n = min(POOLBYTES, uio->uio_resid);
- if (myctx)
- rc4_getbytes(&lctx, buf, n);
- else
+ if (myctx) {
+#ifndef KEYSTREAM_ONLY
+ bzero(buf, n);
+#endif
+ chacha_encrypt_bytes(&lctx, buf, buf, n);
+ } else
arc4random_buf(buf, n);
ret = uiomove((caddr_t)buf, n, uio);
if (ret == 0 && uio->uio_resid > 0)
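
The generator added to rnd.c above keeps roughly 1 KB of ChaCha output in
rs_buf, hands bytes out from the tail of that buffer, and zeroes each byte
as it is consumed. When the buffer runs dry, _rs_rekey() refills it with
fresh keystream, immediately rekeys the cipher from the first KEYSZ+IVSZ
bytes of that output, and erases them, so recovering the current state does
not let an attacker reconstruct bytes that were already handed out. A
reduced model of that pattern follows; the names and the toy
fill_keystream() are ours, for illustration only, and are not cryptographic:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

#define BUFSZ	128			/* rnd.c uses RSBUFSZ = 16*64 */
#define KEYSZ	32

static unsigned char buf[BUFSZ];	/* models rs_buf */
static size_t have;			/* valid bytes at the end of buf */
static unsigned char key[KEYSZ];	/* stands in for the chacha_ctx */

/* Toy stand-in for chacha_encrypt_bytes() over rs_buf; NOT cryptographic. */
static void
fill_keystream(void)
{
	size_t i;

	for (i = 0; i < BUFSZ; i++)
		buf[i] = (unsigned char)(key[i % KEYSZ] + i);
}

static void
rekey(void)
{
	fill_keystream();
	/* Rekey from our own output, then erase it: backtracking
	 * resistance, as in _rs_rekey() above. */
	memcpy(key, buf, KEYSZ);
	memset(buf, 0, KEYSZ);
	have = BUFSZ - KEYSZ;
}

static void
random_buf(unsigned char *out, size_t n)
{
	size_t m;

	while (n > 0) {
		if (have == 0)
			rekey();
		m = n < have ? n : have;
		memcpy(out, buf + BUFSZ - have, m);
		memset(buf + BUFSZ - have, 0, m);	/* never reuse output */
		out += m;
		n -= m;
		have -= m;
	}
}

int
main(void)
{
	unsigned char out[20];
	size_t i;

	random_buf(out, sizeof(out));
	for (i = 0; i < sizeof(out); i++)
		printf("%02x", out[i]);
	printf("\n");
	return 0;
}

The real code additionally reseeds from the entropy pool via _rs_stir()
after 1600000 output bytes (rs_count in _rs_stir_if_needed()) and
serializes all consumers with rndlock.
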