author     Bob Beck <beck@cvs.openbsd.org>    2001-08-01 19:51:18 +0000
committer  Bob Beck <beck@cvs.openbsd.org>    2001-08-01 19:51:18 +0000
commit     38223f1fe95067b826a997cfe88963e9e93cf6c9 (patch)
tree       54b73def4615fc41d3103d101265ee0a2024baa6 /lib/libcrypto/rand
parent     98ba4ab2de5732015ef5842227bc313d6f6c67ae (diff)
merge openssl 0.9.6b-engine
Note that this is a maintenance release; APIs appear *not* to have changed. As such, I have only increased the minor number on these libraries.
Diffstat (limited to 'lib/libcrypto/rand')
-rw-r--r--  lib/libcrypto/rand/md_rand.c   | 99
-rw-r--r--  lib/libcrypto/rand/randfile.c  |  8
2 files changed, 73 insertions, 34 deletions
diff --git a/lib/libcrypto/rand/md_rand.c b/lib/libcrypto/rand/md_rand.c
index ae57570608c..04b9d695b0d 100644
--- a/lib/libcrypto/rand/md_rand.c
+++ b/lib/libcrypto/rand/md_rand.c
@@ -141,10 +141,11 @@ static long md_count[2]={0,0};
static double entropy=0;
static int initialized=0;
-/* This should be set to 1 only when ssleay_rand_add() is called inside
- an already locked state, so it doesn't try to lock and thereby cause
- a hang. And it should always be reset back to 0 before unlocking. */
-static int add_do_not_lock=0;
+static unsigned int crypto_lock_rand = 0; /* may be set only when a thread
+ * holds CRYPTO_LOCK_RAND
+ * (to prevent double locking) */
+static unsigned long locking_thread = 0; /* valid iff crypto_lock_rand is set */
+
#ifdef PREDICT
int rand_predictable=0;
@@ -191,6 +192,7 @@ static void ssleay_rand_add(const void *buf, int num, double add)
long md_c[2];
unsigned char local_md[MD_DIGEST_LENGTH];
MD_CTX m;
+ int do_not_lock;
/*
* (Based on the rand(3) manpage)
@@ -207,7 +209,10 @@ static void ssleay_rand_add(const void *buf, int num, double add)
* hash function.
*/
- if (!add_do_not_lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND);
+ /* check if we already have the lock */
+ do_not_lock = crypto_lock_rand && (locking_thread == CRYPTO_thread_id());
+
+ if (!do_not_lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND);
st_idx=state_index;
/* use our own copies of the counters so that even
@@ -239,7 +244,7 @@ static void ssleay_rand_add(const void *buf, int num, double add)
md_count[1] += (num / MD_DIGEST_LENGTH) + (num % MD_DIGEST_LENGTH > 0);
- if (!add_do_not_lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
+ if (!do_not_lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
for (i=0; i<num; i+=MD_DIGEST_LENGTH)
{
@@ -281,7 +286,7 @@ static void ssleay_rand_add(const void *buf, int num, double add)
}
memset((char *)&m,0,sizeof(m));
- if (!add_do_not_lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND);
+ if (!do_not_lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND);
/* Don't just copy back local_md into md -- this could mean that
* other thread's seeding remains without effect (except for
* the incremented counter). By XORing it we keep at least as
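
The comment above, truncated by the hunk context, argues for merging the freshly computed local_md into the global md with XOR rather than a plain copy: a copy would silently discard whatever another thread mixed into the global md while this thread was hashing without the lock. The merge step it refers to boils down to something like this (minimal sketch, assuming MD_DIGEST_LENGTH-byte digests):

    for (i = 0; i < MD_DIGEST_LENGTH; i++)
        md[i] ^= local_md[i];   /* XOR keeps both threads' contributions */
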
@@ -292,7 +297,7 @@ static void ssleay_rand_add(const void *buf, int num, double add)
}
if (entropy < ENTROPY_NEEDED) /* stop counting when we have enough */
entropy += add;
- if (!add_do_not_lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
+ if (!do_not_lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
#if !defined(THREADS) && !defined(WIN32)
assert(md_c[1] == md_count[1]);
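
Taken together, the ssleay_rand_add() hunks above replace the old global add_do_not_lock flag with a thread-aware test: locking is skipped only when the calling thread is the one recorded as already holding CRYPTO_LOCK_RAND. Stripped of the hashing in between, the pattern looks roughly like the sketch below; it is written against the OpenSSL 0.9.6 CRYPTO API but is not the patched function itself.

#include <openssl/crypto.h>

static unsigned int crypto_lock_rand = 0;  /* set only while CRYPTO_LOCK_RAND is held */
static unsigned long locking_thread = 0;   /* valid only if crypto_lock_rand is set */

static void rand_add_sketch(const void *buf, int num, double entropy)
{
    /* Skip locking only if this very thread already holds the lock. */
    int do_not_lock = crypto_lock_rand && (locking_thread == CRYPTO_thread_id());

    if (!do_not_lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND);
    /* ... read the shared counters and state index ... */
    if (!do_not_lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND);

    /* ... hash 'num' bytes of 'buf' into a local digest, without the lock ... */

    if (!do_not_lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND);
    /* ... XOR the result back into the pool, credit 'entropy' bits ... */
    if (!do_not_lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
}
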
@@ -340,28 +345,31 @@ static int ssleay_rand_bytes(unsigned char *buf, int num)
*
* For each group of 10 bytes (or less), we do the following:
*
- * Input into the hash function the top 10 bytes from the
- * local 'md' (which is initialized from the global 'md'
- * before any bytes are generated), the bytes that are
- * to be overwritten by the random bytes, and bytes from the
- * 'state' (incrementing looping index). From this digest output
- * (which is kept in 'md'), the top (up to) 10 bytes are
- * returned to the caller and the bottom (up to) 10 bytes are xored
- * into the 'state'.
+ * Input into the hash function the local 'md' (which is initialized from
+ * the global 'md' before any bytes are generated), the bytes that are to
+ * be overwritten by the random bytes, and bytes from the 'state'
+ * (incrementing looping index). From this digest output (which is kept
+ * in 'md'), the top (up to) 10 bytes are returned to the caller and the
+ * bottom 10 bytes are xored into the 'state'.
+ *
* Finally, after we have finished 'num' random bytes for the
* caller, 'count' (which is incremented) and the local and global 'md'
* are fed into the hash function and the results are kept in the
* global 'md'.
*/
- if (!initialized)
- RAND_poll();
-
CRYPTO_w_lock(CRYPTO_LOCK_RAND);
- add_do_not_lock = 1; /* Since we call ssleay_rand_add while in
- this locked state. */
- initialized = 1;
+ /* prevent ssleay_rand_bytes() from trying to obtain the lock again */
+ crypto_lock_rand = 1;
+ locking_thread = CRYPTO_thread_id();
+
+ if (!initialized)
+ {
+ RAND_poll();
+ initialized = 1;
+ }
+
if (!stirred_pool)
do_stir_pool = 1;
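
The long comment above describes one round of the output step. As an illustration only, assuming the MD in use is SHA-1 (so MD_DIGEST_LENGTH is 20 and each round yields up to 10 bytes), a round could be written as below; the real loop additionally folds in the counters, handles state_index wrap-around, and runs outside the lock.

#include <openssl/sha.h>
#include <string.h>

#define STATE_SIZE 1023          /* size of the randomness pool, as in md_rand.c */
static unsigned char state[STATE_SIZE];

/* One output round: 'out' holds n (<= 10) bytes about to be overwritten. */
static void output_round_sketch(unsigned char local_md[20],
                                unsigned char *out, int n, int st_idx)
{
    SHA_CTX c;
    int i;

    SHA1_Init(&c);
    SHA1_Update(&c, local_md, 20);       /* chained local 'md' */
    SHA1_Update(&c, out, n);             /* bytes that will be overwritten */
    SHA1_Update(&c, &state[st_idx], n);  /* slice of the pool */
    SHA1_Final(local_md, &c);            /* the digest becomes the new 'md' */

    memcpy(out, local_md, n);            /* top (up to) 10 bytes to the caller */
    for (i = 0; i < n; i++)
        state[st_idx + i] ^= local_md[10 + i];  /* bottom bytes back into the pool */
}
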
@@ -387,11 +395,11 @@ static int ssleay_rand_bytes(unsigned char *buf, int num)
if (do_stir_pool)
{
- /* Our output function chains only half of 'md', so we better
- * make sure that the required entropy gets 'evenly distributed'
- * through 'state', our randomness pool. The input function
- * (ssleay_rand_add) chains all of 'md', which makes it more
- * suitable for this purpose.
+ /* In the output function only half of 'md' remains secret,
+ * so we better make sure that the required entropy gets
+ * 'evenly distributed' through 'state', our randomness pool.
+ * The input function (ssleay_rand_add) chains all of 'md',
+ * which makes it more suitable for this purpose.
*/
int n = STATE_SIZE; /* so that the complete pool gets accessed */
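
The rewritten comment explains why the pool is stirred through the input function rather than the output function: ssleay_rand_add() chains the full digest, so seed material propagates across all of 'state'. Concretely, stirring amounts to walking the whole pool in digest-sized steps, roughly as sketched here ('dummy' is just any MD_DIGEST_LENGTH-byte buffer; its content does not matter and is credited with 0.0 bits of entropy):

    int n = STATE_SIZE;  /* so that the complete pool gets accessed */
    while (n > 0)
    {
        /* each call advances through 'state' and chains the full digest */
        ssleay_rand_add(dummy, MD_DIGEST_LENGTH, 0.0);
        n -= MD_DIGEST_LENGTH;
    }
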
@@ -425,8 +433,9 @@ static int ssleay_rand_bytes(unsigned char *buf, int num)
md_count[0] += 1;
- add_do_not_lock = 0; /* If this would ever be forgotten, we can
- expect any evil god to eat our souls. */
+ /* before unlocking, we must clear 'crypto_lock_rand' */
+ crypto_lock_rand = 0;
+ locking_thread = 0;
CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
while (num > 0)
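
On the release side the order matters: the guard is cleared while CRYPTO_LOCK_RAND is still held. If it were cleared after the unlock, another thread could acquire the lock and set crypto_lock_rand/locking_thread for itself, only to have this thread's late stores wipe them out. Continuing the earlier sketch:

static void rand_bytes_sketch(unsigned char *buf, int num)
{
    CRYPTO_w_lock(CRYPTO_LOCK_RAND);
    crypto_lock_rand = 1;                 /* we hold the lock ... */
    locking_thread = CRYPTO_thread_id();  /* ... and this is who we are */

    /* ... generate 'num' bytes into 'buf'; may call RAND_poll() or
     *     rand_add_sketch() re-entrantly while the lock is held ... */

    crypto_lock_rand = 0;                 /* clear the guard first ... */
    locking_thread = 0;
    CRYPTO_w_unlock(CRYPTO_LOCK_RAND);    /* ... then drop the lock */
}
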
@@ -492,11 +501,12 @@ static int ssleay_rand_bytes(unsigned char *buf, int num)
static int ssleay_rand_pseudo_bytes(unsigned char *buf, int num)
{
int ret;
+ unsigned long err;
ret = RAND_bytes(buf, num);
if (ret == 0)
{
- long err = ERR_peek_error();
+ err = ERR_peek_error();
if (ERR_GET_LIB(err) == ERR_LIB_RAND &&
ERR_GET_REASON(err) == RAND_R_PRNG_NOT_SEEDED)
(void)ERR_get_error();
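
Two things happen in this hunk: the error code is now held in an unsigned long, matching what ERR_peek_error() actually returns, and a RAND_R_PRNG_NOT_SEEDED failure is consumed from the error queue so that asking for merely pseudo-random bytes does not leave an error behind. From the caller's side the distinction looks like this (usage sketch against the 0.9.6-era API with the default RAND method; get_nonce() is a hypothetical caller):

#include <openssl/rand.h>
#include <stdio.h>

int get_nonce(unsigned char *buf, int len)
{
    int rc = RAND_pseudo_bytes(buf, len);  /* reaches ssleay_rand_pseudo_bytes() */

    if (rc < 0)
        return -1;  /* not supported by the current RAND method */
    if (rc == 0)
        fprintf(stderr, "PRNG not fully seeded; nonce is not "
                        "cryptographically strong\n");
    return 0;
}
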
@@ -507,14 +517,37 @@ static int ssleay_rand_pseudo_bytes(unsigned char *buf, int num)
static int ssleay_rand_status(void)
{
int ret;
+ int do_not_lock;
+ /* check if we already have the lock
+ * (could happen if a RAND_poll() implementation calls RAND_status()) */
+ do_not_lock = crypto_lock_rand && (locking_thread == CRYPTO_thread_id());
+
+ if (!do_not_lock)
+ {
+ CRYPTO_w_lock(CRYPTO_LOCK_RAND);
+
+ /* prevent ssleay_rand_bytes() from trying to obtain the lock again */
+ crypto_lock_rand = 1;
+ locking_thread = CRYPTO_thread_id();
+ }
+
if (!initialized)
+ {
RAND_poll();
+ initialized = 1;
+ }
- CRYPTO_w_lock(CRYPTO_LOCK_RAND);
- initialized = 1;
ret = entropy >= ENTROPY_NEEDED;
- CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
+ if (!do_not_lock)
+ {
+ /* before unlocking, we must clear 'crypto_lock_rand' */
+ crypto_lock_rand = 0;
+ locking_thread = 0;
+
+ CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
+ }
+
return ret;
}
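
ssleay_rand_status() gets the same guard because it can be re-entered: ssleay_rand_bytes() calls RAND_poll() while holding CRYPTO_LOCK_RAND, and nothing stops a RAND_poll() implementation from consulting RAND_status() to decide when it has gathered enough entropy. A hypothetical RAND_poll() of that shape (collect_entropy() is an invented platform hook, not a real API):

#include <openssl/rand.h>

void collect_entropy(unsigned char *buf, int len);   /* invented platform hook */

int RAND_poll(void)
{
    unsigned char seed[32];

    while (!RAND_status())                     /* re-enters ssleay_rand_status() */
    {
        collect_entropy(seed, sizeof(seed));
        RAND_add(seed, sizeof(seed), 8.0);     /* re-enters ssleay_rand_add() */
    }
    return 1;
}

Without the locking_thread check, the RAND_status() call above would try to take CRYPTO_LOCK_RAND a second time from the thread that already holds it and, with a non-recursive locking callback, deadlock.
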
diff --git a/lib/libcrypto/rand/randfile.c b/lib/libcrypto/rand/randfile.c
index f4376cf8cc5..2ffb84c89e2 100644
--- a/lib/libcrypto/rand/randfile.c
+++ b/lib/libcrypto/rand/randfile.c
@@ -233,6 +233,12 @@ const char *RAND_file_name(char *buf, size_t size)
{
if (issetugid() == 0)
s=getenv("HOME");
+#ifdef DEFAULT_HOME
+ if (s == NULL)
+ {
+ s = DEFAULT_HOME;
+ }
+#endif
if (s && *s && strlen(s)+strlen(RFILE)+2 < size)
{
strlcpy(buf,s,size);
@@ -242,7 +248,7 @@ const char *RAND_file_name(char *buf, size_t size)
strlcat(buf,RFILE,size);
ret=buf;
}
- else
+ else
buf[0] = '\0'; /* no file name */
}
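
The randfile.c change adds a compile-time fallback: when the process is not setuid/setgid but has no HOME in its environment, DEFAULT_HOME (if defined at build time) is used as the home directory before the seed-file path is assembled. In isolation, the resulting logic is roughly the sketch below; it is not the full RAND_file_name(), and RFILE here stands for the seed-file name (".rnd" in the real tree).

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define RFILE ".rnd"              /* seed-file name used by the real code */

static const char *file_name_sketch(char *buf, size_t size)
{
    const char *s = NULL;

    if (issetugid() == 0)         /* ignore the environment when setuid/setgid */
        s = getenv("HOME");
#ifdef DEFAULT_HOME
    if (s == NULL)
        s = DEFAULT_HOME;         /* e.g. -DDEFAULT_HOME='"/"' at build time */
#endif
    if (s && *s && strlen(s) + strlen(RFILE) + 2 < size)
    {
        strlcpy(buf, s, size);    /* the "+2" leaves room for '/' and the NUL */
        strlcat(buf, "/", size);
        strlcat(buf, RFILE, size);
        return buf;
    }
    buf[0] = '\0';                /* no usable file name */
    return NULL;
}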