author	Todd C. Miller <millert@cvs.openbsd.org>	2004-05-03 02:57:48 +0000
committer	Todd C. Miller <millert@cvs.openbsd.org>	2004-05-03 02:57:48 +0000
commit	6e88891651342c15d4d56feba77bed64f9209203 (patch)
tree	5d3a7f1827c317336ba3b4b2df0f5f3d8a555077	/sys/crypto/sha2.c
parent	beca111bc08889bdc389b569c83924807e75c1df (diff)
Rev 1.4 was bogus (committed from the wrong tree), this repairs it.
Diffstat (limited to 'sys/crypto/sha2.c')
-rw-r--r--	sys/crypto/sha2.c	150
1 file changed, 64 insertions, 86 deletions
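
For context on what the repaired revision does: both Transform routines now take a plain byte pointer and assemble each message word with explicit big-endian shifts, instead of casting the input to a word pointer and byte-swapping under a BYTE_ORDER conditional. A minimal sketch of the two load styles follows; the helper names load32_cast and load32_be are illustrative, not from the file.

	#include <stdint.h>

	/*
	 * Old style: reinterpret the buffer as words.  Correct only on
	 * big-endian hosts (little-endian hosts needed a REVERSE32 swap
	 * behind a BYTE_ORDER #ifdef) and assumes aligned input.
	 */
	static uint32_t
	load32_cast(const uint32_t *p)
	{
		return *p;
	}

	/*
	 * New style, as in this commit: build the big-endian word from
	 * individual bytes.  Endian- and alignment-independent, so the
	 * preprocessor conditionals disappear.
	 */
	static uint32_t
	load32_be(const uint8_t *p)
	{
		return (uint32_t)p[3] | ((uint32_t)p[2] << 8) |
		    ((uint32_t)p[1] << 16) | ((uint32_t)p[0] << 24);
	}
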
diff --git a/sys/crypto/sha2.c b/sys/crypto/sha2.c
index 6bd8dd9cfd5..79b42b94afb 100644
--- a/sys/crypto/sha2.c
+++ b/sys/crypto/sha2.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sha2.c,v 1.5 2004/05/03 02:55:56 millert Exp $ */
+/* $OpenBSD: sha2.c,v 1.6 2004/05/03 02:57:36 millert Exp $ */
/*
* FILE: sha2.c
@@ -161,8 +161,8 @@
* only.
*/
void SHA512_Last(SHA512_CTX *);
-void SHA256_Transform(SHA256_CTX *, const u_int32_t *);
-void SHA512_Transform(SHA512_CTX *, const u_int64_t *);
+void SHA256_Transform(SHA256_CTX *, const u_int8_t *);
+void SHA512_Transform(SHA512_CTX *, const u_int8_t *);
/*** SHA-XYZ INITIAL HASH VALUES AND CONSTANTS ************************/
@@ -282,41 +282,30 @@ SHA256_Init(SHA256_CTX *context)
/* Unrolled SHA-256 round macros: */
-#if BYTE_ORDER == LITTLE_ENDIAN
-
-#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) \
- REVERSE32(*data++, W256[j]); \
- T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \
- K256[j] + W256[j]; \
- (d) += T1; \
- (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
- j++
-
-
-#else /* BYTE_ORDER == LITTLE_ENDIAN */
-
-#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) \
- T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \
- K256[j] + (W256[j] = *data++); \
- (d) += T1; \
- (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
- j++
-
-#endif /* BYTE_ORDER == LITTLE_ENDIAN */
-
-#define ROUND256(a,b,c,d,e,f,g,h) \
- s0 = W256[(j+1)&0x0f]; \
- s0 = sigma0_256(s0); \
- s1 = W256[(j+14)&0x0f]; \
- s1 = sigma1_256(s1); \
- T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + K256[j] + \
- (W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0); \
- (d) += T1; \
- (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
- j++
+#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) do { \
+ W256[j] = (u_int32_t)data[3] | ((u_int32_t)data[2] << 8) | \
+ ((u_int32_t)data[1] << 16) | ((u_int32_t)data[0] << 24); \
+ data += 4; \
+ T1 = (h) + Sigma1_256((e)) + Ch((e), (f), (g)) + K256[j] + W256[j]; \
+ (d) += T1; \
+ (h) = T1 + Sigma0_256((a)) + Maj((a), (b), (c)); \
+ j++; \
+} while(0)
+
+#define ROUND256(a,b,c,d,e,f,g,h) do { \
+ s0 = W256[(j+1)&0x0f]; \
+ s0 = sigma0_256(s0); \
+ s1 = W256[(j+14)&0x0f]; \
+ s1 = sigma1_256(s1); \
+ T1 = (h) + Sigma1_256((e)) + Ch((e), (f), (g)) + K256[j] + \
+ (W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0); \
+ (d) += T1; \
+ (h) = T1 + Sigma0_256((a)) + Maj((a), (b), (c)); \
+ j++; \
+} while(0)
void
-SHA256_Transform(SHA256_CTX *context, const u_int32_t *data)
+SHA256_Transform(SHA256_CTX *context, const u_int8_t *data)
{
u_int32_t a, b, c, d, e, f, g, h, s0, s1;
u_int32_t T1, *W256;
@@ -376,7 +365,7 @@ SHA256_Transform(SHA256_CTX *context, const u_int32_t *data)
#else /* SHA2_UNROLL_TRANSFORM */
void
-SHA256_Transform(SHA256_CTX *context, const u_int32_t *data)
+SHA256_Transform(SHA256_CTX *context, const u_int8_t *data)
{
u_int32_t a, b, c, d, e, f, g, h, s0, s1;
u_int32_t T1, T2, *W256;
@@ -396,15 +385,11 @@ SHA256_Transform(SHA256_CTX *context, const u_int32_t *data)
j = 0;
do {
-#if BYTE_ORDER == LITTLE_ENDIAN
- /* Copy data while converting to host byte order */
- REVERSE32(*data++, W256[j]);
+ W256[j] = (u_int32_t)data[3] | ((u_int32_t)data[2] << 8) |
+ ((u_int32_t)data[1] << 16) | ((u_int32_t)data[0] << 24);
+ data += 4;
/* Apply the SHA-256 compression function to update a..h */
T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + W256[j];
-#else /* BYTE_ORDER == LITTLE_ENDIAN */
- /* Apply the SHA-256 compression function to update a..h with copy */
- T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + (W256[j] = *data++);
-#endif /* BYTE_ORDER == LITTLE_ENDIAN */
T2 = Sigma0_256(a) + Maj(a, b, c);
h = g;
g = f;
@@ -477,7 +462,7 @@ SHA256_Update(SHA256_CTX *context, const u_int8_t *data, size_t len)
context->bitcount += freespace << 3;
len -= freespace;
data += freespace;
- SHA256_Transform(context, (u_int32_t *)context->buffer);
+ SHA256_Transform(context, context->buffer);
} else {
/* The buffer is not yet full */
bcopy(data, &context->buffer[usedspace], len);
@@ -489,7 +474,7 @@ SHA256_Update(SHA256_CTX *context, const u_int8_t *data, size_t len)
}
while (len >= SHA256_BLOCK_LENGTH) {
/* Process as many complete blocks as we can */
- SHA256_Transform(context, (const u_int32_t *)data);
+ SHA256_Transform(context, data);
context->bitcount += SHA256_BLOCK_LENGTH << 3;
len -= SHA256_BLOCK_LENGTH;
data += SHA256_BLOCK_LENGTH;
@@ -528,7 +513,7 @@ SHA256_Final(u_int8_t digest[], SHA256_CTX *context)
bzero(&context->buffer[usedspace], SHA256_BLOCK_LENGTH - usedspace);
}
/* Do second-to-last transform: */
- SHA256_Transform(context, (u_int32_t *)context->buffer);
+ SHA256_Transform(context, context->buffer);
/* And set-up for the last transform: */
bzero(context->buffer, SHA256_SHORT_BLOCK_LENGTH);
@@ -544,7 +529,7 @@ SHA256_Final(u_int8_t digest[], SHA256_CTX *context)
*(u_int64_t *)&context->buffer[SHA256_SHORT_BLOCK_LENGTH] = context->bitcount;
/* Final transform: */
- SHA256_Transform(context, (u_int32_t *)context->buffer);
+ SHA256_Transform(context, context->buffer);
#if BYTE_ORDER == LITTLE_ENDIAN
{
@@ -580,41 +565,34 @@ SHA512_Init(SHA512_CTX *context)
#ifdef SHA2_UNROLL_TRANSFORM
/* Unrolled SHA-512 round macros: */
-#if BYTE_ORDER == LITTLE_ENDIAN
-
-#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
- REVERSE64(*data++, W512[j]); \
- T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \
- K512[j] + W512[j]; \
- (d) += T1, \
- (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)), \
- j++
-
-
-#else /* BYTE_ORDER == LITTLE_ENDIAN */
-
-#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
- T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \
- K512[j] + (W512[j] = *data++); \
- (d) += T1; \
- (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
- j++
-
-#endif /* BYTE_ORDER == LITTLE_ENDIAN */
-#define ROUND512(a,b,c,d,e,f,g,h) \
- s0 = W512[(j+1)&0x0f]; \
- s0 = sigma0_512(s0); \
- s1 = W512[(j+14)&0x0f]; \
- s1 = sigma1_512(s1); \
- T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + K512[j] + \
- (W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0); \
- (d) += T1; \
- (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
- j++
+#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) do { \
+ W512[j] = (u_int64_t)data[7] | ((u_int64_t)data[6] << 8) | \
+ ((u_int64_t)data[5] << 16) | ((u_int64_t)data[4] << 24) | \
+ ((u_int64_t)data[3] << 32) | ((u_int64_t)data[2] << 40) | \
+ ((u_int64_t)data[1] << 48) | ((u_int64_t)data[0] << 56); \
+ data += 8; \
+ T1 = (h) + Sigma1_512((e)) + Ch((e), (f), (g)) + K512[j] + W512[j]; \
+ (d) += T1; \
+ (h) = T1 + Sigma0_512((a)) + Maj((a), (b), (c)); \
+ j++; \
+} while(0)
+
+
+#define ROUND512(a,b,c,d,e,f,g,h) do { \
+ s0 = W512[(j+1)&0x0f]; \
+ s0 = sigma0_512(s0); \
+ s1 = W512[(j+14)&0x0f]; \
+ s1 = sigma1_512(s1); \
+ T1 = (h) + Sigma1_512((e)) + Ch((e), (f), (g)) + K512[j] + \
+ (W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0); \
+ (d) += T1; \
+ (h) = T1 + Sigma0_512((a)) + Maj((a), (b), (c)); \
+ j++; \
+} while(0)
void
-SHA512_Transform(SHA512_CTX *context, const u_int64_t *data)
+SHA512_Transform(SHA512_CTX *context, const u_int8_t *data)
{
u_int64_t a, b, c, d, e, f, g, h, s0, s1;
u_int64_t T1, *W512 = (u_int64_t *)context->buffer;
@@ -671,7 +649,7 @@ SHA512_Transform(SHA512_CTX *context, const u_int64_t *data)
#else /* SHA2_UNROLL_TRANSFORM */
void
-SHA512_Transform(SHA512_CTX *context, const u_int64_t *data)
+SHA512_Transform(SHA512_CTX *context, const u_int8_t *data)
{
u_int64_t a, b, c, d, e, f, g, h, s0, s1;
u_int64_t T1, T2, *W512 = (u_int64_t *)context->buffer;
@@ -693,7 +671,7 @@ SHA512_Transform(SHA512_CTX *context, const u_int64_t *data)
((u_int64_t)data[5] << 16) | ((u_int64_t)data[4] << 24) |
((u_int64_t)data[3] << 32) | ((u_int64_t)data[2] << 40) |
((u_int64_t)data[1] << 48) | ((u_int64_t)data[0] << 56);
- data++;
+ data += 8;
/* Apply the SHA-512 compression function to update a..h */
T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + W512[j];
T2 = Sigma0_512(a) + Maj(a, b, c);
@@ -768,7 +746,7 @@ SHA512_Update(SHA512_CTX *context, const u_int8_t *data, size_t len)
ADDINC128(context->bitcount, freespace << 3);
len -= freespace;
data += freespace;
- SHA512_Transform(context, (u_int64_t *)context->buffer);
+ SHA512_Transform(context, context->buffer);
} else {
/* The buffer is not yet full */
bcopy(data, &context->buffer[usedspace], len);
@@ -780,7 +758,7 @@ SHA512_Update(SHA512_CTX *context, const u_int8_t *data, size_t len)
}
while (len >= SHA512_BLOCK_LENGTH) {
/* Process as many complete blocks as we can */
- SHA512_Transform(context, (const u_int64_t *)data);
+ SHA512_Transform(context, data);
ADDINC128(context->bitcount, SHA512_BLOCK_LENGTH << 3);
len -= SHA512_BLOCK_LENGTH;
data += SHA512_BLOCK_LENGTH;
@@ -817,7 +795,7 @@ SHA512_Last(SHA512_CTX *context)
bzero(&context->buffer[usedspace], SHA512_BLOCK_LENGTH - usedspace);
}
/* Do second-to-last transform: */
- SHA512_Transform(context, (u_int64_t *)context->buffer);
+ SHA512_Transform(context, context->buffer);
/* And set-up for the last transform: */
bzero(context->buffer, SHA512_BLOCK_LENGTH - 2);
@@ -834,7 +812,7 @@ SHA512_Last(SHA512_CTX *context)
*(u_int64_t *)&context->buffer[SHA512_SHORT_BLOCK_LENGTH+8] = context->bitcount[0];
/* Final transform: */
- SHA512_Transform(context, (u_int64_t *)context->buffer);
+ SHA512_Transform(context, context->buffer);
}
void
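
With the byte-pointer interface, SHA256_Update and SHA256_Final pass the caller's data and context->buffer straight through, with no casts. A minimal caller sketch against the API shown in the diff, assuming the usual <crypto/sha2.h> declarations; the message bytes here are illustrative.

	#include <sys/types.h>
	#include <crypto/sha2.h>

	void
	digest_example(void)
	{
		SHA256_CTX ctx;
		u_int8_t md[SHA256_DIGEST_LENGTH];	/* 32-byte digest */
		static const u_int8_t msg[3] = { 'a', 'b', 'c' };

		SHA256_Init(&ctx);
		SHA256_Update(&ctx, msg, sizeof(msg));	/* feed message bytes */
		SHA256_Final(md, &ctx);			/* md now holds the digest */
	}
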