Diffstat (limited to 'usr.bin')
-rw-r--r--   usr.bin/ssh/cipher.c     67
-rw-r--r--   usr.bin/ssh/cipher.h     10
-rw-r--r--   usr.bin/ssh/rijndael.c  771
-rw-r--r--   usr.bin/ssh/rijndael.h   46
4 files changed, 537 insertions, 357 deletions
diff --git a/usr.bin/ssh/cipher.c b/usr.bin/ssh/cipher.c
index 46ca830e3b9..788028c1353 100644
--- a/usr.bin/ssh/cipher.c
+++ b/usr.bin/ssh/cipher.c
@@ -35,7 +35,7 @@
*/
#include "includes.h"
-RCSID("$OpenBSD: cipher.c,v 1.39 2000/12/06 23:05:42 markus Exp $");
+RCSID("$OpenBSD: cipher.c,v 1.40 2000/12/09 13:41:52 markus Exp $");
#include "ssh.h"
#include "xmalloc.h"
@@ -285,45 +285,40 @@ cast_cbc_decrypt(CipherContext *cc, u_char *dest, const u_char *src, u_int len)
/* RIJNDAEL */
#define RIJNDAEL_BLOCKSIZE 16
-
void
rijndael_setkey(CipherContext *cc, const u_char *key, u_int keylen)
{
- if (rijndael_makekey(&cc->u.rijndael.enc, RIJNDAEL_ENCRYPT,
- 8*keylen, (char *)key) == -1)
- fatal("rijndael_setkey: RIJNDAEL_ENCRYPT");
- if (rijndael_makekey(&cc->u.rijndael.dec, RIJNDAEL_DECRYPT,
- 8*keylen, (char *)key) == -1)
- fatal("rijndael_setkey: RIJNDAEL_DECRYPT");
+ rijndael_set_key(&cc->u.rijndael.enc, (u4byte *)key, 8*keylen, 1);
+ rijndael_set_key(&cc->u.rijndael.dec, (u4byte *)key, 8*keylen, 0);
}
void
rijndael_setiv(CipherContext *cc, const u_char *iv, u_int ivlen)
{
- if (iv == NULL || ivlen != RIJNDAEL_BLOCKSIZE)
- fatal("bad/no IV for %s.", cc->cipher->name);
- memcpy(cc->u.rijndael.iv, iv, RIJNDAEL_BLOCKSIZE);
+ if (iv == NULL)
+ fatal("no IV for %s.", cc->cipher->name);
+ memcpy((u_char *)cc->u.rijndael.iv, iv, RIJNDAEL_BLOCKSIZE);
}
-
void
rijndael_cbc_encrypt(CipherContext *cc, u_char *dest, const u_char *src,
u_int len)
{
- rijndael_key *ctx = &cc->u.rijndael.enc;
- u_char *iv = cc->u.rijndael.iv;
- u_char in[RIJNDAEL_BLOCKSIZE];
- u_char *cprev, *cnow, *plain;
- int i, j, blocks = len / RIJNDAEL_BLOCKSIZE;
+ rijndael_ctx *ctx = &cc->u.rijndael.enc;
+ u4byte *iv = cc->u.rijndael.iv;
+ u4byte in[4];
+ u4byte *cprev, *cnow, *plain;
+ int i, blocks = len / RIJNDAEL_BLOCKSIZE;
if (len == 0)
return;
if (len % RIJNDAEL_BLOCKSIZE)
fatal("rijndael_cbc_encrypt: bad len %d", len);
- cnow = dest;
- plain = (u_char *) src;
+ cnow = (u4byte*) dest;
+ plain = (u4byte*) src;
cprev = iv;
- for(i = 0; i < blocks; i++, plain+=RIJNDAEL_BLOCKSIZE,
- cnow+=RIJNDAEL_BLOCKSIZE) {
- for (j = 0; j < RIJNDAEL_BLOCKSIZE; j++)
- in[j] = plain[j] ^ cprev[j];
+ for(i = 0; i < blocks; i++, plain+=4, cnow+=4) {
+ in[0] = plain[0] ^ cprev[0];
+ in[1] = plain[1] ^ cprev[1];
+ in[2] = plain[2] ^ cprev[2];
+ in[3] = plain[3] ^ cprev[3];
rijndael_encrypt(ctx, in, cnow);
cprev = cnow;
}
@@ -334,25 +329,25 @@ void
rijndael_cbc_decrypt(CipherContext *cc, u_char *dest, const u_char *src,
u_int len)
{
- rijndael_key *ctx = &cc->u.rijndael.dec;
- u_char *iv = cc->u.rijndael.iv;
- u_char ivsaved[RIJNDAEL_BLOCKSIZE];
- u_char *cnow = (u_char *) (src+len-RIJNDAEL_BLOCKSIZE);
- u_char *plain = dest+len-RIJNDAEL_BLOCKSIZE;
- u_char *ivp;
- int i, j, blocks = len / RIJNDAEL_BLOCKSIZE;
+ rijndael_ctx *ctx = &cc->u.rijndael.dec;
+ u4byte *iv = cc->u.rijndael.iv;
+ u4byte ivsaved[4];
+ u4byte *cnow = (u4byte*) (src+len-RIJNDAEL_BLOCKSIZE);
+ u4byte *plain = (u4byte*) (dest+len-RIJNDAEL_BLOCKSIZE);
+ u4byte *ivp;
+ int i, blocks = len / RIJNDAEL_BLOCKSIZE;
if (len == 0)
return;
if (len % RIJNDAEL_BLOCKSIZE)
fatal("rijndael_cbc_decrypt: bad len %d", len);
memcpy(ivsaved, cnow, RIJNDAEL_BLOCKSIZE);
- for(i = blocks; i > 0; i--, cnow-=RIJNDAEL_BLOCKSIZE,
- plain-=RIJNDAEL_BLOCKSIZE) {
+ for(i = blocks; i > 0; i--, cnow-=4, plain-=4) {
rijndael_decrypt(ctx, cnow, plain);
- //rijndael_decrypt(cnow, plain, ctx->keySched, ctx->ROUNDS);
- ivp = (i == 1) ? iv : cnow-RIJNDAEL_BLOCKSIZE;
- for (j = 0; j < RIJNDAEL_BLOCKSIZE; j++)
- plain[j] ^= ivp[j];
+ ivp = (i == 1) ? iv : cnow-4;
+ plain[0] ^= ivp[0];
+ plain[1] ^= ivp[1];
+ plain[2] ^= ivp[2];
+ plain[3] ^= ivp[3];
}
memcpy(iv, ivsaved, RIJNDAEL_BLOCKSIZE);
}
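
The rewritten CBC routines above treat each 16-byte block as four 32-bit words (u4byte), so the chaining XOR is unrolled instead of looping over 16 bytes, and the block cipher is now driven through the Gladman-style rijndael_set_key()/rijndael_encrypt() interface. For reference, a minimal standalone sketch of the same chaining outside the CipherContext plumbing; it is not part of this commit, and the function name and parameter layout are illustrative only (it assumes 4-byte aligned buffers, as cipher.c does).

/*
 * Standalone sketch, not part of this commit: word-wise CBC chaining
 * as in rijndael_cbc_encrypt() above, driven directly through the
 * new interface.  Name and parameters are illustrative only.
 */
#include <sys/types.h>
#include <string.h>
#include "rijndael.h"

static void
cbc_encrypt_sketch(const u_char *key, u_int keybytes, u4byte iv[4],
    u_char *dst, const u_char *src, u_int len)
{
	rijndael_ctx ctx;
	u4byte in[4], *cprev, *cnow;
	const u4byte *plain;
	u_int i, blocks = len / 16;

	if (len == 0 || len % 16)
		return;				/* whole blocks only */
	rijndael_set_key(&ctx, (const u4byte *)key, 8 * keybytes, 1);
	cprev = iv;
	cnow = (u4byte *)dst;
	plain = (const u4byte *)src;
	for (i = 0; i < blocks; i++, plain += 4, cnow += 4) {
		in[0] = plain[0] ^ cprev[0];	/* XOR in IV or previous */
		in[1] = plain[1] ^ cprev[1];	/* ciphertext, word-wise */
		in[2] = plain[2] ^ cprev[2];
		in[3] = plain[3] ^ cprev[3];
		rijndael_encrypt(&ctx, in, cnow);
		cprev = cnow;			/* chain to next block */
	}
	memcpy(iv, cprev, 16);			/* carry IV across calls */
}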
diff --git a/usr.bin/ssh/cipher.h b/usr.bin/ssh/cipher.h
index 0223ace3fe9..752d0347e13 100644
--- a/usr.bin/ssh/cipher.h
+++ b/usr.bin/ssh/cipher.h
@@ -32,7 +32,7 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/* RCSID("$OpenBSD: cipher.h,v 1.23 2000/12/06 23:05:42 markus Exp $"); */
+/* RCSID("$OpenBSD: cipher.h,v 1.24 2000/12/09 13:41:52 markus Exp $"); */
#ifndef CIPHER_H
#define CIPHER_H
@@ -41,9 +41,7 @@
#include <openssl/blowfish.h>
#include <openssl/rc4.h>
#include <openssl/cast.h>
-
#include "rijndael.h"
-
/*
* Cipher types for SSH-1. New types can be added, but old types should not
* be removed for compatibility. The maximum allowed value is 31.
@@ -86,9 +84,9 @@ struct CipherContext {
u_char iv[8];
} cast;
struct {
- u_char iv[16];
- rijndael_key enc;
- rijndael_key dec;
+ u4byte iv[4];
+ rijndael_ctx enc;
+ rijndael_ctx dec;
} rijndael;
RC4_KEY rc4;
} u;
diff --git a/usr.bin/ssh/rijndael.c b/usr.bin/ssh/rijndael.c
index 27c1f81bc7a..28579a99216 100644
--- a/usr.bin/ssh/rijndael.c
+++ b/usr.bin/ssh/rijndael.c
@@ -1,310 +1,493 @@
-/*
- * rijndael-alg-fst.c v2.4 April '2000
- * rijndael-alg-api.c v2.4 April '2000
- *
- * Optimised ANSI C code
- *
- * authors: v1.0: Antoon Bosselaers
- * v2.0: Vincent Rijmen, K.U.Leuven
- * v2.3: Paulo Barreto
- * v2.4: Vincent Rijmen, K.U.Leuven
- *
- * This code is placed in the public domain.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <assert.h>
+/* $OpenBSD: rijndael.c,v 1.5 2000/12/09 13:41:51 markus Exp $ */
+/* This is an independent implementation of the encryption algorithm: */
+/* */
+/* RIJNDAEL by Joan Daemen and Vincent Rijmen */
+/* */
+/* which is a candidate algorithm in the Advanced Encryption Standard */
+/* programme of the US National Institute of Standards and Technology. */
+/* */
+/* Copyright in this implementation is held by Dr B R Gladman but I */
+/* hereby give permission for its free direct or derivative use subject */
+/* to acknowledgment of its origin and compliance with any conditions */
+/* that the originators of the algorithm place on its exploitation. */
+/* */
+/* Dr Brian Gladman (gladman@seven77.demon.co.uk) 14th January 1999 */
+
+/* Timing data for Rijndael (rijndael.c)
+
+Algorithm: rijndael (rijndael.c)
+
+128 bit key:
+Key Setup: 305/1389 cycles (encrypt/decrypt)
+Encrypt: 374 cycles = 68.4 mbits/sec
+Decrypt: 352 cycles = 72.7 mbits/sec
+Mean: 363 cycles = 70.5 mbits/sec
+
+192 bit key:
+Key Setup: 277/1595 cycles (encrypt/decrypt)
+Encrypt: 439 cycles = 58.3 mbits/sec
+Decrypt: 425 cycles = 60.2 mbits/sec
+Mean: 432 cycles = 59.3 mbits/sec
+
+256 bit key:
+Key Setup: 374/1960 cycles (encrypt/decrypt)
+Encrypt: 502 cycles = 51.0 mbits/sec
+Decrypt: 498 cycles = 51.4 mbits/sec
+Mean: 500 cycles = 51.2 mbits/sec
+
+*/
+
+#include <sys/types.h>
#include "rijndael.h"
-#include "rijndael_boxes.h"
-int
-rijndael_keysched(u_int8_t k[RIJNDAEL_MAXKC][4],
- u_int8_t W[RIJNDAEL_MAXROUNDS+1][4][4], int ROUNDS)
+void gen_tabs __P((void));
+
+/* 3. Basic macros for speeding up generic operations */
+
+/* Circular rotate of 32 bit values */
+
+#define rotr(x,n) (((x) >> ((int)(n))) | ((x) << (32 - (int)(n))))
+#define rotl(x,n) (((x) << ((int)(n))) | ((x) >> (32 - (int)(n))))
+
+/* Invert byte order in a 32 bit variable */
+
+#define bswap(x) (rotl(x, 8) & 0x00ff00ff | rotr(x, 8) & 0xff00ff00)
+
+/* Extract byte from a 32 bit quantity (little endian notation) */
+
+#define byte(x,n) ((u1byte)((x) >> (8 * n)))
+
+#if BYTE_ORDER != LITTLE_ENDIAN
+#define BLOCK_SWAP
+#endif
+
+/* For inverting byte order in input/output 32 bit words if needed */
+
+#ifdef BLOCK_SWAP
+#define BYTE_SWAP
+#define WORD_SWAP
+#endif
+
+#ifdef BYTE_SWAP
+#define io_swap(x) bswap(x)
+#else
+#define io_swap(x) (x)
+#endif
+
+/* For inverting the byte order of input/output blocks if needed */
+
+#ifdef WORD_SWAP
+
+#define get_block(x) \
+ ((u4byte*)(x))[0] = io_swap(in_blk[3]); \
+ ((u4byte*)(x))[1] = io_swap(in_blk[2]); \
+ ((u4byte*)(x))[2] = io_swap(in_blk[1]); \
+ ((u4byte*)(x))[3] = io_swap(in_blk[0])
+
+#define put_block(x) \
+ out_blk[3] = io_swap(((u4byte*)(x))[0]); \
+ out_blk[2] = io_swap(((u4byte*)(x))[1]); \
+ out_blk[1] = io_swap(((u4byte*)(x))[2]); \
+ out_blk[0] = io_swap(((u4byte*)(x))[3])
+
+#define get_key(x,len) \
+ ((u4byte*)(x))[4] = ((u4byte*)(x))[5] = \
+ ((u4byte*)(x))[6] = ((u4byte*)(x))[7] = 0; \
+ switch((((len) + 63) / 64)) { \
+ case 2: \
+ ((u4byte*)(x))[0] = io_swap(in_key[3]); \
+ ((u4byte*)(x))[1] = io_swap(in_key[2]); \
+ ((u4byte*)(x))[2] = io_swap(in_key[1]); \
+ ((u4byte*)(x))[3] = io_swap(in_key[0]); \
+ break; \
+ case 3: \
+ ((u4byte*)(x))[0] = io_swap(in_key[5]); \
+ ((u4byte*)(x))[1] = io_swap(in_key[4]); \
+ ((u4byte*)(x))[2] = io_swap(in_key[3]); \
+ ((u4byte*)(x))[3] = io_swap(in_key[2]); \
+ ((u4byte*)(x))[4] = io_swap(in_key[1]); \
+ ((u4byte*)(x))[5] = io_swap(in_key[0]); \
+ break; \
+ case 4: \
+ ((u4byte*)(x))[0] = io_swap(in_key[7]); \
+ ((u4byte*)(x))[1] = io_swap(in_key[6]); \
+ ((u4byte*)(x))[2] = io_swap(in_key[5]); \
+ ((u4byte*)(x))[3] = io_swap(in_key[4]); \
+ ((u4byte*)(x))[4] = io_swap(in_key[3]); \
+ ((u4byte*)(x))[5] = io_swap(in_key[2]); \
+ ((u4byte*)(x))[6] = io_swap(in_key[1]); \
+ ((u4byte*)(x))[7] = io_swap(in_key[0]); \
+ }
+
+#else
+
+#define get_block(x) \
+ ((u4byte*)(x))[0] = io_swap(in_blk[0]); \
+ ((u4byte*)(x))[1] = io_swap(in_blk[1]); \
+ ((u4byte*)(x))[2] = io_swap(in_blk[2]); \
+ ((u4byte*)(x))[3] = io_swap(in_blk[3])
+
+#define put_block(x) \
+ out_blk[0] = io_swap(((u4byte*)(x))[0]); \
+ out_blk[1] = io_swap(((u4byte*)(x))[1]); \
+ out_blk[2] = io_swap(((u4byte*)(x))[2]); \
+ out_blk[3] = io_swap(((u4byte*)(x))[3])
+
+#define get_key(x,len) \
+ ((u4byte*)(x))[4] = ((u4byte*)(x))[5] = \
+ ((u4byte*)(x))[6] = ((u4byte*)(x))[7] = 0; \
+ switch((((len) + 63) / 64)) { \
+ case 4: \
+ ((u4byte*)(x))[6] = io_swap(in_key[6]); \
+ ((u4byte*)(x))[7] = io_swap(in_key[7]); \
+ case 3: \
+ ((u4byte*)(x))[4] = io_swap(in_key[4]); \
+ ((u4byte*)(x))[5] = io_swap(in_key[5]); \
+ case 2: \
+ ((u4byte*)(x))[0] = io_swap(in_key[0]); \
+ ((u4byte*)(x))[1] = io_swap(in_key[1]); \
+ ((u4byte*)(x))[2] = io_swap(in_key[2]); \
+ ((u4byte*)(x))[3] = io_swap(in_key[3]); \
+ }
+
+#endif
+
+#define LARGE_TABLES
+
+u1byte pow_tab[256];
+u1byte log_tab[256];
+u1byte sbx_tab[256];
+u1byte isb_tab[256];
+u4byte rco_tab[ 10];
+u4byte ft_tab[4][256];
+u4byte it_tab[4][256];
+
+#ifdef LARGE_TABLES
+ u4byte fl_tab[4][256];
+ u4byte il_tab[4][256];
+#endif
+
+u4byte tab_gen = 0;
+
+#define ff_mult(a,b) (a && b ? pow_tab[(log_tab[a] + log_tab[b]) % 255] : 0)
+
+#define f_rn(bo, bi, n, k) \
+ bo[n] = ft_tab[0][byte(bi[n],0)] ^ \
+ ft_tab[1][byte(bi[(n + 1) & 3],1)] ^ \
+ ft_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
+ ft_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)
+
+#define i_rn(bo, bi, n, k) \
+ bo[n] = it_tab[0][byte(bi[n],0)] ^ \
+ it_tab[1][byte(bi[(n + 3) & 3],1)] ^ \
+ it_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
+ it_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)
+
+#ifdef LARGE_TABLES
+
+#define ls_box(x) \
+ ( fl_tab[0][byte(x, 0)] ^ \
+ fl_tab[1][byte(x, 1)] ^ \
+ fl_tab[2][byte(x, 2)] ^ \
+ fl_tab[3][byte(x, 3)] )
+
+#define f_rl(bo, bi, n, k) \
+ bo[n] = fl_tab[0][byte(bi[n],0)] ^ \
+ fl_tab[1][byte(bi[(n + 1) & 3],1)] ^ \
+ fl_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
+ fl_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)
+
+#define i_rl(bo, bi, n, k) \
+ bo[n] = il_tab[0][byte(bi[n],0)] ^ \
+ il_tab[1][byte(bi[(n + 3) & 3],1)] ^ \
+ il_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
+ il_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)
+
+#else
+
+#define ls_box(x) \
+ ((u4byte)sbx_tab[byte(x, 0)] << 0) ^ \
+ ((u4byte)sbx_tab[byte(x, 1)] << 8) ^ \
+ ((u4byte)sbx_tab[byte(x, 2)] << 16) ^ \
+ ((u4byte)sbx_tab[byte(x, 3)] << 24)
+
+#define f_rl(bo, bi, n, k) \
+ bo[n] = (u4byte)sbx_tab[byte(bi[n],0)] ^ \
+ rotl(((u4byte)sbx_tab[byte(bi[(n + 1) & 3],1)]), 8) ^ \
+ rotl(((u4byte)sbx_tab[byte(bi[(n + 2) & 3],2)]), 16) ^ \
+ rotl(((u4byte)sbx_tab[byte(bi[(n + 3) & 3],3)]), 24) ^ *(k + n)
+
+#define i_rl(bo, bi, n, k) \
+ bo[n] = (u4byte)isb_tab[byte(bi[n],0)] ^ \
+ rotl(((u4byte)isb_tab[byte(bi[(n + 3) & 3],1)]), 8) ^ \
+ rotl(((u4byte)isb_tab[byte(bi[(n + 2) & 3],2)]), 16) ^ \
+ rotl(((u4byte)isb_tab[byte(bi[(n + 1) & 3],3)]), 24) ^ *(k + n)
+
+#endif
+
+void
+gen_tabs(void)
{
- /* Calculate the necessary round keys
- * The number of calculations depends on keyBits and blockBits
- */
- int j, r, t, rconpointer = 0;
- u_int8_t tk[RIJNDAEL_MAXKC][4];
- int KC = ROUNDS - 6;
-
- for (j = KC-1; j >= 0; j--) {
- *((u_int32_t*)tk[j]) = *((u_int32_t*)k[j]);
+ u4byte i, t;
+ u1byte p, q;
+
+ /* log and power tables for GF(2**8) finite field with */
+ /* 0x11b as modular polynomial - the simplest prmitive */
+ /* root is 0x11, used here to generate the tables */
+
+ for(i = 0,p = 1; i < 256; ++i) {
+ pow_tab[i] = (u1byte)p; log_tab[p] = (u1byte)i;
+
+ p = p ^ (p << 1) ^ (p & 0x80 ? 0x01b : 0);
}
- r = 0;
- t = 0;
- /* copy values into round key array */
- for (j = 0; (j < KC) && (r < ROUNDS + 1); ) {
- for (; (j < KC) && (t < 4); j++, t++) {
- *((u_int32_t*)W[r][t]) = *((u_int32_t*)tk[j]);
- }
- if (t == 4) {
- r++;
- t = 0;
- }
+
+ log_tab[1] = 0; p = 1;
+
+ for(i = 0; i < 10; ++i) {
+ rco_tab[i] = p;
+
+ p = (p << 1) ^ (p & 0x80 ? 0x1b : 0);
}
-
- while (r < ROUNDS + 1) { /* while not enough round key material calculated */
- /* calculate new values */
- tk[0][0] ^= S[tk[KC-1][1]];
- tk[0][1] ^= S[tk[KC-1][2]];
- tk[0][2] ^= S[tk[KC-1][3]];
- tk[0][3] ^= S[tk[KC-1][0]];
- tk[0][0] ^= rcon[rconpointer++];
-
- if (KC != 8) {
- for (j = 1; j < KC; j++) {
- *((u_int32_t*)tk[j]) ^= *((u_int32_t*)tk[j-1]);
- }
- } else {
- for (j = 1; j < KC/2; j++) {
- *((u_int32_t*)tk[j]) ^= *((u_int32_t*)tk[j-1]);
- }
- tk[KC/2][0] ^= S[tk[KC/2 - 1][0]];
- tk[KC/2][1] ^= S[tk[KC/2 - 1][1]];
- tk[KC/2][2] ^= S[tk[KC/2 - 1][2]];
- tk[KC/2][3] ^= S[tk[KC/2 - 1][3]];
- for (j = KC/2 + 1; j < KC; j++) {
- *((u_int32_t*)tk[j]) ^= *((u_int32_t*)tk[j-1]);
- }
- }
- /* copy values into round key array */
- for (j = 0; (j < KC) && (r < ROUNDS + 1); ) {
- for (; (j < KC) && (t < 4); j++, t++) {
- *((u_int32_t*)W[r][t]) = *((u_int32_t*)tk[j]);
- }
- if (t == 4) {
- r++;
- t = 0;
- }
- }
- }
- return 0;
-}
-int
-rijndael_key_enc_to_dec(u_int8_t W[RIJNDAEL_MAXROUNDS+1][4][4], int ROUNDS)
-{
- int r;
- u_int8_t *w;
-
- for (r = 1; r < ROUNDS; r++) {
- w = W[r][0];
- *((u_int32_t*)w) = *((u_int32_t*)U1[w[0]])
- ^ *((u_int32_t*)U2[w[1]])
- ^ *((u_int32_t*)U3[w[2]])
- ^ *((u_int32_t*)U4[w[3]]);
-
- w = W[r][1];
- *((u_int32_t*)w) = *((u_int32_t*)U1[w[0]])
- ^ *((u_int32_t*)U2[w[1]])
- ^ *((u_int32_t*)U3[w[2]])
- ^ *((u_int32_t*)U4[w[3]]);
-
- w = W[r][2];
- *((u_int32_t*)w) = *((u_int32_t*)U1[w[0]])
- ^ *((u_int32_t*)U2[w[1]])
- ^ *((u_int32_t*)U3[w[2]])
- ^ *((u_int32_t*)U4[w[3]]);
-
- w = W[r][3];
- *((u_int32_t*)w) = *((u_int32_t*)U1[w[0]])
- ^ *((u_int32_t*)U2[w[1]])
- ^ *((u_int32_t*)U3[w[2]])
- ^ *((u_int32_t*)U4[w[3]]);
+ /* note that the affine byte transformation matrix in */
+ /* rijndael specification is in big endian format with */
+ /* bit 0 as the most significant bit. In the remainder */
+ /* of the specification the bits are numbered from the */
+ /* least significant end of a byte. */
+
+ for(i = 0; i < 256; ++i) {
+ p = (i ? pow_tab[255 - log_tab[i]] : 0); q = p;
+ q = (q >> 7) | (q << 1); p ^= q;
+ q = (q >> 7) | (q << 1); p ^= q;
+ q = (q >> 7) | (q << 1); p ^= q;
+ q = (q >> 7) | (q << 1); p ^= q ^ 0x63;
+ sbx_tab[i] = (u1byte)p; isb_tab[p] = (u1byte)i;
}
- return 0;
-}
-
-/**
- * Encrypt a single block.
- */
-int
-rijndael_encrypt(rijndael_key *key, u_int8_t a[16], u_int8_t b[16])
-{
- u_int8_t (*rk)[4][4] = key->keySched;
- int ROUNDS = key->ROUNDS;
- int r;
- u_int8_t temp[4][4];
-
- *((u_int32_t*)temp[0]) = *((u_int32_t*)(a )) ^ *((u_int32_t*)rk[0][0]);
- *((u_int32_t*)temp[1]) = *((u_int32_t*)(a+ 4)) ^ *((u_int32_t*)rk[0][1]);
- *((u_int32_t*)temp[2]) = *((u_int32_t*)(a+ 8)) ^ *((u_int32_t*)rk[0][2]);
- *((u_int32_t*)temp[3]) = *((u_int32_t*)(a+12)) ^ *((u_int32_t*)rk[0][3]);
- *((u_int32_t*)(b )) = *((u_int32_t*)T1[temp[0][0]])
- ^ *((u_int32_t*)T2[temp[1][1]])
- ^ *((u_int32_t*)T3[temp[2][2]])
- ^ *((u_int32_t*)T4[temp[3][3]]);
- *((u_int32_t*)(b + 4)) = *((u_int32_t*)T1[temp[1][0]])
- ^ *((u_int32_t*)T2[temp[2][1]])
- ^ *((u_int32_t*)T3[temp[3][2]])
- ^ *((u_int32_t*)T4[temp[0][3]]);
- *((u_int32_t*)(b + 8)) = *((u_int32_t*)T1[temp[2][0]])
- ^ *((u_int32_t*)T2[temp[3][1]])
- ^ *((u_int32_t*)T3[temp[0][2]])
- ^ *((u_int32_t*)T4[temp[1][3]]);
- *((u_int32_t*)(b +12)) = *((u_int32_t*)T1[temp[3][0]])
- ^ *((u_int32_t*)T2[temp[0][1]])
- ^ *((u_int32_t*)T3[temp[1][2]])
- ^ *((u_int32_t*)T4[temp[2][3]]);
- for (r = 1; r < ROUNDS-1; r++) {
- *((u_int32_t*)temp[0]) = *((u_int32_t*)(b )) ^ *((u_int32_t*)rk[r][0]);
- *((u_int32_t*)temp[1]) = *((u_int32_t*)(b+ 4)) ^ *((u_int32_t*)rk[r][1]);
- *((u_int32_t*)temp[2]) = *((u_int32_t*)(b+ 8)) ^ *((u_int32_t*)rk[r][2]);
- *((u_int32_t*)temp[3]) = *((u_int32_t*)(b+12)) ^ *((u_int32_t*)rk[r][3]);
-
- *((u_int32_t*)(b )) = *((u_int32_t*)T1[temp[0][0]])
- ^ *((u_int32_t*)T2[temp[1][1]])
- ^ *((u_int32_t*)T3[temp[2][2]])
- ^ *((u_int32_t*)T4[temp[3][3]]);
- *((u_int32_t*)(b + 4)) = *((u_int32_t*)T1[temp[1][0]])
- ^ *((u_int32_t*)T2[temp[2][1]])
- ^ *((u_int32_t*)T3[temp[3][2]])
- ^ *((u_int32_t*)T4[temp[0][3]]);
- *((u_int32_t*)(b + 8)) = *((u_int32_t*)T1[temp[2][0]])
- ^ *((u_int32_t*)T2[temp[3][1]])
- ^ *((u_int32_t*)T3[temp[0][2]])
- ^ *((u_int32_t*)T4[temp[1][3]]);
- *((u_int32_t*)(b +12)) = *((u_int32_t*)T1[temp[3][0]])
- ^ *((u_int32_t*)T2[temp[0][1]])
- ^ *((u_int32_t*)T3[temp[1][2]])
- ^ *((u_int32_t*)T4[temp[2][3]]);
+
+ for(i = 0; i < 256; ++i) {
+ p = sbx_tab[i];
+
+#ifdef LARGE_TABLES
+
+ t = p; fl_tab[0][i] = t;
+ fl_tab[1][i] = rotl(t, 8);
+ fl_tab[2][i] = rotl(t, 16);
+ fl_tab[3][i] = rotl(t, 24);
+#endif
+ t = ((u4byte)ff_mult(2, p)) |
+ ((u4byte)p << 8) |
+ ((u4byte)p << 16) |
+ ((u4byte)ff_mult(3, p) << 24);
+
+ ft_tab[0][i] = t;
+ ft_tab[1][i] = rotl(t, 8);
+ ft_tab[2][i] = rotl(t, 16);
+ ft_tab[3][i] = rotl(t, 24);
+
+ p = isb_tab[i];
+
+#ifdef LARGE_TABLES
+
+ t = p; il_tab[0][i] = t;
+ il_tab[1][i] = rotl(t, 8);
+ il_tab[2][i] = rotl(t, 16);
+ il_tab[3][i] = rotl(t, 24);
+#endif
+ t = ((u4byte)ff_mult(14, p)) |
+ ((u4byte)ff_mult( 9, p) << 8) |
+ ((u4byte)ff_mult(13, p) << 16) |
+ ((u4byte)ff_mult(11, p) << 24);
+
+ it_tab[0][i] = t;
+ it_tab[1][i] = rotl(t, 8);
+ it_tab[2][i] = rotl(t, 16);
+ it_tab[3][i] = rotl(t, 24);
}
- /* last round is special */
- *((u_int32_t*)temp[0]) = *((u_int32_t*)(b )) ^ *((u_int32_t*)rk[ROUNDS-1][0]);
- *((u_int32_t*)temp[1]) = *((u_int32_t*)(b+ 4)) ^ *((u_int32_t*)rk[ROUNDS-1][1]);
- *((u_int32_t*)temp[2]) = *((u_int32_t*)(b+ 8)) ^ *((u_int32_t*)rk[ROUNDS-1][2]);
- *((u_int32_t*)temp[3]) = *((u_int32_t*)(b+12)) ^ *((u_int32_t*)rk[ROUNDS-1][3]);
- b[ 0] = T1[temp[0][0]][1];
- b[ 1] = T1[temp[1][1]][1];
- b[ 2] = T1[temp[2][2]][1];
- b[ 3] = T1[temp[3][3]][1];
- b[ 4] = T1[temp[1][0]][1];
- b[ 5] = T1[temp[2][1]][1];
- b[ 6] = T1[temp[3][2]][1];
- b[ 7] = T1[temp[0][3]][1];
- b[ 8] = T1[temp[2][0]][1];
- b[ 9] = T1[temp[3][1]][1];
- b[10] = T1[temp[0][2]][1];
- b[11] = T1[temp[1][3]][1];
- b[12] = T1[temp[3][0]][1];
- b[13] = T1[temp[0][1]][1];
- b[14] = T1[temp[1][2]][1];
- b[15] = T1[temp[2][3]][1];
- *((u_int32_t*)(b )) ^= *((u_int32_t*)rk[ROUNDS][0]);
- *((u_int32_t*)(b+ 4)) ^= *((u_int32_t*)rk[ROUNDS][1]);
- *((u_int32_t*)(b+ 8)) ^= *((u_int32_t*)rk[ROUNDS][2]);
- *((u_int32_t*)(b+12)) ^= *((u_int32_t*)rk[ROUNDS][3]);
-
- return 0;
+
+ tab_gen = 1;
}
-/**
- * Decrypt a single block.
- */
-int
-rijndael_decrypt(rijndael_key *key, u_int8_t a[16], u_int8_t b[16])
-{
- u_int8_t (*rk)[4][4] = key->keySched;
- int ROUNDS = key->ROUNDS;
- int r;
- u_int8_t temp[4][4];
+#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
+
+#define imix_col(y,x) \
+ u = star_x(x); \
+ v = star_x(u); \
+ w = star_x(v); \
+ t = w ^ (x); \
+ (y) = u ^ v ^ w; \
+ (y) ^= rotr(u ^ t, 8) ^ \
+ rotr(v ^ t, 16) ^ \
+ rotr(t,24)
+
+/* initialise the key schedule from the user supplied key */
+
+#define loop4(i) \
+{ t = ls_box(rotr(t, 8)) ^ rco_tab[i]; \
+ t ^= e_key[4 * i]; e_key[4 * i + 4] = t; \
+ t ^= e_key[4 * i + 1]; e_key[4 * i + 5] = t; \
+ t ^= e_key[4 * i + 2]; e_key[4 * i + 6] = t; \
+ t ^= e_key[4 * i + 3]; e_key[4 * i + 7] = t; \
+}
+
+#define loop6(i) \
+{ t = ls_box(rotr(t, 8)) ^ rco_tab[i]; \
+ t ^= e_key[6 * i]; e_key[6 * i + 6] = t; \
+ t ^= e_key[6 * i + 1]; e_key[6 * i + 7] = t; \
+ t ^= e_key[6 * i + 2]; e_key[6 * i + 8] = t; \
+ t ^= e_key[6 * i + 3]; e_key[6 * i + 9] = t; \
+ t ^= e_key[6 * i + 4]; e_key[6 * i + 10] = t; \
+ t ^= e_key[6 * i + 5]; e_key[6 * i + 11] = t; \
+}
+
+#define loop8(i) \
+{ t = ls_box(rotr(t, 8)) ^ rco_tab[i]; \
+ t ^= e_key[8 * i]; e_key[8 * i + 8] = t; \
+ t ^= e_key[8 * i + 1]; e_key[8 * i + 9] = t; \
+ t ^= e_key[8 * i + 2]; e_key[8 * i + 10] = t; \
+ t ^= e_key[8 * i + 3]; e_key[8 * i + 11] = t; \
+ t = e_key[8 * i + 4] ^ ls_box(t); \
+ e_key[8 * i + 12] = t; \
+ t ^= e_key[8 * i + 5]; e_key[8 * i + 13] = t; \
+ t ^= e_key[8 * i + 6]; e_key[8 * i + 14] = t; \
+ t ^= e_key[8 * i + 7]; e_key[8 * i + 15] = t; \
+}
+
+rijndael_ctx *
+rijndael_set_key(rijndael_ctx *ctx, const u4byte *in_key, const u4byte key_len,
+ int encrypt)
+{
+ u4byte i, t, u, v, w;
+ u4byte *e_key = ctx->e_key;
+ u4byte *d_key = ctx->d_key;
+
+ ctx->decrypt = !encrypt;
+
+ if(!tab_gen)
+ gen_tabs();
+
+ ctx->k_len = (key_len + 31) / 32;
+
+ e_key[0] = in_key[0]; e_key[1] = in_key[1];
+ e_key[2] = in_key[2]; e_key[3] = in_key[3];
- *((u_int32_t*)temp[0]) = *((u_int32_t*)(a )) ^ *((u_int32_t*)rk[ROUNDS][0]);
- *((u_int32_t*)temp[1]) = *((u_int32_t*)(a+ 4)) ^ *((u_int32_t*)rk[ROUNDS][1]);
- *((u_int32_t*)temp[2]) = *((u_int32_t*)(a+ 8)) ^ *((u_int32_t*)rk[ROUNDS][2]);
- *((u_int32_t*)temp[3]) = *((u_int32_t*)(a+12)) ^ *((u_int32_t*)rk[ROUNDS][3]);
-
- *((u_int32_t*)(b )) = *((u_int32_t*)T5[temp[0][0]])
- ^ *((u_int32_t*)T6[temp[3][1]])
- ^ *((u_int32_t*)T7[temp[2][2]])
- ^ *((u_int32_t*)T8[temp[1][3]]);
- *((u_int32_t*)(b+ 4)) = *((u_int32_t*)T5[temp[1][0]])
- ^ *((u_int32_t*)T6[temp[0][1]])
- ^ *((u_int32_t*)T7[temp[3][2]])
- ^ *((u_int32_t*)T8[temp[2][3]]);
- *((u_int32_t*)(b+ 8)) = *((u_int32_t*)T5[temp[2][0]])
- ^ *((u_int32_t*)T6[temp[1][1]])
- ^ *((u_int32_t*)T7[temp[0][2]])
- ^ *((u_int32_t*)T8[temp[3][3]]);
- *((u_int32_t*)(b+12)) = *((u_int32_t*)T5[temp[3][0]])
- ^ *((u_int32_t*)T6[temp[2][1]])
- ^ *((u_int32_t*)T7[temp[1][2]])
- ^ *((u_int32_t*)T8[temp[0][3]]);
- for (r = ROUNDS-1; r > 1; r--) {
- *((u_int32_t*)temp[0]) = *((u_int32_t*)(b )) ^ *((u_int32_t*)rk[r][0]);
- *((u_int32_t*)temp[1]) = *((u_int32_t*)(b+ 4)) ^ *((u_int32_t*)rk[r][1]);
- *((u_int32_t*)temp[2]) = *((u_int32_t*)(b+ 8)) ^ *((u_int32_t*)rk[r][2]);
- *((u_int32_t*)temp[3]) = *((u_int32_t*)(b+12)) ^ *((u_int32_t*)rk[r][3]);
- *((u_int32_t*)(b )) = *((u_int32_t*)T5[temp[0][0]])
- ^ *((u_int32_t*)T6[temp[3][1]])
- ^ *((u_int32_t*)T7[temp[2][2]])
- ^ *((u_int32_t*)T8[temp[1][3]]);
- *((u_int32_t*)(b+ 4)) = *((u_int32_t*)T5[temp[1][0]])
- ^ *((u_int32_t*)T6[temp[0][1]])
- ^ *((u_int32_t*)T7[temp[3][2]])
- ^ *((u_int32_t*)T8[temp[2][3]]);
- *((u_int32_t*)(b+ 8)) = *((u_int32_t*)T5[temp[2][0]])
- ^ *((u_int32_t*)T6[temp[1][1]])
- ^ *((u_int32_t*)T7[temp[0][2]])
- ^ *((u_int32_t*)T8[temp[3][3]]);
- *((u_int32_t*)(b+12)) = *((u_int32_t*)T5[temp[3][0]])
- ^ *((u_int32_t*)T6[temp[2][1]])
- ^ *((u_int32_t*)T7[temp[1][2]])
- ^ *((u_int32_t*)T8[temp[0][3]]);
+ switch(ctx->k_len) {
+ case 4: t = e_key[3];
+ for(i = 0; i < 10; ++i)
+ loop4(i);
+ break;
+
+ case 6: e_key[4] = in_key[4]; t = e_key[5] = in_key[5];
+ for(i = 0; i < 8; ++i)
+ loop6(i);
+ break;
+
+ case 8: e_key[4] = in_key[4]; e_key[5] = in_key[5];
+ e_key[6] = in_key[6]; t = e_key[7] = in_key[7];
+ for(i = 0; i < 7; ++i)
+ loop8(i);
+ break;
+ }
+
+ if (!encrypt) {
+ d_key[0] = e_key[0]; d_key[1] = e_key[1];
+ d_key[2] = e_key[2]; d_key[3] = e_key[3];
+
+ for(i = 4; i < 4 * ctx->k_len + 24; ++i) {
+ imix_col(d_key[i], e_key[i]);
+ }
}
- /* last round is special */
- *((u_int32_t*)temp[0]) = *((u_int32_t*)(b )) ^ *((u_int32_t*)rk[1][0]);
- *((u_int32_t*)temp[1]) = *((u_int32_t*)(b+ 4)) ^ *((u_int32_t*)rk[1][1]);
- *((u_int32_t*)temp[2]) = *((u_int32_t*)(b+ 8)) ^ *((u_int32_t*)rk[1][2]);
- *((u_int32_t*)temp[3]) = *((u_int32_t*)(b+12)) ^ *((u_int32_t*)rk[1][3]);
- b[ 0] = S5[temp[0][0]];
- b[ 1] = S5[temp[3][1]];
- b[ 2] = S5[temp[2][2]];
- b[ 3] = S5[temp[1][3]];
- b[ 4] = S5[temp[1][0]];
- b[ 5] = S5[temp[0][1]];
- b[ 6] = S5[temp[3][2]];
- b[ 7] = S5[temp[2][3]];
- b[ 8] = S5[temp[2][0]];
- b[ 9] = S5[temp[1][1]];
- b[10] = S5[temp[0][2]];
- b[11] = S5[temp[3][3]];
- b[12] = S5[temp[3][0]];
- b[13] = S5[temp[2][1]];
- b[14] = S5[temp[1][2]];
- b[15] = S5[temp[0][3]];
- *((u_int32_t*)(b )) ^= *((u_int32_t*)rk[0][0]);
- *((u_int32_t*)(b+ 4)) ^= *((u_int32_t*)rk[0][1]);
- *((u_int32_t*)(b+ 8)) ^= *((u_int32_t*)rk[0][2]);
- *((u_int32_t*)(b+12)) ^= *((u_int32_t*)rk[0][3]);
-
- return 0;
+
+ return ctx;
}
-int
-rijndael_makekey(rijndael_key *key, int direction, int keyLen, u_int8_t *keyMaterial)
-{
- u_int8_t k[RIJNDAEL_MAXKC][4];
- int i;
-
- if (key == NULL)
- return -1;
- if ((direction != RIJNDAEL_ENCRYPT) && (direction != RIJNDAEL_DECRYPT))
- return -1;
- if ((keyLen != 128) && (keyLen != 192) && (keyLen != 256))
- return -1;
-
- key->ROUNDS = keyLen/32 + 6;
-
- /* initialize key schedule: */
- for (i = 0; i < keyLen/8; i++)
- k[i >> 2][i & 3] = (u_int8_t)keyMaterial[i];
-
- rijndael_keysched(k, key->keySched, key->ROUNDS);
- if (direction == RIJNDAEL_DECRYPT)
- rijndael_key_enc_to_dec(key->keySched, key->ROUNDS);
- return 0;
+/* encrypt a block of text */
+
+#define f_nround(bo, bi, k) \
+ f_rn(bo, bi, 0, k); \
+ f_rn(bo, bi, 1, k); \
+ f_rn(bo, bi, 2, k); \
+ f_rn(bo, bi, 3, k); \
+ k += 4
+
+#define f_lround(bo, bi, k) \
+ f_rl(bo, bi, 0, k); \
+ f_rl(bo, bi, 1, k); \
+ f_rl(bo, bi, 2, k); \
+ f_rl(bo, bi, 3, k)
+
+void
+rijndael_encrypt(rijndael_ctx *ctx, const u4byte *in_blk, u4byte *out_blk)
+{
+ u4byte k_len = ctx->k_len;
+ u4byte *e_key = ctx->e_key;
+ u4byte b0[4], b1[4], *kp;
+
+ b0[0] = in_blk[0] ^ e_key[0]; b0[1] = in_blk[1] ^ e_key[1];
+ b0[2] = in_blk[2] ^ e_key[2]; b0[3] = in_blk[3] ^ e_key[3];
+
+ kp = e_key + 4;
+
+ if(k_len > 6) {
+ f_nround(b1, b0, kp); f_nround(b0, b1, kp);
+ }
+
+ if(k_len > 4) {
+ f_nround(b1, b0, kp); f_nround(b0, b1, kp);
+ }
+
+ f_nround(b1, b0, kp); f_nround(b0, b1, kp);
+ f_nround(b1, b0, kp); f_nround(b0, b1, kp);
+ f_nround(b1, b0, kp); f_nround(b0, b1, kp);
+ f_nround(b1, b0, kp); f_nround(b0, b1, kp);
+ f_nround(b1, b0, kp); f_lround(b0, b1, kp);
+
+ out_blk[0] = b0[0]; out_blk[1] = b0[1];
+ out_blk[2] = b0[2]; out_blk[3] = b0[3];
+}
+
+/* decrypt a block of text */
+
+#define i_nround(bo, bi, k) \
+ i_rn(bo, bi, 0, k); \
+ i_rn(bo, bi, 1, k); \
+ i_rn(bo, bi, 2, k); \
+ i_rn(bo, bi, 3, k); \
+ k -= 4
+
+#define i_lround(bo, bi, k) \
+ i_rl(bo, bi, 0, k); \
+ i_rl(bo, bi, 1, k); \
+ i_rl(bo, bi, 2, k); \
+ i_rl(bo, bi, 3, k)
+
+void
+rijndael_decrypt(rijndael_ctx *ctx, const u4byte *in_blk, u4byte *out_blk)
+{
+ u4byte b0[4], b1[4], *kp;
+ u4byte k_len = ctx->k_len;
+ u4byte *e_key = ctx->e_key;
+ u4byte *d_key = ctx->d_key;
+
+ b0[0] = in_blk[0] ^ e_key[4 * k_len + 24]; b0[1] = in_blk[1] ^ e_key[4 * k_len + 25];
+ b0[2] = in_blk[2] ^ e_key[4 * k_len + 26]; b0[3] = in_blk[3] ^ e_key[4 * k_len + 27];
+
+ kp = d_key + 4 * (k_len + 5);
+
+ if(k_len > 6) {
+ i_nround(b1, b0, kp); i_nround(b0, b1, kp);
+ }
+
+ if(k_len > 4) {
+ i_nround(b1, b0, kp); i_nround(b0, b1, kp);
+ }
+
+ i_nround(b1, b0, kp); i_nround(b0, b1, kp);
+ i_nround(b1, b0, kp); i_nround(b0, b1, kp);
+ i_nround(b1, b0, kp); i_nround(b0, b1, kp);
+ i_nround(b1, b0, kp); i_nround(b0, b1, kp);
+ i_nround(b1, b0, kp); i_lround(b0, b1, kp);
+
+ out_blk[0] = b0[0]; out_blk[1] = b0[1];
+ out_blk[2] = b0[2]; out_blk[3] = b0[3];
}
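
The gen_tabs() routine added above builds log/antilog tables for GF(2**8) under the AES polynomial 0x11b, stepping through the field with generator 0x03, and ff_mult() multiplies field elements via those tables. As a quick standalone illustration (not part of the commit; table and macro names are local to the snippet), the code below rebuilds the two tables the same way and checks the worked multiplication example from FIPS-197 section 4.2, {57} * {83} = {c1}.

/*
 * Standalone illustration, not part of this commit: the GF(2**8)
 * log/antilog tables that gen_tabs() builds, and multiplication
 * through them as in ff_mult().
 */
#include <assert.h>

static unsigned char pow_t[256], log_t[256];

#define gf_mult(a, b) \
	((a) && (b) ? pow_t[(log_t[a] + log_t[b]) % 255] : 0)

static void
build_tabs(void)
{
	unsigned int i;
	unsigned char p = 1;

	for (i = 0; i < 256; ++i) {
		pow_t[i] = p;			/* p == 3^i            */
		log_t[p] = (unsigned char)i;	/* discrete log base 3 */
		/* multiply p by 3 = x + 1, reduced mod x^8+x^4+x^3+x+1 */
		p = p ^ (p << 1) ^ (p & 0x80 ? 0x1b : 0);
	}
	log_t[1] = 0;	/* 3^255 == 1 overwrote it, as in gen_tabs() */
}

int
main(void)
{
	build_tabs();
	/* worked example from FIPS-197, section 4.2 */
	assert(gf_mult(0x57, 0x83) == 0xc1);
	return 0;
}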
diff --git a/usr.bin/ssh/rijndael.h b/usr.bin/ssh/rijndael.h
index 75853cfff2c..c13f18c9590 100644
--- a/usr.bin/ssh/rijndael.h
+++ b/usr.bin/ssh/rijndael.h
@@ -1,27 +1,31 @@
-/*
- * rijndael-alg-fst.h v2.4 April '2000
- * rijndael-api-fst.h v2.4 April '2000
- *
- * Optimised ANSI C code
- *
- */
+#ifndef _RIJNDAEL_H_
+#define _RIJNDAEL_H_
-#ifndef RIJNDAEL_H
-#define RIJNDAEL_H
+/* 1. Standard types for AES cryptography source code */
-#define RIJNDAEL_MAXKC (256/32)
-#define RIJNDAEL_MAXROUNDS 14
+typedef u_int8_t u1byte; /* an 8 bit unsigned character type */
+typedef u_int16_t u2byte; /* a 16 bit unsigned integer type */
+typedef u_int32_t u4byte; /* a 32 bit unsigned integer type */
-#define RIJNDAEL_ENCRYPT 0
-#define RIJNDAEL_DECRYPT 1
+typedef int8_t s1byte; /* an 8 bit signed character type */
+typedef int16_t s2byte; /* a 16 bit signed integer type */
+typedef int32_t s4byte; /* a 32 bit signed integer type */
-typedef struct {
- int ROUNDS; /* key-length-dependent number of rounds */
- u_int8_t keySched[RIJNDAEL_MAXROUNDS+1][4][4];
-} rijndael_key;
+typedef struct _rijndael_ctx {
+ u4byte k_len;
+ int decrypt;
+ u4byte e_key[64];
+ u4byte d_key[64];
+} rijndael_ctx;
-int rijndael_encrypt(rijndael_key *key, u_int8_t a[16], u_int8_t b[16]);
-int rijndael_decrypt(rijndael_key *key, u_int8_t a[16], u_int8_t b[16]);
-int rijndael_makekey(rijndael_key *key, int direction, int keyLen, u_int8_t *keyMaterial);
-#endif
+/* 2. Standard interface for AES cryptographic routines */
+
+/* These are all based on 32 bit unsigned values and will therefore */
+/* require endian conversions for big-endian architectures */
+
+rijndael_ctx *rijndael_set_key __P((rijndael_ctx *, const u4byte *, u4byte, int));
+void rijndael_encrypt __P((rijndael_ctx *, const u4byte *, u4byte *));
+void rijndael_decrypt __P((rijndael_ctx *, const u4byte *, u4byte *));
+
+#endif /* _RIJNDAEL_H_ */
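
For completeness, a minimal usage sketch of the interface declared above (again not part of the commit; key and block values are arbitrary placeholders): schedule a 256-bit key for both directions, encrypt one 16-byte block, and verify that decryption round-trips it. Keys and blocks are passed as 32-bit words, and the decrypting context must be set up with encrypt == 0 so the inverse key schedule in d_key is built.

/*
 * Usage sketch, not part of this commit: one-block encrypt/decrypt
 * round trip through the interface declared above.
 */
#include <sys/types.h>
#include <assert.h>
#include <string.h>
#include "rijndael.h"

int
main(void)
{
	rijndael_ctx enc, dec;
	u4byte key[8], pt[4], ct[4], out[4];

	memset(key, 0x11, sizeof(key));		/* dummy 256-bit key  */
	memset(pt, 0x22, sizeof(pt));		/* one 16-byte block  */

	rijndael_set_key(&enc, key, 256, 1);	/* forward schedule   */
	rijndael_set_key(&dec, key, 256, 0);	/* also builds d_key  */

	rijndael_encrypt(&enc, pt, ct);
	rijndael_decrypt(&dec, ct, out);

	assert(memcmp(pt, out, sizeof(pt)) == 0);
	return 0;
}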