author		Joel Sing <jsing@cvs.openbsd.org>	2014-06-22 12:05:10 +0000
committer	Joel Sing <jsing@cvs.openbsd.org>	2014-06-22 12:05:10 +0000
commit		ae697ff0e9bf95b9d923fa20819d33eec71ecfc1 (patch)
tree		8a0dcdc058f47cb9774d9b367f4f1286a8b859cd /lib/libcrypto
parent		3845da47840940f8b010b8b6892b9ff4417887a4 (diff)
KNF.
Diffstat (limited to 'lib/libcrypto')
-rw-r--r--	lib/libcrypto/engine/eng_lib.c		290
-rw-r--r--	lib/libcrypto/engine/eng_openssl.c	228
-rw-r--r--	lib/libcrypto/engine/eng_padlock.c	496
-rw-r--r--	lib/libcrypto/engine/eng_pkey.c		157
-rw-r--r--	lib/libcrypto/engine/eng_rsax.c		613
-rw-r--r--	lib/libcrypto/engine/eng_table.c	258
6 files changed, 1081 insertions, 961 deletions
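
The commit message "KNF." refers to OpenBSD's Kernel Normal Form, the style(9) conventions applied throughout the diff below: the return type of a function definition goes on its own line, the function name starts in column 0, braces sit in column 0 on their own lines, a space follows keywords such as "if", "switch" and "while", and single-statement bodies move to their own indented line. The sketch below is illustrative only (a hypothetical helper, not code taken from the patched files) and shows the kind of conversion this commit performs:

/* Old OpenSSL-style layout, as removed by this diff:
 *
 *	int example_count(const char *s)
 *		{
 *		int n = 0;
 *		if(s == NULL) return 0;
 *		while(*s++) n++;
 *		return n;
 *		}
 */

/* Equivalent KNF layout, as introduced by this diff: */
int
example_count(const char *s)
{
	int n = 0;

	if (s == NULL)
		return 0;
	while (*s++)
		n++;
	return n;
}
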
diff --git a/lib/libcrypto/engine/eng_lib.c b/lib/libcrypto/engine/eng_lib.c
index 4288535d72f..569b7199cec 100644
--- a/lib/libcrypto/engine/eng_lib.c
+++ b/lib/libcrypto/engine/eng_lib.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: eng_lib.c,v 1.7 2014/06/12 15:49:29 deraadt Exp $ */
+/* $OpenBSD: eng_lib.c,v 1.8 2014/06/22 12:05:09 jsing Exp $ */
/* Written by Geoff Thorpe (geoff@geoffthorpe.net) for the OpenSSL
* project 2000.
*/
@@ -10,7 +10,7 @@
* are met:
*
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
@@ -61,28 +61,29 @@
/* The "new"/"free" stuff first */
-ENGINE *ENGINE_new(void)
- {
+ENGINE *
+ENGINE_new(void)
+{
ENGINE *ret;
ret = malloc(sizeof(ENGINE));
- if(ret == NULL)
- {
+ if (ret == NULL) {
ENGINEerr(ENGINE_F_ENGINE_NEW, ERR_R_MALLOC_FAILURE);
return NULL;
- }
+ }
memset(ret, 0, sizeof(ENGINE));
ret->struct_ref = 1;
engine_ref_debug(ret, 0, 1)
CRYPTO_new_ex_data(CRYPTO_EX_INDEX_ENGINE, ret, &ret->ex_data);
return ret;
- }
+}
/* Placed here (close proximity to ENGINE_new) so that modifications to the
* elements of the ENGINE structure are more likely to be caught and changed
* here. */
-void engine_set_all_null(ENGINE *e)
- {
+void
+engine_set_all_null(ENGINE *e)
+{
e->id = NULL;
e->name = NULL;
e->rsa_meth = NULL;
@@ -100,41 +101,43 @@ void engine_set_all_null(ENGINE *e)
e->load_pubkey = NULL;
e->cmd_defns = NULL;
e->flags = 0;
- }
+}
-int engine_free_util(ENGINE *e, int locked)
- {
+int
+engine_free_util(ENGINE *e, int locked)
+{
int i;
- if(e == NULL)
- {
+ if (e == NULL) {
ENGINEerr(ENGINE_F_ENGINE_FREE_UTIL,
- ERR_R_PASSED_NULL_PARAMETER);
+ ERR_R_PASSED_NULL_PARAMETER);
return 0;
- }
- if(locked)
- i = CRYPTO_add(&e->struct_ref,-1,CRYPTO_LOCK_ENGINE);
+ }
+ if (locked)
+ i = CRYPTO_add(&e->struct_ref, -1, CRYPTO_LOCK_ENGINE);
else
i = --e->struct_ref;
engine_ref_debug(e, 0, -1)
- if (i > 0) return 1;
+ if (i > 0)
+ return 1;
/* Free up any dynamically allocated public key methods */
engine_pkey_meths_free(e);
engine_pkey_asn1_meths_free(e);
/* Give the ENGINE a chance to do any structural cleanup corresponding
* to allocation it did in its constructor (eg. unload error strings) */
- if(e->destroy)
+ if (e->destroy)
e->destroy(e);
CRYPTO_free_ex_data(CRYPTO_EX_INDEX_ENGINE, e, &e->ex_data);
free(e);
return 1;
- }
+}
-int ENGINE_free(ENGINE *e)
- {
+int
+ENGINE_free(ENGINE *e)
+{
return engine_free_util(e, 1);
- }
+}
/* Cleanup stuff */
@@ -143,183 +146,218 @@ int ENGINE_free(ENGINE *e)
* bloat by referring to all *possible* cleanups, but any linker bloat into code
* "X" will cause X's cleanup function to end up here. */
static STACK_OF(ENGINE_CLEANUP_ITEM) *cleanup_stack = NULL;
-static int int_cleanup_check(int create)
- {
- if(cleanup_stack) return 1;
- if(!create) return 0;
+static int
+int_cleanup_check(int create)
+{
+ if (cleanup_stack)
+ return 1;
+ if (!create)
+ return 0;
cleanup_stack = sk_ENGINE_CLEANUP_ITEM_new_null();
return (cleanup_stack ? 1 : 0);
- }
-static ENGINE_CLEANUP_ITEM *int_cleanup_item(ENGINE_CLEANUP_CB *cb)
- {
+}
+
+static ENGINE_CLEANUP_ITEM *
+int_cleanup_item(ENGINE_CLEANUP_CB *cb)
+{
ENGINE_CLEANUP_ITEM *item = malloc(sizeof(ENGINE_CLEANUP_ITEM));
- if(!item) return NULL;
+
+ if (!item)
+ return NULL;
item->cb = cb;
return item;
- }
-void engine_cleanup_add_first(ENGINE_CLEANUP_CB *cb)
- {
+}
+
+void
+engine_cleanup_add_first(ENGINE_CLEANUP_CB *cb)
+{
ENGINE_CLEANUP_ITEM *item;
- if(!int_cleanup_check(1)) return;
+
+ if (!int_cleanup_check(1))
+ return;
item = int_cleanup_item(cb);
- if(item)
+ if (item)
sk_ENGINE_CLEANUP_ITEM_insert(cleanup_stack, item, 0);
- }
-void engine_cleanup_add_last(ENGINE_CLEANUP_CB *cb)
- {
+}
+
+void
+engine_cleanup_add_last(ENGINE_CLEANUP_CB *cb)
+{
ENGINE_CLEANUP_ITEM *item;
- if(!int_cleanup_check(1)) return;
+
+ if (!int_cleanup_check(1))
+ return;
item = int_cleanup_item(cb);
- if(item)
+ if (item)
sk_ENGINE_CLEANUP_ITEM_push(cleanup_stack, item);
- }
+}
/* The API function that performs all cleanup */
-static void engine_cleanup_cb_free(ENGINE_CLEANUP_ITEM *item)
- {
+static void
+engine_cleanup_cb_free(ENGINE_CLEANUP_ITEM *item)
+{
(*(item->cb))();
free(item);
- }
-void ENGINE_cleanup(void)
- {
- if(int_cleanup_check(0))
- {
+}
+
+void
+ENGINE_cleanup(void)
+{
+ if (int_cleanup_check(0)) {
sk_ENGINE_CLEANUP_ITEM_pop_free(cleanup_stack,
- engine_cleanup_cb_free);
+ engine_cleanup_cb_free);
cleanup_stack = NULL;
- }
+ }
/* FIXME: This should be handled (somehow) through RAND, eg. by it
* registering a cleanup callback. */
RAND_set_rand_method(NULL);
- }
+}
/* Now the "ex_data" support */
-int ENGINE_get_ex_new_index(long argl, void *argp, CRYPTO_EX_new *new_func,
- CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func)
- {
+int
+ENGINE_get_ex_new_index(long argl, void *argp, CRYPTO_EX_new *new_func,
+ CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func)
+{
return CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_ENGINE, argl, argp,
- new_func, dup_func, free_func);
- }
+ new_func, dup_func, free_func);
+}
-int ENGINE_set_ex_data(ENGINE *e, int idx, void *arg)
- {
- return(CRYPTO_set_ex_data(&e->ex_data, idx, arg));
- }
+int
+ENGINE_set_ex_data(ENGINE *e, int idx, void *arg)
+{
+ return (CRYPTO_set_ex_data(&e->ex_data, idx, arg));
+}
-void *ENGINE_get_ex_data(const ENGINE *e, int idx)
- {
- return(CRYPTO_get_ex_data(&e->ex_data, idx));
- }
+void *
+ENGINE_get_ex_data(const ENGINE *e, int idx)
+{
+ return (CRYPTO_get_ex_data(&e->ex_data, idx));
+}
/* Functions to get/set an ENGINE's elements - mainly to avoid exposing the
* ENGINE structure itself. */
-int ENGINE_set_id(ENGINE *e, const char *id)
- {
- if(id == NULL)
- {
+int
+ENGINE_set_id(ENGINE *e, const char *id)
+{
+ if (id == NULL) {
ENGINEerr(ENGINE_F_ENGINE_SET_ID,
- ERR_R_PASSED_NULL_PARAMETER);
+ ERR_R_PASSED_NULL_PARAMETER);
return 0;
- }
+ }
e->id = id;
return 1;
- }
+}
-int ENGINE_set_name(ENGINE *e, const char *name)
- {
- if(name == NULL)
- {
+int
+ENGINE_set_name(ENGINE *e, const char *name)
+{
+ if (name == NULL) {
ENGINEerr(ENGINE_F_ENGINE_SET_NAME,
- ERR_R_PASSED_NULL_PARAMETER);
+ ERR_R_PASSED_NULL_PARAMETER);
return 0;
- }
+ }
e->name = name;
return 1;
- }
+}
-int ENGINE_set_destroy_function(ENGINE *e, ENGINE_GEN_INT_FUNC_PTR destroy_f)
- {
+int
+ENGINE_set_destroy_function(ENGINE *e, ENGINE_GEN_INT_FUNC_PTR destroy_f)
+{
e->destroy = destroy_f;
return 1;
- }
+}
-int ENGINE_set_init_function(ENGINE *e, ENGINE_GEN_INT_FUNC_PTR init_f)
- {
+int
+ENGINE_set_init_function(ENGINE *e, ENGINE_GEN_INT_FUNC_PTR init_f)
+{
e->init = init_f;
return 1;
- }
+}
-int ENGINE_set_finish_function(ENGINE *e, ENGINE_GEN_INT_FUNC_PTR finish_f)
- {
+int
+ENGINE_set_finish_function(ENGINE *e, ENGINE_GEN_INT_FUNC_PTR finish_f)
+{
e->finish = finish_f;
return 1;
- }
+}
-int ENGINE_set_ctrl_function(ENGINE *e, ENGINE_CTRL_FUNC_PTR ctrl_f)
- {
+int
+ENGINE_set_ctrl_function(ENGINE *e, ENGINE_CTRL_FUNC_PTR ctrl_f)
+{
e->ctrl = ctrl_f;
return 1;
- }
+}
-int ENGINE_set_flags(ENGINE *e, int flags)
- {
+int
+ENGINE_set_flags(ENGINE *e, int flags)
+{
e->flags = flags;
return 1;
- }
+}
-int ENGINE_set_cmd_defns(ENGINE *e, const ENGINE_CMD_DEFN *defns)
- {
+int
+ENGINE_set_cmd_defns(ENGINE *e, const ENGINE_CMD_DEFN *defns)
+{
e->cmd_defns = defns;
return 1;
- }
+}
-const char *ENGINE_get_id(const ENGINE *e)
- {
+const char *
+ENGINE_get_id(const ENGINE *e)
+{
return e->id;
- }
+}
-const char *ENGINE_get_name(const ENGINE *e)
- {
+const char *
+ENGINE_get_name(const ENGINE *e)
+{
return e->name;
- }
+}
-ENGINE_GEN_INT_FUNC_PTR ENGINE_get_destroy_function(const ENGINE *e)
- {
+ENGINE_GEN_INT_FUNC_PTR
+ENGINE_get_destroy_function(const ENGINE *e)
+{
return e->destroy;
- }
+}
-ENGINE_GEN_INT_FUNC_PTR ENGINE_get_init_function(const ENGINE *e)
- {
+ENGINE_GEN_INT_FUNC_PTR
+ENGINE_get_init_function(const ENGINE *e)
+{
return e->init;
- }
+}
-ENGINE_GEN_INT_FUNC_PTR ENGINE_get_finish_function(const ENGINE *e)
- {
+ENGINE_GEN_INT_FUNC_PTR
+ENGINE_get_finish_function(const ENGINE *e)
+{
return e->finish;
- }
+}
-ENGINE_CTRL_FUNC_PTR ENGINE_get_ctrl_function(const ENGINE *e)
- {
+ENGINE_CTRL_FUNC_PTR
+ENGINE_get_ctrl_function(const ENGINE *e)
+{
return e->ctrl;
- }
+}
-int ENGINE_get_flags(const ENGINE *e)
- {
+int
+ENGINE_get_flags(const ENGINE *e)
+{
return e->flags;
- }
+}
-const ENGINE_CMD_DEFN *ENGINE_get_cmd_defns(const ENGINE *e)
- {
+const ENGINE_CMD_DEFN *
+ENGINE_get_cmd_defns(const ENGINE *e)
+{
return e->cmd_defns;
- }
+}
/* eng_lib.o is pretty much linked into anything that touches ENGINE already, so
* put the "static_state" hack here. */
static int internal_static_hack = 0;
-void *ENGINE_get_static_state(void)
- {
+void *
+ENGINE_get_static_state(void)
+{
return &internal_static_hack;
- }
+}
diff --git a/lib/libcrypto/engine/eng_openssl.c b/lib/libcrypto/engine/eng_openssl.c
index f7cd8df622b..9ba61dd842b 100644
--- a/lib/libcrypto/engine/eng_openssl.c
+++ b/lib/libcrypto/engine/eng_openssl.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: eng_openssl.c,v 1.5 2014/06/12 15:49:29 deraadt Exp $ */
+/* $OpenBSD: eng_openssl.c,v 1.6 2014/06/22 12:05:09 jsing Exp $ */
/* Written by Geoff Thorpe (geoff@geoffthorpe.net) for the OpenSSL
* project 2000.
*/
@@ -10,7 +10,7 @@
* are met:
*
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
@@ -57,7 +57,7 @@
*/
/* ====================================================================
* Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
- * ECDH support in OpenSSL originally developed by
+ * ECDH support in OpenSSL originally developed by
* SUN MICROSYSTEMS, INC., and contributed to the OpenSSL project.
*/
@@ -106,21 +106,21 @@
#undef TEST_ENG_OPENSSL_SHA_OTHERS
#undef TEST_ENG_OPENSSL_SHA_P_INIT
#undef TEST_ENG_OPENSSL_SHA_P_UPDATE
-#undef TEST_ENG_OPENSSL_SHA_P_FINAL
+#undef TEST_ENG_OPENSSL_SHA_P_FINAL
#endif
#ifdef TEST_ENG_OPENSSL_RC4
static int openssl_ciphers(ENGINE *e, const EVP_CIPHER **cipher,
- const int **nids, int nid);
+ const int **nids, int nid);
#endif
#ifdef TEST_ENG_OPENSSL_SHA
static int openssl_digests(ENGINE *e, const EVP_MD **digest,
- const int **nids, int nid);
+ const int **nids, int nid);
#endif
#ifdef TEST_ENG_OPENSSL_PKEY
static EVP_PKEY *openssl_load_privkey(ENGINE *eng, const char *key_id,
- UI_METHOD *ui_method, void *callback_data);
+ UI_METHOD *ui_method, void *callback_data);
#endif
/* The constants used when creating the ENGINE */
@@ -129,79 +129,85 @@ static const char *engine_openssl_name = "Software engine support";
/* This internal function is used by ENGINE_openssl() and possibly by the
* "dynamic" ENGINE support too */
-static int bind_helper(ENGINE *e)
- {
- if(!ENGINE_set_id(e, engine_openssl_id)
- || !ENGINE_set_name(e, engine_openssl_name)
+static int
+bind_helper(ENGINE *e)
+{
+ if (!ENGINE_set_id(e, engine_openssl_id) ||
+ !ENGINE_set_name(e, engine_openssl_name)
#ifndef TEST_ENG_OPENSSL_NO_ALGORITHMS
#ifndef OPENSSL_NO_RSA
- || !ENGINE_set_RSA(e, RSA_get_default_method())
+ || !ENGINE_set_RSA(e, RSA_get_default_method())
#endif
#ifndef OPENSSL_NO_DSA
- || !ENGINE_set_DSA(e, DSA_get_default_method())
+ || !ENGINE_set_DSA(e, DSA_get_default_method())
#endif
#ifndef OPENSSL_NO_ECDH
- || !ENGINE_set_ECDH(e, ECDH_OpenSSL())
+ || !ENGINE_set_ECDH(e, ECDH_OpenSSL())
#endif
#ifndef OPENSSL_NO_ECDSA
- || !ENGINE_set_ECDSA(e, ECDSA_OpenSSL())
+ || !ENGINE_set_ECDSA(e, ECDSA_OpenSSL())
#endif
#ifndef OPENSSL_NO_DH
- || !ENGINE_set_DH(e, DH_get_default_method())
+ || !ENGINE_set_DH(e, DH_get_default_method())
#endif
- || !ENGINE_set_RAND(e, RAND_SSLeay())
+ || !ENGINE_set_RAND(e, RAND_SSLeay())
#ifdef TEST_ENG_OPENSSL_RC4
- || !ENGINE_set_ciphers(e, openssl_ciphers)
+ || !ENGINE_set_ciphers(e, openssl_ciphers)
#endif
#ifdef TEST_ENG_OPENSSL_SHA
- || !ENGINE_set_digests(e, openssl_digests)
+ || !ENGINE_set_digests(e, openssl_digests)
#endif
#endif
#ifdef TEST_ENG_OPENSSL_PKEY
- || !ENGINE_set_load_privkey_function(e, openssl_load_privkey)
+ || !ENGINE_set_load_privkey_function(e, openssl_load_privkey)
#endif
- )
+ )
return 0;
/* If we add errors to this ENGINE, ensure the error handling is setup here */
/* openssl_load_error_strings(); */
return 1;
- }
+}
-static ENGINE *engine_openssl(void)
- {
+static ENGINE *
+engine_openssl(void)
+{
ENGINE *ret = ENGINE_new();
- if(!ret)
+
+ if (!ret)
return NULL;
- if(!bind_helper(ret))
- {
+ if (!bind_helper(ret)) {
ENGINE_free(ret);
return NULL;
- }
- return ret;
}
+ return ret;
+}
-void ENGINE_load_openssl(void)
- {
+void
+ENGINE_load_openssl(void)
+{
ENGINE *toadd = engine_openssl();
- if(!toadd) return;
+
+ if (!toadd)
+ return;
ENGINE_add(toadd);
/* If the "add" worked, it gets a structural reference. So either way,
* we release our just-created reference. */
ENGINE_free(toadd);
ERR_clear_error();
- }
+}
/* This stuff is needed if this ENGINE is being compiled into a self-contained
* shared-library. */
#ifdef ENGINE_DYNAMIC_SUPPORT
-static int bind_fn(ENGINE *e, const char *id)
- {
- if(id && (strcmp(id, engine_openssl_id) != 0))
+static int
+bind_fn(ENGINE *e, const char *id)
+{
+ if (id && (strcmp(id, engine_openssl_id) != 0))
return 0;
- if(!bind_helper(e))
+ if (!bind_helper(e))
return 0;
return 1;
- }
+}
IMPLEMENT_DYNAMIC_CHECK_FN()
IMPLEMENT_DYNAMIC_BIND_FN(bind_fn)
#endif /* ENGINE_DYNAMIC_SUPPORT */
@@ -219,37 +225,42 @@ IMPLEMENT_DYNAMIC_BIND_FN(bind_fn)
*/
#include <openssl/rc4.h>
#define TEST_RC4_KEY_SIZE 16
-static int test_cipher_nids[] = {NID_rc4,NID_rc4_40};
+static int test_cipher_nids[] = {NID_rc4, NID_rc4_40};
static int test_cipher_nids_number = 2;
+
typedef struct {
unsigned char key[TEST_RC4_KEY_SIZE];
RC4_KEY ks;
- } TEST_RC4_KEY;
+} TEST_RC4_KEY;
+
#define test(ctx) ((TEST_RC4_KEY *)(ctx)->cipher_data)
-static int test_rc4_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
- const unsigned char *iv, int enc)
- {
+static int
+test_rc4_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
+ const unsigned char *iv, int enc)
+{
#ifdef TEST_ENG_OPENSSL_RC4_P_INIT
fprintf(stderr, "(TEST_ENG_OPENSSL_RC4) test_init_key() called\n");
#endif
- memcpy(&test(ctx)->key[0],key,EVP_CIPHER_CTX_key_length(ctx));
- RC4_set_key(&test(ctx)->ks,EVP_CIPHER_CTX_key_length(ctx),
- test(ctx)->key);
+ memcpy(&test(ctx)->key[0], key, EVP_CIPHER_CTX_key_length(ctx));
+ RC4_set_key(&test(ctx)->ks, EVP_CIPHER_CTX_key_length(ctx),
+ test(ctx)->key);
return 1;
- }
-static int test_rc4_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
- const unsigned char *in, size_t inl)
- {
+}
+
+static int
+test_rc4_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+ const unsigned char *in, size_t inl)
+{
#ifdef TEST_ENG_OPENSSL_RC4_P_CIPHER
fprintf(stderr, "(TEST_ENG_OPENSSL_RC4) test_cipher() called\n");
#endif
- RC4(&test(ctx)->ks,inl,in,out);
+ RC4(&test(ctx)->ks, inl, in, out);
return 1;
- }
-static const EVP_CIPHER test_r4_cipher=
- {
+}
+
+static const EVP_CIPHER test_r4_cipher = {
NID_rc4,
- 1,TEST_RC4_KEY_SIZE,0,
+ 1, TEST_RC4_KEY_SIZE, 0,
EVP_CIPH_VARIABLE_LENGTH,
test_rc4_init_key,
test_rc4_cipher,
@@ -259,9 +270,9 @@ static const EVP_CIPHER test_r4_cipher=
NULL,
NULL,
NULL
- };
-static const EVP_CIPHER test_r4_40_cipher=
- {
+};
+
+static const EVP_CIPHER test_r4_40_cipher = {
NID_rc4_40,
1,5 /* 40 bit */,0,
EVP_CIPH_VARIABLE_LENGTH,
@@ -269,36 +280,35 @@ static const EVP_CIPHER test_r4_40_cipher=
test_rc4_cipher,
NULL,
sizeof(TEST_RC4_KEY),
- NULL,
+ NULL,
NULL,
NULL,
NULL
- };
-static int openssl_ciphers(ENGINE *e, const EVP_CIPHER **cipher,
- const int **nids, int nid)
- {
- if(!cipher)
- {
+};
+
+static int
+openssl_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid)
+{
+ if (!cipher) {
/* We are returning a list of supported nids */
*nids = test_cipher_nids;
return test_cipher_nids_number;
- }
+ }
/* We are being asked for a specific cipher */
- if(nid == NID_rc4)
+ if (nid == NID_rc4)
*cipher = &test_r4_cipher;
- else if(nid == NID_rc4_40)
+ else if (nid == NID_rc4_40)
*cipher = &test_r4_40_cipher;
- else
- {
+ else {
#ifdef TEST_ENG_OPENSSL_RC4_OTHERS
fprintf(stderr, "(TEST_ENG_OPENSSL_RC4) returning NULL for "
- "nid %d\n", nid);
+ "nid %d\n", nid);
#endif
*cipher = NULL;
return 0;
- }
- return 1;
}
+ return 1;
+}
#endif
#ifdef TEST_ENG_OPENSSL_SHA
@@ -306,29 +316,35 @@ static int openssl_ciphers(ENGINE *e, const EVP_CIPHER **cipher,
#include <openssl/sha.h>
static int test_digest_nids[] = {NID_sha1};
static int test_digest_nids_number = 1;
-static int test_sha1_init(EVP_MD_CTX *ctx)
- {
+
+static int
+test_sha1_init(EVP_MD_CTX *ctx)
+{
#ifdef TEST_ENG_OPENSSL_SHA_P_INIT
fprintf(stderr, "(TEST_ENG_OPENSSL_SHA) test_sha1_init() called\n");
#endif
return SHA1_Init(ctx->md_data);
- }
-static int test_sha1_update(EVP_MD_CTX *ctx,const void *data,size_t count)
- {
+}
+
+static int
+test_sha1_update(EVP_MD_CTX *ctx, const void *data, size_t count)
+{
#ifdef TEST_ENG_OPENSSL_SHA_P_UPDATE
fprintf(stderr, "(TEST_ENG_OPENSSL_SHA) test_sha1_update() called\n");
#endif
- return SHA1_Update(ctx->md_data,data,count);
- }
-static int test_sha1_final(EVP_MD_CTX *ctx,unsigned char *md)
- {
+ return SHA1_Update(ctx->md_data, data, count);
+}
+
+static int
+test_sha1_final(EVP_MD_CTX *ctx, unsigned char *md)
+{
#ifdef TEST_ENG_OPENSSL_SHA_P_FINAL
fprintf(stderr, "(TEST_ENG_OPENSSL_SHA) test_sha1_final() called\n");
#endif
- return SHA1_Final(md,ctx->md_data);
- }
-static const EVP_MD test_sha_md=
- {
+ return SHA1_Final(md, ctx->md_data);
+}
+
+static const EVP_MD test_sha_md = {
NID_sha1,
NID_sha1WithRSAEncryption,
SHA_DIGEST_LENGTH,
@@ -340,45 +356,47 @@ static const EVP_MD test_sha_md=
NULL,
EVP_PKEY_RSA_method,
SHA_CBLOCK,
- sizeof(EVP_MD *)+sizeof(SHA_CTX),
- };
-static int openssl_digests(ENGINE *e, const EVP_MD **digest,
- const int **nids, int nid)
- {
- if(!digest)
- {
+ sizeof(EVP_MD *) + sizeof(SHA_CTX),
+};
+
+static int
+openssl_digests(ENGINE *e, const EVP_MD **digest, const int **nids, int nid)
+{
+ if (!digest) {
/* We are returning a list of supported nids */
*nids = test_digest_nids;
return test_digest_nids_number;
- }
+ }
/* We are being asked for a specific digest */
- if(nid == NID_sha1)
+ if (nid == NID_sha1)
*digest = &test_sha_md;
- else
- {
+ else {
#ifdef TEST_ENG_OPENSSL_SHA_OTHERS
fprintf(stderr, "(TEST_ENG_OPENSSL_SHA) returning NULL for "
- "nid %d\n", nid);
+ "nid %d\n", nid);
#endif
*digest = NULL;
return 0;
- }
- return 1;
}
+ return 1;
+}
#endif
#ifdef TEST_ENG_OPENSSL_PKEY
-static EVP_PKEY *openssl_load_privkey(ENGINE *eng, const char *key_id,
- UI_METHOD *ui_method, void *callback_data)
- {
+static EVP_PKEY *
+openssl_load_privkey(ENGINE *eng, const char *key_id, UI_METHOD *ui_method,
+ void *callback_data)
+{
BIO *in;
EVP_PKEY *key;
- fprintf(stderr, "(TEST_ENG_OPENSSL_PKEY)Loading Private key %s\n", key_id);
+
+ fprintf(stderr, "(TEST_ENG_OPENSSL_PKEY)Loading Private key %s\n",
+ key_id);
in = BIO_new_file(key_id, "r");
if (!in)
return NULL;
key = PEM_read_bio_PrivateKey(in, NULL, 0, NULL);
BIO_free(in);
return key;
- }
+}
#endif
diff --git a/lib/libcrypto/engine/eng_padlock.c b/lib/libcrypto/engine/eng_padlock.c
index 0245f44de60..936a440b1a8 100644
--- a/lib/libcrypto/engine/eng_padlock.c
+++ b/lib/libcrypto/engine/eng_padlock.c
@@ -1,11 +1,11 @@
-/* $OpenBSD: eng_padlock.c,v 1.10 2014/06/12 15:49:29 deraadt Exp $ */
-/*
+/* $OpenBSD: eng_padlock.c,v 1.11 2014/06/22 12:05:09 jsing Exp $ */
+/*
* Support for VIA PadLock Advanced Cryptography Engine (ACE)
* Written by Michal Ludvig <michal@logix.cz>
* http://www.logix.cz/michal
*
- * Big thanks to Andy Polyakov for a help with optimization,
- * assembler fixes, port to MS Windows and a lot of other
+ * Big thanks to Andy Polyakov for a help with optimization,
+ * assembler fixes, port to MS Windows and a lot of other
* valuable work on this engine!
*/
@@ -97,7 +97,7 @@
/* VIA PadLock AES is available *ONLY* on some x86 CPUs.
Not only that it doesn't exist elsewhere, but it
even can't be compiled on other platforms!
-
+
In addition, because of the heavy use of inline assembler,
compiler choice is limited to GCC and Microsoft C. */
#undef COMPILE_HW_PADLOCK
@@ -117,7 +117,8 @@ void ENGINE_load_padlock (void)
/* On non-x86 CPUs it just returns. */
#ifdef COMPILE_HW_PADLOCK
ENGINE *toadd = ENGINE_padlock ();
- if (!toadd) return;
+ if (!toadd)
+ return;
ENGINE_add (toadd);
ENGINE_free (toadd);
ERR_clear_error ();
@@ -169,19 +170,18 @@ padlock_bind_helper(ENGINE *e)
padlock_available();
#if 1 /* disable RNG for now, see commentary in vicinity of RNG code */
- padlock_use_rng=0;
+ padlock_use_rng = 0;
#endif
/* Generate a nice engine name with available features */
(void) snprintf(padlock_name, sizeof(padlock_name),
- "VIA PadLock (%s, %s)",
- padlock_use_rng ? "RNG" : "no-RNG",
- padlock_use_ace ? "ACE" : "no-ACE");
+ "VIA PadLock (%s, %s)",
+ padlock_use_rng ? "RNG" : "no-RNG",
+ padlock_use_ace ? "ACE" : "no-ACE");
- /* Register everything or return with an error */
+ /* Register everything or return with an error */
if (!ENGINE_set_id(e, padlock_id) ||
!ENGINE_set_name(e, padlock_name) ||
-
!ENGINE_set_init_function(e, padlock_init) ||
#ifndef OPENSSL_NO_AES
(padlock_use_ace && !ENGINE_set_ciphers (e, padlock_ciphers)) ||
@@ -254,26 +254,26 @@ IMPLEMENT_DYNAMIC_BIND_FN (padlock_bind_fn)
#define AES_KEY_SIZE_192 24
#define AES_KEY_SIZE_256 32
-/* Here we store the status information relevant to the
+/* Here we store the status information relevant to the
current context. */
/* BIG FAT WARNING:
* Inline assembler in PADLOCK_XCRYPT_ASM()
* depends on the order of items in this structure.
* Don't blindly modify, reorder, etc!
*/
-struct padlock_cipher_data
-{
+struct padlock_cipher_data {
unsigned char iv[AES_BLOCK_SIZE]; /* Initialization vector */
- union { unsigned int pad[4];
+ union {
+ unsigned int pad[4];
struct {
- int rounds:4;
- int dgst:1; /* n/a in C3 */
- int align:1; /* n/a in C3 */
- int ciphr:1; /* n/a in C3 */
- unsigned int keygen:1;
- int interm:1;
- unsigned int encdec:1;
- int ksize:2;
+ int rounds : 4;
+ int dgst : 1; /* n/a in C3 */
+ int align : 1; /* n/a in C3 */
+ int ciphr : 1; /* n/a in C3 */
+ unsigned int keygen : 1;
+ int interm : 1;
+ unsigned int encdec : 1;
+ int ksize : 2;
} b;
} cword; /* Control word */
AES_KEY ks; /* Encryption key */
@@ -313,23 +313,23 @@ padlock_insn_cpuid_available(void)
{
int result = -1;
- /* We're checking if the bit #21 of EFLAGS
+ /* We're checking if the bit #21 of EFLAGS
can be toggled. If yes = CPUID is available. */
asm volatile (
- "pushf\n"
- "popl %%eax\n"
- "xorl $0x200000, %%eax\n"
- "movl %%eax, %%ecx\n"
- "andl $0x200000, %%ecx\n"
- "pushl %%eax\n"
- "popf\n"
- "pushf\n"
- "popl %%eax\n"
- "andl $0x200000, %%eax\n"
- "xorl %%eax, %%ecx\n"
- "movl %%ecx, %0\n"
- : "=r" (result) : : "eax", "ecx");
-
+ "pushf\n"
+ "popl %%eax\n"
+ "xorl $0x200000, %%eax\n"
+ "movl %%eax, %%ecx\n"
+ "andl $0x200000, %%ecx\n"
+ "pushl %%eax\n"
+ "popf\n"
+ "pushf\n"
+ "popl %%eax\n"
+ "andl $0x200000, %%eax\n"
+ "xorl %%eax, %%ecx\n"
+ "movl %%ecx, %0\n"
+ : "=r" (result) : : "eax", "ecx");
+
return (result == 0);
}
@@ -349,31 +349,31 @@ padlock_available(void)
eax = 0x00000000;
vendor_string[12] = 0;
asm volatile (
- "pushl %%ebx\n"
- "cpuid\n"
- "movl %%ebx,(%%edi)\n"
- "movl %%edx,4(%%edi)\n"
- "movl %%ecx,8(%%edi)\n"
- "popl %%ebx"
- : "+a"(eax) : "D"(vendor_string) : "ecx", "edx");
+ "pushl %%ebx\n"
+ "cpuid\n"
+ "movl %%ebx,(%%edi)\n"
+ "movl %%edx,4(%%edi)\n"
+ "movl %%ecx,8(%%edi)\n"
+ "popl %%ebx"
+ : "+a"(eax) : "D"(vendor_string) : "ecx", "edx");
if (strcmp(vendor_string, "CentaurHauls") != 0)
return 0;
/* Check for Centaur Extended Feature Flags presence */
eax = 0xC0000000;
asm volatile ("pushl %%ebx; cpuid; popl %%ebx"
- : "+a"(eax) : : "ecx", "edx");
+ : "+a"(eax) : : "ecx", "edx");
if (eax < 0xC0000001)
return 0;
/* Read the Centaur Extended Feature Flags */
eax = 0xC0000001;
asm volatile ("pushl %%ebx; cpuid; popl %%ebx"
- : "+a"(eax), "=d"(edx) : : "ecx");
+ : "+a"(eax), "=d"(edx) : : "ecx");
/* Fill up some flags */
- padlock_use_ace = ((edx & (0x3<<6)) == (0x3<<6));
- padlock_use_rng = ((edx & (0x3<<2)) == (0x3<<2));
+ padlock_use_ace = ((edx & (0x3 << 6)) == (0x3 << 6));
+ padlock_use_rng = ((edx & (0x3 << 2)) == (0x3 << 2));
return padlock_use_ace + padlock_use_rng;
}
@@ -394,7 +394,7 @@ padlock_bswapl(AES_KEY *ks)
#endif
/* Force key reload from memory to the CPU microcode.
- Loading EFLAGS from the stack clears EFLAGS[30]
+ Loading EFLAGS from the stack clears EFLAGS[30]
which does the trick. */
static inline void
padlock_reload_key(void)
@@ -416,21 +416,21 @@ static inline void
padlock_verify_context(struct padlock_cipher_data *cdata)
{
asm volatile (
- "pushfl\n"
-" btl $30,(%%esp)\n"
-" jnc 1f\n"
-" cmpl %2,%1\n"
-" je 1f\n"
-" popfl\n"
-" subl $4,%%esp\n"
-"1: addl $4,%%esp\n"
-" movl %2,%0"
- :"+m"(padlock_saved_context)
+ "pushfl\n"
+ " btl $30,(%%esp)\n"
+ " jnc 1f\n"
+ " cmpl %2,%1\n"
+ " je 1f\n"
+ " popfl\n"
+ " subl $4,%%esp\n"
+ "1: addl $4,%%esp\n"
+ " movl %2,%0"
+ :"+m"(padlock_saved_context)
: "r"(padlock_saved_context), "r"(cdata) : "cc");
}
/* Template for padlock_xcrypt_* modes */
-/* BIG FAT WARNING:
+/* BIG FAT WARNING:
* The offsets used with 'leal' instructions
* describe items of the 'padlock_cipher_data'
* structure.
@@ -465,9 +465,9 @@ padlock_xstore(void *addr, unsigned int edx_in)
unsigned int eax_out;
asm volatile (".byte 0x0f,0xa7,0xc0" /* xstore */
- : "=a"(eax_out),"=m"(*(unsigned *)addr)
- : "D"(addr), "d" (edx_in)
- );
+ : "=a"(eax_out),"=m"(*(unsigned *)addr)
+ : "D"(addr), "d" (edx_in)
+ );
return eax_out;
}
@@ -482,15 +482,16 @@ padlock_xstore(void *addr, unsigned int edx_in)
* In case you wonder 'rep xcrypt*' instructions above are *not*
* affected by the Direction Flag and pointers advance toward
* larger addresses unconditionally.
- */
+ */
static inline unsigned char *
-padlock_memcpy(void *dst,const void *src,size_t n)
+padlock_memcpy(void *dst, const void *src, size_t n)
{
- long *d=dst;
- const long *s=src;
+ long *d = dst;
+ const long *s = src;
n /= sizeof(*d);
- do { *d++ = *s++; } while (--n);
+ do { *d++ = *s++;
+ } while (--n);
return dst;
}
@@ -541,13 +542,13 @@ static int padlock_cipher_nids[] = {
NID_aes_256_ofb,
};
static int padlock_cipher_nids_num = (sizeof(padlock_cipher_nids)/
- sizeof(padlock_cipher_nids[0]));
+sizeof(padlock_cipher_nids[0]));
/* Function prototypes ... */
static int padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
- const unsigned char *iv, int enc);
+ const unsigned char *iv, int enc);
static int padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
- const unsigned char *in, size_t nbytes);
+ const unsigned char *in, size_t nbytes);
#define NEAREST_ALIGNED(ptr) ( (unsigned char *)(ptr) + \
( (0x10 - ((size_t)(ptr) & 0x0F)) & 0x0F ) )
@@ -578,23 +579,23 @@ static const EVP_CIPHER padlock_aes_##ksize##_##lmode = { \
NULL \
}
-DECLARE_AES_EVP(128,ecb,ECB);
-DECLARE_AES_EVP(128,cbc,CBC);
-DECLARE_AES_EVP(128,cfb,CFB);
-DECLARE_AES_EVP(128,ofb,OFB);
+DECLARE_AES_EVP(128, ecb, ECB);
+DECLARE_AES_EVP(128, cbc, CBC);
+DECLARE_AES_EVP(128, cfb, CFB);
+DECLARE_AES_EVP(128, ofb, OFB);
-DECLARE_AES_EVP(192,ecb,ECB);
-DECLARE_AES_EVP(192,cbc,CBC);
-DECLARE_AES_EVP(192,cfb,CFB);
-DECLARE_AES_EVP(192,ofb,OFB);
+DECLARE_AES_EVP(192, ecb, ECB);
+DECLARE_AES_EVP(192, cbc, CBC);
+DECLARE_AES_EVP(192, cfb, CFB);
+DECLARE_AES_EVP(192, ofb, OFB);
-DECLARE_AES_EVP(256,ecb,ECB);
-DECLARE_AES_EVP(256,cbc,CBC);
-DECLARE_AES_EVP(256,cfb,CFB);
-DECLARE_AES_EVP(256,ofb,OFB);
+DECLARE_AES_EVP(256, ecb, ECB);
+DECLARE_AES_EVP(256, cbc, CBC);
+DECLARE_AES_EVP(256, cfb, CFB);
+DECLARE_AES_EVP(256, ofb, OFB);
static int
-padlock_ciphers (ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid)
+padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid)
{
/* No specific cipher => return a list of supported nids ... */
if (!cipher) {
@@ -604,49 +605,46 @@ padlock_ciphers (ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid
/* ... or the requested "cipher" otherwise */
switch (nid) {
- case NID_aes_128_ecb:
- *cipher = &padlock_aes_128_ecb;
- break;
- case NID_aes_128_cbc:
- *cipher = &padlock_aes_128_cbc;
- break;
- case NID_aes_128_cfb:
- *cipher = &padlock_aes_128_cfb;
- break;
- case NID_aes_128_ofb:
- *cipher = &padlock_aes_128_ofb;
- break;
-
- case NID_aes_192_ecb:
- *cipher = &padlock_aes_192_ecb;
- break;
- case NID_aes_192_cbc:
- *cipher = &padlock_aes_192_cbc;
- break;
- case NID_aes_192_cfb:
- *cipher = &padlock_aes_192_cfb;
- break;
- case NID_aes_192_ofb:
- *cipher = &padlock_aes_192_ofb;
- break;
-
- case NID_aes_256_ecb:
- *cipher = &padlock_aes_256_ecb;
- break;
- case NID_aes_256_cbc:
- *cipher = &padlock_aes_256_cbc;
- break;
- case NID_aes_256_cfb:
- *cipher = &padlock_aes_256_cfb;
- break;
- case NID_aes_256_ofb:
- *cipher = &padlock_aes_256_ofb;
- break;
-
- default:
- /* Sorry, we don't support this NID */
- *cipher = NULL;
- return 0;
+ case NID_aes_128_ecb:
+ *cipher = &padlock_aes_128_ecb;
+ break;
+ case NID_aes_128_cbc:
+ *cipher = &padlock_aes_128_cbc;
+ break;
+ case NID_aes_128_cfb:
+ *cipher = &padlock_aes_128_cfb;
+ break;
+ case NID_aes_128_ofb:
+ *cipher = &padlock_aes_128_ofb;
+ break;
+ case NID_aes_192_ecb:
+ *cipher = &padlock_aes_192_ecb;
+ break;
+ case NID_aes_192_cbc:
+ *cipher = &padlock_aes_192_cbc;
+ break;
+ case NID_aes_192_cfb:
+ *cipher = &padlock_aes_192_cfb;
+ break;
+ case NID_aes_192_ofb:
+ *cipher = &padlock_aes_192_ofb;
+ break;
+ case NID_aes_256_ecb:
+ *cipher = &padlock_aes_256_ecb;
+ break;
+ case NID_aes_256_cbc:
+ *cipher = &padlock_aes_256_cbc;
+ break;
+ case NID_aes_256_cfb:
+ *cipher = &padlock_aes_256_cfb;
+ break;
+ case NID_aes_256_ofb:
+ *cipher = &padlock_aes_256_ofb;
+ break;
+ default:
+ /* Sorry, we don't support this NID */
+ *cipher = NULL;
+ return 0;
}
return 1;
@@ -655,12 +653,13 @@ padlock_ciphers (ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid
/* Prepare the encryption key for PadLock usage */
static int
padlock_aes_init_key (EVP_CIPHER_CTX *ctx, const unsigned char *key,
- const unsigned char *iv, int enc)
+ const unsigned char *iv, int enc)
{
struct padlock_cipher_data *cdata;
int key_len = EVP_CIPHER_CTX_key_length(ctx) * 8;
- if (key==NULL) return 0; /* ERROR */
+ if (key == NULL)
+ return 0; /* ERROR */
cdata = ALIGNED_CIPHER_DATA(ctx);
memset(cdata, 0, sizeof(struct padlock_cipher_data));
@@ -673,38 +672,38 @@ padlock_aes_init_key (EVP_CIPHER_CTX *ctx, const unsigned char *key,
cdata->cword.b.rounds = 10 + (key_len - 128) / 32;
cdata->cword.b.ksize = (key_len - 128) / 64;
- switch(key_len) {
- case 128:
- /* PadLock can generate an extended key for
- AES128 in hardware */
- memcpy(cdata->ks.rd_key, key, AES_KEY_SIZE_128);
- cdata->cword.b.keygen = 0;
- break;
+ switch (key_len) {
+ case 128:
+ /* PadLock can generate an extended key for
+ AES128 in hardware */
+ memcpy(cdata->ks.rd_key, key, AES_KEY_SIZE_128);
+ cdata->cword.b.keygen = 0;
+ break;
- case 192:
- case 256:
- /* Generate an extended AES key in software.
- Needed for AES192/AES256 */
- /* Well, the above applies to Stepping 8 CPUs
- and is listed as hardware errata. They most
- likely will fix it at some point and then
- a check for stepping would be due here. */
- if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_CFB_MODE ||
- EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_OFB_MODE ||
- enc)
- AES_set_encrypt_key(key, key_len, &cdata->ks);
- else
- AES_set_decrypt_key(key, key_len, &cdata->ks);
+ case 192:
+ case 256:
+ /* Generate an extended AES key in software.
+ Needed for AES192/AES256 */
+ /* Well, the above applies to Stepping 8 CPUs
+ and is listed as hardware errata. They most
+ likely will fix it at some point and then
+ a check for stepping would be due here. */
+ if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_CFB_MODE ||
+ EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_OFB_MODE ||
+ enc)
+ AES_set_encrypt_key(key, key_len, &cdata->ks);
+ else
+ AES_set_decrypt_key(key, key_len, &cdata->ks);
#ifndef AES_ASM
- /* OpenSSL C functions use byte-swapped extended key. */
- padlock_bswapl(&cdata->ks);
+ /* OpenSSL C functions use byte-swapped extended key. */
+ padlock_bswapl(&cdata->ks);
#endif
- cdata->cword.b.keygen = 1;
- break;
+ cdata->cword.b.keygen = 1;
+ break;
- default:
- /* ERROR */
- return 0;
+ default:
+ /* ERROR */
+ return 0;
}
/*
@@ -717,7 +716,7 @@ padlock_aes_init_key (EVP_CIPHER_CTX *ctx, const unsigned char *key,
return 1;
}
-/*
+/*
* Simplified version of padlock_aes_cipher() used when
* 1) both input and output buffers are at aligned addresses.
* or when
@@ -725,7 +724,7 @@ padlock_aes_init_key (EVP_CIPHER_CTX *ctx, const unsigned char *key,
*/
static int
padlock_aes_cipher_omnivorous(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
- const unsigned char *in_arg, size_t nbytes)
+ const unsigned char *in_arg, size_t nbytes)
{
struct padlock_cipher_data *cdata;
void *iv;
@@ -735,24 +734,28 @@ padlock_aes_cipher_omnivorous(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
switch (EVP_CIPHER_CTX_mode(ctx)) {
case EVP_CIPH_ECB_MODE:
- padlock_xcrypt_ecb(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg);
+ padlock_xcrypt_ecb(nbytes / AES_BLOCK_SIZE, cdata,
+ out_arg, in_arg);
break;
case EVP_CIPH_CBC_MODE:
memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
- iv = padlock_xcrypt_cbc(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg);
+ iv = padlock_xcrypt_cbc(nbytes / AES_BLOCK_SIZE, cdata,
+ out_arg, in_arg);
memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
break;
case EVP_CIPH_CFB_MODE:
memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
- iv = padlock_xcrypt_cfb(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg);
+ iv = padlock_xcrypt_cfb(nbytes / AES_BLOCK_SIZE, cdata,
+ out_arg, in_arg);
memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
break;
case EVP_CIPH_OFB_MODE:
memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
- padlock_xcrypt_ofb(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg);
+ padlock_xcrypt_ofb(nbytes / AES_BLOCK_SIZE, cdata,
+ out_arg, in_arg);
memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE);
break;
@@ -772,23 +775,24 @@ padlock_aes_cipher_omnivorous(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
# error "insane PADLOCK_CHUNK..."
#endif
-/* Re-align the arguments to 16-Bytes boundaries and run the
+/* Re-align the arguments to 16-Bytes boundaries and run the
encryption function itself. This function is not AES-specific. */
static int
padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
- const unsigned char *in_arg, size_t nbytes)
+ const unsigned char *in_arg, size_t nbytes)
{
struct padlock_cipher_data *cdata;
const void *inp;
unsigned char *out;
void *iv;
int inp_misaligned, out_misaligned, realign_in_loop;
- size_t chunk, allocated=0;
+ size_t chunk, allocated = 0;
/* ctx->num is maintained in byte-oriented modes,
such as CFB and OFB... */
- if ((chunk = ctx->num)) { /* borrow chunk variable */
- unsigned char *ivp=ctx->iv;
+ if ((chunk = ctx->num)) {
+ /* borrow chunk variable */
+ unsigned char *ivp = ctx->iv;
switch (EVP_CIPHER_CTX_mode(ctx)) {
case EVP_CIPH_CFB_MODE:
@@ -796,28 +800,29 @@ padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
return 0; /* bogus value */
if (ctx->encrypt)
- while (chunk<AES_BLOCK_SIZE && nbytes!=0) {
+ while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
ivp[chunk] = *(out_arg++) = *(in_arg++) ^ ivp[chunk];
chunk++, nbytes--;
}
- else while (chunk<AES_BLOCK_SIZE && nbytes!=0) {
+ else
+ while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
unsigned char c = *(in_arg++);
*(out_arg++) = c ^ ivp[chunk];
ivp[chunk++] = c, nbytes--;
}
- ctx->num = chunk%AES_BLOCK_SIZE;
+ ctx->num = chunk % AES_BLOCK_SIZE;
break;
case EVP_CIPH_OFB_MODE:
if (chunk >= AES_BLOCK_SIZE)
return 0; /* bogus value */
- while (chunk<AES_BLOCK_SIZE && nbytes!=0) {
+ while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
*(out_arg++) = *(in_arg++) ^ ivp[chunk];
chunk++, nbytes--;
}
- ctx->num = chunk%AES_BLOCK_SIZE;
+ ctx->num = chunk % AES_BLOCK_SIZE;
break;
}
}
@@ -841,8 +846,9 @@ padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
same as for software alignment below or ~3x. They promise to
improve it in the future, but for now we can just as well
pretend that it can only handle aligned input... */
- if (!padlock_aes_align_required && (nbytes%AES_BLOCK_SIZE)==0)
- return padlock_aes_cipher_omnivorous(ctx, out_arg, in_arg, nbytes);
+ if (!padlock_aes_align_required && (nbytes % AES_BLOCK_SIZE) == 0)
+ return padlock_aes_cipher_omnivorous(ctx, out_arg, in_arg,
+ nbytes);
inp_misaligned = (((size_t)in_arg) & 0x0F);
out_misaligned = (((size_t)out_arg) & 0x0F);
@@ -853,21 +859,22 @@ padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
* in order to improve L1 cache utilization... */
realign_in_loop = out_misaligned|inp_misaligned;
- if (!realign_in_loop && (nbytes%AES_BLOCK_SIZE)==0)
- return padlock_aes_cipher_omnivorous(ctx, out_arg, in_arg, nbytes);
+ if (!realign_in_loop && (nbytes % AES_BLOCK_SIZE) == 0)
+ return padlock_aes_cipher_omnivorous(ctx, out_arg, in_arg,
+ nbytes);
/* this takes one "if" out of the loops */
- chunk = nbytes;
+ chunk = nbytes;
chunk %= PADLOCK_CHUNK;
- if (chunk==0) chunk = PADLOCK_CHUNK;
+ if (chunk == 0)
+ chunk = PADLOCK_CHUNK;
if (out_misaligned) {
/* optmize for small input */
- allocated = (chunk<nbytes?PADLOCK_CHUNK:nbytes);
+ allocated = (chunk < nbytes ? PADLOCK_CHUNK : nbytes);
out = alloca(0x10 + allocated);
out = NEAREST_ALIGNED(out);
- }
- else
+ } else
out = out_arg;
cdata = ALIGNED_CIPHER_DATA(ctx);
@@ -875,77 +882,84 @@ padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
switch (EVP_CIPHER_CTX_mode(ctx)) {
case EVP_CIPH_ECB_MODE:
- do {
+ do {
if (inp_misaligned)
inp = padlock_memcpy(out, in_arg, chunk);
else
inp = in_arg;
in_arg += chunk;
- padlock_xcrypt_ecb(chunk/AES_BLOCK_SIZE, cdata, out, inp);
+ padlock_xcrypt_ecb(chunk / AES_BLOCK_SIZE, cdata,
+ out, inp);
if (out_misaligned)
- out_arg = padlock_memcpy(out_arg, out, chunk) + chunk;
+ out_arg = padlock_memcpy(out_arg, out, chunk) +
+ chunk;
else
- out = out_arg+=chunk;
+ out = out_arg += chunk;
nbytes -= chunk;
- chunk = PADLOCK_CHUNK;
+ chunk = PADLOCK_CHUNK;
} while (nbytes);
break;
case EVP_CIPH_CBC_MODE:
memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
goto cbc_shortcut;
- do {
+ do {
if (iv != cdata->iv)
memcpy(cdata->iv, iv, AES_BLOCK_SIZE);
chunk = PADLOCK_CHUNK;
- cbc_shortcut: /* optimize for small input */
+ cbc_shortcut: /* optimize for small input */
if (inp_misaligned)
inp = padlock_memcpy(out, in_arg, chunk);
else
inp = in_arg;
in_arg += chunk;
- iv = padlock_xcrypt_cbc(chunk/AES_BLOCK_SIZE, cdata, out, inp);
+ iv = padlock_xcrypt_cbc(chunk / AES_BLOCK_SIZE, cdata,
+ out, inp);
if (out_misaligned)
- out_arg = padlock_memcpy(out_arg, out, chunk) + chunk;
+ out_arg = padlock_memcpy(out_arg, out, chunk) +
+ chunk;
else
- out = out_arg+=chunk;
-
+ out = out_arg += chunk;
} while (nbytes -= chunk);
memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
break;
case EVP_CIPH_CFB_MODE:
memcpy (iv = cdata->iv, ctx->iv, AES_BLOCK_SIZE);
- chunk &= ~(AES_BLOCK_SIZE-1);
- if (chunk) goto cfb_shortcut;
- else goto cfb_skiploop;
- do {
+ chunk &= ~(AES_BLOCK_SIZE - 1);
+ if (chunk)
+ goto cfb_shortcut;
+ else
+ goto cfb_skiploop;
+ do {
if (iv != cdata->iv)
memcpy(cdata->iv, iv, AES_BLOCK_SIZE);
chunk = PADLOCK_CHUNK;
- cfb_shortcut: /* optimize for small input */
+ cfb_shortcut: /* optimize for small input */
if (inp_misaligned)
inp = padlock_memcpy(out, in_arg, chunk);
else
inp = in_arg;
in_arg += chunk;
- iv = padlock_xcrypt_cfb(chunk/AES_BLOCK_SIZE, cdata, out, inp);
+ iv = padlock_xcrypt_cfb(chunk / AES_BLOCK_SIZE, cdata,
+ out, inp);
if (out_misaligned)
- out_arg = padlock_memcpy(out_arg, out, chunk) + chunk;
+ out_arg = padlock_memcpy(out_arg, out, chunk) +
+ chunk;
else
- out = out_arg+=chunk;
+ out = out_arg += chunk;
nbytes -= chunk;
} while (nbytes >= AES_BLOCK_SIZE);
- cfb_skiploop:
+cfb_skiploop:
if (nbytes) {
unsigned char *ivp = cdata->iv;
@@ -955,19 +969,19 @@ padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
}
ctx->num = nbytes;
if (cdata->cword.b.encdec) {
- cdata->cword.b.encdec=0;
+ cdata->cword.b.encdec = 0;
padlock_reload_key();
- padlock_xcrypt_ecb(1,cdata,ivp,ivp);
- cdata->cword.b.encdec=1;
+ padlock_xcrypt_ecb(1, cdata, ivp, ivp);
+ cdata->cword.b.encdec = 1;
padlock_reload_key();
- while(nbytes) {
+ while (nbytes) {
unsigned char c = *(in_arg++);
*(out_arg++) = c ^ *ivp;
*(ivp++) = c, nbytes--;
}
- }
- else { padlock_reload_key();
- padlock_xcrypt_ecb(1,cdata,ivp,ivp);
+ } else {
+ padlock_reload_key();
+ padlock_xcrypt_ecb(1, cdata, ivp, ivp);
padlock_reload_key();
while (nbytes) {
*ivp = *(out_arg++) = *(in_arg++) ^ *ivp;
@@ -981,7 +995,7 @@ padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
case EVP_CIPH_OFB_MODE:
memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
- chunk &= ~(AES_BLOCK_SIZE-1);
+ chunk &= ~(AES_BLOCK_SIZE - 1);
if (chunk) do {
if (inp_misaligned)
inp = padlock_memcpy(out, in_arg, chunk);
@@ -989,15 +1003,17 @@ padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
inp = in_arg;
in_arg += chunk;
- padlock_xcrypt_ofb(chunk/AES_BLOCK_SIZE, cdata, out, inp);
+ padlock_xcrypt_ofb(chunk / AES_BLOCK_SIZE, cdata,
+ out, inp);
if (out_misaligned)
- out_arg = padlock_memcpy(out_arg, out, chunk) + chunk;
+ out_arg = padlock_memcpy(out_arg, out, chunk) +
+ chunk;
else
- out = out_arg+=chunk;
+ out = out_arg += chunk;
nbytes -= chunk;
- chunk = PADLOCK_CHUNK;
+ chunk = PADLOCK_CHUNK;
} while (nbytes >= AES_BLOCK_SIZE);
if (nbytes) {
@@ -1005,7 +1021,7 @@ padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
ctx->num = nbytes;
padlock_reload_key(); /* empirically found */
- padlock_xcrypt_ecb(1,cdata,ivp,ivp);
+ padlock_xcrypt_ecb(1, cdata, ivp, ivp);
padlock_reload_key(); /* empirically found */
while (nbytes) {
*(out_arg++) = *(in_arg++) ^ *ivp;
@@ -1022,9 +1038,10 @@ padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
/* Clean the realign buffer if it was used */
if (out_misaligned) {
- volatile unsigned long *p=(void *)out;
- size_t n = allocated/sizeof(*p);
- while (n--) *p++=0;
+ volatile unsigned long *p = (void *)out;
+ size_t n = allocated/sizeof(*p);
+ while (n--)
+ *p++ = 0;
}
memset(cdata->iv, 0, AES_BLOCK_SIZE);
@@ -1041,7 +1058,7 @@ padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
* (posted at http://www.via.com.tw/en/viac3/c3.jsp) nor does it
* provide meaningful error control...
*/
-/* Wrapper that provides an interface between the API and
+/* Wrapper that provides an interface between the API and
the raw PadLock RNG */
static int
padlock_rand_bytes(unsigned char *output, int count)
@@ -1050,25 +1067,33 @@ padlock_rand_bytes(unsigned char *output, int count)
while (count >= 8) {
eax = padlock_xstore(output, 0);
- if (!(eax&(1<<6))) return 0; /* RNG disabled */
+ if (!(eax & (1 << 6)))
+ return 0; /* RNG disabled */
/* this ---vv--- covers DC bias, Raw Bits and String Filter */
- if (eax&(0x1F<<10)) return 0;
- if ((eax&0x1F)==0) continue; /* no data, retry... */
- if ((eax&0x1F)!=8) return 0; /* fatal failure... */
+ if (eax & (0x1F << 10))
+ return 0;
+ if ((eax & 0x1F) == 0)
+ continue; /* no data, retry... */
+ if ((eax & 0x1F) != 8)
+ return 0; /* fatal failure... */
output += 8;
- count -= 8;
+ count -= 8;
}
while (count > 0) {
eax = padlock_xstore(&buf, 3);
- if (!(eax&(1<<6))) return 0; /* RNG disabled */
+ if (!(eax & (1 << 6)))
+ return 0; /* RNG disabled */
/* this ---vv--- covers DC bias, Raw Bits and String Filter */
- if (eax&(0x1F<<10)) return 0;
- if ((eax&0x1F)==0) continue; /* no data, retry... */
- if ((eax&0x1F)!=1) return 0; /* fatal failure... */
+ if (eax & (0x1F << 10))
+ return 0;
+ if ((eax & 0x1F) == 0)
+ continue; /* no data, retry... */
+ if ((eax & 0x1F) != 1)
+ return 0; /* fatal failure... */
*output++ = (unsigned char)buf;
count--;
}
- *(volatile unsigned int *)&buf=0;
+ *(volatile unsigned int *)&buf = 0;
return 1;
}
@@ -1089,10 +1114,11 @@ static RAND_METHOD padlock_rand = {
#else /* !COMPILE_HW_PADLOCK */
#ifndef OPENSSL_NO_DYNAMIC_ENGINE
-extern
-int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns);
-extern
-int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns) { return 0; }
+extern int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns);
+extern int
+bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns) {
+ return 0;
+}
IMPLEMENT_DYNAMIC_CHECK_FN()
#endif
#endif /* COMPILE_HW_PADLOCK */
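
The eng_padlock.c hunks above also show the KNF rule for switch statements: case labels are placed at the same indentation level as the switch keyword rather than one level deeper (see padlock_ciphers() and padlock_aes_init_key()). A minimal sketch of that layout, using a hypothetical function and values rather than anything from the diff:

const char *
example_mode_name(int mode)
{
	const char *name;

	switch (mode) {
	case 0:
		name = "ecb";
		break;
	case 1:
		name = "cbc";
		break;
	default:
		name = NULL;
		break;
	}
	return name;
}
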
diff --git a/lib/libcrypto/engine/eng_pkey.c b/lib/libcrypto/engine/eng_pkey.c
index 410a9c3373a..dc832450a68 100644
--- a/lib/libcrypto/engine/eng_pkey.c
+++ b/lib/libcrypto/engine/eng_pkey.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: eng_pkey.c,v 1.4 2014/06/12 15:49:29 deraadt Exp $ */
+/* $OpenBSD: eng_pkey.c,v 1.5 2014/06/22 12:05:09 jsing Exp $ */
/* ====================================================================
* Copyright (c) 1999-2001 The OpenSSL Project. All rights reserved.
*
@@ -7,7 +7,7 @@
* are met:
*
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
@@ -57,140 +57,137 @@
/* Basic get/set stuff */
-int ENGINE_set_load_privkey_function(ENGINE *e, ENGINE_LOAD_KEY_PTR loadpriv_f)
- {
+int
+ENGINE_set_load_privkey_function(ENGINE *e, ENGINE_LOAD_KEY_PTR loadpriv_f)
+{
e->load_privkey = loadpriv_f;
return 1;
- }
+}
-int ENGINE_set_load_pubkey_function(ENGINE *e, ENGINE_LOAD_KEY_PTR loadpub_f)
- {
+int
+ENGINE_set_load_pubkey_function(ENGINE *e, ENGINE_LOAD_KEY_PTR loadpub_f)
+{
e->load_pubkey = loadpub_f;
return 1;
- }
+}
-int ENGINE_set_load_ssl_client_cert_function(ENGINE *e,
- ENGINE_SSL_CLIENT_CERT_PTR loadssl_f)
- {
+int
+ENGINE_set_load_ssl_client_cert_function(ENGINE *e,
+ ENGINE_SSL_CLIENT_CERT_PTR loadssl_f)
+{
e->load_ssl_client_cert = loadssl_f;
return 1;
- }
+}
-ENGINE_LOAD_KEY_PTR ENGINE_get_load_privkey_function(const ENGINE *e)
- {
+ENGINE_LOAD_KEY_PTR
+ENGINE_get_load_privkey_function(const ENGINE *e)
+{
return e->load_privkey;
- }
+}
-ENGINE_LOAD_KEY_PTR ENGINE_get_load_pubkey_function(const ENGINE *e)
- {
+ENGINE_LOAD_KEY_PTR
+ENGINE_get_load_pubkey_function(const ENGINE *e)
+{
return e->load_pubkey;
- }
+}
-ENGINE_SSL_CLIENT_CERT_PTR ENGINE_get_ssl_client_cert_function(const ENGINE *e)
- {
+ENGINE_SSL_CLIENT_CERT_PTR
+ENGINE_get_ssl_client_cert_function(const ENGINE *e)
+{
return e->load_ssl_client_cert;
- }
+}
/* API functions to load public/private keys */
-EVP_PKEY *ENGINE_load_private_key(ENGINE *e, const char *key_id,
- UI_METHOD *ui_method, void *callback_data)
- {
+EVP_PKEY *
+ENGINE_load_private_key(ENGINE *e, const char *key_id, UI_METHOD *ui_method,
+ void *callback_data)
+{
EVP_PKEY *pkey;
- if(e == NULL)
- {
+ if (e == NULL) {
ENGINEerr(ENGINE_F_ENGINE_LOAD_PRIVATE_KEY,
- ERR_R_PASSED_NULL_PARAMETER);
+ ERR_R_PASSED_NULL_PARAMETER);
return 0;
- }
+ }
CRYPTO_w_lock(CRYPTO_LOCK_ENGINE);
- if(e->funct_ref == 0)
- {
+ if (e->funct_ref == 0) {
CRYPTO_w_unlock(CRYPTO_LOCK_ENGINE);
ENGINEerr(ENGINE_F_ENGINE_LOAD_PRIVATE_KEY,
- ENGINE_R_NOT_INITIALISED);
+ ENGINE_R_NOT_INITIALISED);
return 0;
- }
+ }
CRYPTO_w_unlock(CRYPTO_LOCK_ENGINE);
- if (!e->load_privkey)
- {
+ if (!e->load_privkey) {
ENGINEerr(ENGINE_F_ENGINE_LOAD_PRIVATE_KEY,
- ENGINE_R_NO_LOAD_FUNCTION);
+ ENGINE_R_NO_LOAD_FUNCTION);
return 0;
- }
+ }
pkey = e->load_privkey(e, key_id, ui_method, callback_data);
- if (!pkey)
- {
+ if (!pkey) {
ENGINEerr(ENGINE_F_ENGINE_LOAD_PRIVATE_KEY,
- ENGINE_R_FAILED_LOADING_PRIVATE_KEY);
+ ENGINE_R_FAILED_LOADING_PRIVATE_KEY);
return 0;
- }
- return pkey;
}
+ return pkey;
+}
-EVP_PKEY *ENGINE_load_public_key(ENGINE *e, const char *key_id,
- UI_METHOD *ui_method, void *callback_data)
- {
+EVP_PKEY *
+ENGINE_load_public_key(ENGINE *e, const char *key_id, UI_METHOD *ui_method,
+ void *callback_data)
+{
EVP_PKEY *pkey;
- if(e == NULL)
- {
+ if (e == NULL) {
ENGINEerr(ENGINE_F_ENGINE_LOAD_PUBLIC_KEY,
- ERR_R_PASSED_NULL_PARAMETER);
+ ERR_R_PASSED_NULL_PARAMETER);
return 0;
- }
+ }
CRYPTO_w_lock(CRYPTO_LOCK_ENGINE);
- if(e->funct_ref == 0)
- {
+ if (e->funct_ref == 0) {
CRYPTO_w_unlock(CRYPTO_LOCK_ENGINE);
ENGINEerr(ENGINE_F_ENGINE_LOAD_PUBLIC_KEY,
- ENGINE_R_NOT_INITIALISED);
+ ENGINE_R_NOT_INITIALISED);
return 0;
- }
+ }
CRYPTO_w_unlock(CRYPTO_LOCK_ENGINE);
- if (!e->load_pubkey)
- {
+ if (!e->load_pubkey) {
ENGINEerr(ENGINE_F_ENGINE_LOAD_PUBLIC_KEY,
- ENGINE_R_NO_LOAD_FUNCTION);
+ ENGINE_R_NO_LOAD_FUNCTION);
return 0;
- }
+ }
pkey = e->load_pubkey(e, key_id, ui_method, callback_data);
- if (!pkey)
- {
+ if (!pkey) {
ENGINEerr(ENGINE_F_ENGINE_LOAD_PUBLIC_KEY,
- ENGINE_R_FAILED_LOADING_PUBLIC_KEY);
+ ENGINE_R_FAILED_LOADING_PUBLIC_KEY);
return 0;
- }
- return pkey;
}
+ return pkey;
+}
-int ENGINE_load_ssl_client_cert(ENGINE *e, SSL *s,
- STACK_OF(X509_NAME) *ca_dn, X509 **pcert, EVP_PKEY **ppkey,
- STACK_OF(X509) **pother, UI_METHOD *ui_method, void *callback_data)
- {
-
- if(e == NULL)
- {
+int
+ENGINE_load_ssl_client_cert(ENGINE *e, SSL *s, STACK_OF(X509_NAME) *ca_dn,
+ X509 **pcert, EVP_PKEY **ppkey, STACK_OF(X509) **pother,
+ UI_METHOD *ui_method, void *callback_data)
+{
+ if (e == NULL) {
ENGINEerr(ENGINE_F_ENGINE_LOAD_SSL_CLIENT_CERT,
- ERR_R_PASSED_NULL_PARAMETER);
+ ERR_R_PASSED_NULL_PARAMETER);
return 0;
- }
+ }
CRYPTO_w_lock(CRYPTO_LOCK_ENGINE);
- if(e->funct_ref == 0)
- {
+ if (e->funct_ref == 0) {
CRYPTO_w_unlock(CRYPTO_LOCK_ENGINE);
ENGINEerr(ENGINE_F_ENGINE_LOAD_SSL_CLIENT_CERT,
- ENGINE_R_NOT_INITIALISED);
+ ENGINE_R_NOT_INITIALISED);
return 0;
- }
+ }
CRYPTO_w_unlock(CRYPTO_LOCK_ENGINE);
- if (!e->load_ssl_client_cert)
- {
+ if (!e->load_ssl_client_cert) {
ENGINEerr(ENGINE_F_ENGINE_LOAD_SSL_CLIENT_CERT,
- ENGINE_R_NO_LOAD_FUNCTION);
+ ENGINE_R_NO_LOAD_FUNCTION);
return 0;
- }
- return e->load_ssl_client_cert(e, s, ca_dn, pcert, ppkey, pother,
- ui_method, callback_data);
}
+ return e->load_ssl_client_cert(e, s, ca_dn, pcert, ppkey, pother,
+ ui_method, callback_data);
+}
diff --git a/lib/libcrypto/engine/eng_rsax.c b/lib/libcrypto/engine/eng_rsax.c
index f7b38b11560..ee184390702 100644
--- a/lib/libcrypto/engine/eng_rsax.c
+++ b/lib/libcrypto/engine/eng_rsax.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: eng_rsax.c,v 1.6 2014/06/12 15:49:29 deraadt Exp $ */
+/* $OpenBSD: eng_rsax.c,v 1.7 2014/06/22 12:05:09 jsing Exp $ */
/* Copyright (c) 2010-2010 Intel Corp.
* Author: Vinodh.Gopal@intel.com
* Jim Guilford
@@ -85,16 +85,17 @@ static ENGINE *ENGINE_rsax (void);
#endif
void ENGINE_load_rsax (void)
- {
+{
/* On non-x86 CPUs it just returns. */
#ifdef COMPILE_RSAX
ENGINE *toadd = ENGINE_rsax();
- if(!toadd) return;
+ if (!toadd)
+ return;
ENGINE_add(toadd);
ENGINE_free(toadd);
ERR_clear_error();
#endif
- }
+}
#ifdef COMPILE_RSAX
#define E_RSAX_LIB_NAME "rsax engine"
@@ -106,13 +107,14 @@ static int e_rsax_ctrl(ENGINE *e, int cmd, long i, void *p, void (*f)(void));
#ifndef OPENSSL_NO_RSA
/* RSA stuff */
-static int e_rsax_rsa_mod_exp(BIGNUM *r, const BIGNUM *I, RSA *rsa, BN_CTX *ctx);
+static int e_rsax_rsa_mod_exp(BIGNUM *r, const BIGNUM *I, RSA *rsa,
+ BN_CTX *ctx);
static int e_rsax_rsa_finish(RSA *r);
#endif
static const ENGINE_CMD_DEFN e_rsax_cmd_defns[] = {
{0, NULL, NULL, 0}
- };
+};
#ifndef OPENSSL_NO_RSA
/* Our internal RSA_METHOD that we provide pointers to */
@@ -129,21 +131,22 @@ static const char *engine_e_rsax_id = "rsax";
static const char *engine_e_rsax_name = "RSAX engine support";
/* This internal function is used by ENGINE_rsax() */
-static int bind_helper(ENGINE *e)
- {
+static int
+bind_helper(ENGINE *e)
+{
#ifndef OPENSSL_NO_RSA
const RSA_METHOD *meth1;
#endif
- if(!ENGINE_set_id(e, engine_e_rsax_id) ||
- !ENGINE_set_name(e, engine_e_rsax_name) ||
+ if (!ENGINE_set_id(e, engine_e_rsax_id) ||
+ !ENGINE_set_name(e, engine_e_rsax_name) ||
#ifndef OPENSSL_NO_RSA
- !ENGINE_set_RSA(e, &e_rsax_rsa) ||
+ !ENGINE_set_RSA(e, &e_rsax_rsa) ||
#endif
- !ENGINE_set_destroy_function(e, e_rsax_destroy) ||
- !ENGINE_set_init_function(e, e_rsax_init) ||
- !ENGINE_set_finish_function(e, e_rsax_finish) ||
- !ENGINE_set_ctrl_function(e, e_rsax_ctrl) ||
- !ENGINE_set_cmd_defns(e, e_rsax_cmd_defns))
+ !ENGINE_set_destroy_function(e, e_rsax_destroy) ||
+ !ENGINE_set_init_function(e, e_rsax_init) ||
+ !ENGINE_set_finish_function(e, e_rsax_finish) ||
+ !ENGINE_set_ctrl_function(e, e_rsax_ctrl) ||
+ !ENGINE_set_cmd_defns(e, e_rsax_cmd_defns))
return 0;
#ifndef OPENSSL_NO_RSA
@@ -155,64 +158,67 @@ static int bind_helper(ENGINE *e)
e_rsax_rsa.bn_mod_exp = meth1->bn_mod_exp;
#endif
return 1;
- }
+}
-static ENGINE *ENGINE_rsax(void)
- {
+static ENGINE *
+ENGINE_rsax(void)
+{
ENGINE *ret = ENGINE_new();
- if(!ret)
+
+ if (!ret)
return NULL;
- if(!bind_helper(ret))
- {
+ if (!bind_helper(ret)) {
ENGINE_free(ret);
return NULL;
- }
- return ret;
}
+ return ret;
+}
#ifndef OPENSSL_NO_RSA
/* Used to attach our own key-data to an RSA structure */
static int rsax_ex_data_idx = -1;
#endif
-static int e_rsax_destroy(ENGINE *e)
- {
+static int
+e_rsax_destroy(ENGINE *e)
+{
return 1;
- }
+}
/* (de)initialisation functions. */
-static int e_rsax_init(ENGINE *e)
- {
+static int
+e_rsax_init(ENGINE *e)
+{
#ifndef OPENSSL_NO_RSA
if (rsax_ex_data_idx == -1)
- rsax_ex_data_idx = RSA_get_ex_new_index(0,
- NULL,
- NULL, NULL, NULL);
+ rsax_ex_data_idx = RSA_get_ex_new_index(0, NULL, NULL,
+ NULL, NULL);
#endif
if (rsax_ex_data_idx == -1)
return 0;
return 1;
- }
+}
-static int e_rsax_finish(ENGINE *e)
- {
+static int
+e_rsax_finish(ENGINE *e)
+{
return 1;
- }
+}
-static int e_rsax_ctrl(ENGINE *e, int cmd, long i, void *p, void (*f)(void))
- {
+static int
+e_rsax_ctrl(ENGINE *e, int cmd, long i, void *p, void (*f)(void))
+{
int to_return = 1;
- switch(cmd)
- {
- /* The command isn't understood by this engine */
+ switch (cmd) {
+ /* The command isn't understood by this engine */
default:
to_return = 0;
break;
- }
+ }
return to_return;
- }
+}
#ifndef OPENSSL_NO_RSA
@@ -233,69 +239,71 @@ static int interleaved_array_to_bn_512(BIGNUM* b, UINT64 *array);
/* Extract array elements from BIGNUM b
* To set the whole array from b, call with n=8
*/
-static int bn_extract_to_array_512(const BIGNUM* b, unsigned int n, UINT64 *array);
+static int bn_extract_to_array_512(const BIGNUM* b, unsigned int n,
+ UINT64 *array);
struct mod_ctx_512 {
- UINT64 t[8][8];
- UINT64 m[8];
- UINT64 m1[8]; /* 2^278 % m */
- UINT64 m2[8]; /* 2^640 % m */
- UINT64 k1[2]; /* (- 1/m) % 2^128 */
+ UINT64 t[8][8];
+ UINT64 m[8];
+ UINT64 m1[8]; /* 2^278 % m */
+ UINT64 m2[8]; /* 2^640 % m */
+ UINT64 k1[2]; /* (- 1/m) % 2^128 */
};
static int mod_exp_pre_compute_data_512(UINT64 *m, struct mod_ctx_512 *data);
void mod_exp_512(UINT64 *result, /* 512 bits, 8 qwords */
- UINT64 *g, /* 512 bits, 8 qwords */
- UINT64 *exp, /* 512 bits, 8 qwords */
- struct mod_ctx_512 *data);
-
-typedef struct st_e_rsax_mod_ctx
-{
- UINT64 type;
- union {
- struct mod_ctx_512 b512;
- } ctx;
-
+UINT64 *g, /* 512 bits, 8 qwords */
+UINT64 *exp, /* 512 bits, 8 qwords */
+struct mod_ctx_512 *data);
+
+typedef struct st_e_rsax_mod_ctx {
+ UINT64 type;
+ union {
+ struct mod_ctx_512 b512;
+ } ctx;
} E_RSAX_MOD_CTX;
-static E_RSAX_MOD_CTX *e_rsax_get_ctx(RSA *rsa, int idx, BIGNUM* m)
+static E_RSAX_MOD_CTX *
+e_rsax_get_ctx(RSA *rsa, int idx, BIGNUM* m)
{
E_RSAX_MOD_CTX *hptr;
- if (idx < 0 || idx > 2)
- return NULL;
+ if (idx < 0 || idx > 2)
+ return NULL;
hptr = RSA_get_ex_data(rsa, rsax_ex_data_idx);
if (!hptr) {
- hptr = reallocarray(NULL, 3, sizeof(E_RSAX_MOD_CTX));
- if (!hptr) return NULL;
- hptr[2].type = hptr[1].type= hptr[0].type = 0;
- RSA_set_ex_data(rsa, rsax_ex_data_idx, hptr);
- }
-
- if (hptr[idx].type == (UINT64)BN_num_bits(m))
- return hptr+idx;
-
- if (BN_num_bits(m) == 512) {
- UINT64 _m[8];
- bn_extract_to_array_512(m, 8, _m);
- memset( &hptr[idx].ctx.b512, 0, sizeof(struct mod_ctx_512));
- mod_exp_pre_compute_data_512(_m, &hptr[idx].ctx.b512);
+ hptr = reallocarray(NULL, 3, sizeof(E_RSAX_MOD_CTX));
+ if (!hptr)
+ return NULL;
+ hptr[2].type = hptr[1].type = hptr[0].type = 0;
+ RSA_set_ex_data(rsa, rsax_ex_data_idx, hptr);
+ }
+
+ if (hptr[idx].type == (UINT64)BN_num_bits(m))
+ return hptr + idx;
+
+ if (BN_num_bits(m) == 512) {
+ UINT64 _m[8];
+ bn_extract_to_array_512(m, 8, _m);
+ memset( &hptr[idx].ctx.b512, 0, sizeof(struct mod_ctx_512));
+ mod_exp_pre_compute_data_512(_m, &hptr[idx].ctx.b512);
}
- hptr[idx].type = BN_num_bits(m);
- return hptr+idx;
+ hptr[idx].type = BN_num_bits(m);
+ return hptr + idx;
}
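
e_rsax_get_ctx() lazily attaches an array of three E_RSAX_MOD_CTX slots to the RSA key via ex_data (index 0 is used for q, 1 for p and 2 for n in the calls further down) and fills a slot with 512-bit precomputed data the first time a modulus of that size is seen. A stripped-down sketch of the same ex_data caching pattern, with illustrative names only, might look like this:

#include <openssl/rsa.h>
#include <stdlib.h>

struct my_cache {			/* stand-in for E_RSAX_MOD_CTX */
	unsigned long bits;
};

static int my_ex_idx = -1;		/* registered once, process-wide */

static struct my_cache *
get_cache(RSA *rsa)
{
	struct my_cache *c;

	if (my_ex_idx == -1)
		my_ex_idx = RSA_get_ex_new_index(0, NULL, NULL, NULL, NULL);
	if (my_ex_idx == -1)
		return NULL;
	if ((c = RSA_get_ex_data(rsa, my_ex_idx)) == NULL) {
		if ((c = calloc(1, sizeof(*c))) == NULL)
			return NULL;
		/* freed later by the RSA finish callback in the real code */
		RSA_set_ex_data(rsa, my_ex_idx, c);
	}
	return c;
}
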
-static int e_rsax_rsa_finish(RSA *rsa)
- {
+static int
+e_rsax_rsa_finish(RSA *rsa)
+{
E_RSAX_MOD_CTX *hptr = RSA_get_ex_data(rsa, rsax_ex_data_idx);
- if(hptr)
- {
+
+ if (hptr) {
free(hptr);
RSA_set_ex_data(rsa, rsax_ex_data_idx, NULL);
- }
+ }
if (rsa->_method_mod_n)
BN_MONT_CTX_free(rsa->_method_mod_n);
if (rsa->_method_mod_p)
@@ -303,28 +311,28 @@ static int e_rsax_rsa_finish(RSA *rsa)
if (rsa->_method_mod_q)
BN_MONT_CTX_free(rsa->_method_mod_q);
return 1;
- }
-
+}
-static int e_rsax_bn_mod_exp(BIGNUM *r, const BIGNUM *g, const BIGNUM *e,
- const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont, E_RSAX_MOD_CTX* rsax_mod_ctx )
+static int
+e_rsax_bn_mod_exp(BIGNUM *r, const BIGNUM *g, const BIGNUM *e, const BIGNUM *m,
+ BN_CTX *ctx, BN_MONT_CTX *in_mont, E_RSAX_MOD_CTX* rsax_mod_ctx)
{
if (rsax_mod_ctx && BN_get_flags(e, BN_FLG_CONSTTIME) != 0) {
- if (BN_num_bits(m) == 512) {
- UINT64 _r[8];
- UINT64 _g[8];
- UINT64 _e[8];
-
- /* Init the arrays from the BIGNUMs */
- bn_extract_to_array_512(g, 8, _g);
- bn_extract_to_array_512(e, 8, _e);
-
- mod_exp_512(_r, _g, _e, &rsax_mod_ctx->ctx.b512);
- /* Return the result in the BIGNUM */
- interleaved_array_to_bn_512(r, _r);
- return 1;
- }
- }
+ if (BN_num_bits(m) == 512) {
+ UINT64 _r[8];
+ UINT64 _g[8];
+ UINT64 _e[8];
+
+ /* Init the arrays from the BIGNUMs */
+ bn_extract_to_array_512(g, 8, _g);
+ bn_extract_to_array_512(e, 8, _e);
+
+ mod_exp_512(_r, _g, _e, &rsax_mod_ctx->ctx.b512);
+ /* Return the result in the BIGNUM */
+ interleaved_array_to_bn_512(r, _r);
+ return 1;
+ }
+ }
return BN_mod_exp_mont(r, g, e, m, ctx, in_mont);
}
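
The wrapper above only takes the accelerated path when the exponent carries BN_FLG_CONSTTIME and the modulus is exactly 512 bits; everything else falls through to BN_mod_exp_mont(). The flag is normally applied by aliasing a secret exponent with BN_with_flags(), as the CRT code later in this file does for dmp1, dmq1 and d. A minimal sketch of that idiom (caller supplies all BIGNUMs; not code from this diff):

#include <openssl/bn.h>

/*
 * Sketch: run a modular exponentiation with a secret exponent marked
 * constant-time. BN_with_flags() makes a shallow alias of d that shares
 * its words but adds BN_FLG_CONSTTIME, the flag checked above.
 */
static int
const_time_mod_exp(BIGNUM *r, const BIGNUM *g, BIGNUM *d, const BIGNUM *n,
    BN_CTX *ctx)
{
	BIGNUM local_d;
	BIGNUM *p = &local_d;

	BN_init(&local_d);
	BN_with_flags(p, d, BN_FLG_CONSTTIME);
	return BN_mod_exp_mont(r, g, p, n, ctx, NULL);
}
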
@@ -339,146 +347,177 @@ static int e_rsax_bn_mod_exp(BIGNUM *r, const BIGNUM *g, const BIGNUM *e,
* Local method: extracts a piece from a BIGNUM, to fit it into
* an array. Call with n=8 to extract an entire 512-bit BIGNUM
*/
-static int bn_extract_to_array_512(const BIGNUM* b, unsigned int n, UINT64 *array)
+static int
+bn_extract_to_array_512(const BIGNUM* b, unsigned int n, UINT64 *array)
{
int i;
UINT64 tmp;
unsigned char bn_buff[64];
+
memset(bn_buff, 0, 64);
if (BN_num_bytes(b) > 64) {
printf ("Can't support this byte size\n");
- return 0; }
- if (BN_num_bytes(b)!=0) {
- if (!BN_bn2bin(b, bn_buff+(64-BN_num_bytes(b)))) {
+ return 0;
+ }
+ if (BN_num_bytes(b) != 0) {
+ if (!BN_bn2bin(b, bn_buff + (64 - BN_num_bytes(b)))) {
printf ("Error's in bn2bin\n");
/* We have to error, here */
- return 0; } }
+ return 0;
+ }
+ }
while (n-- > 0) {
array[n] = 0;
- for (i=7; i>=0; i--) {
- tmp = bn_buff[63-(n*8+i)];
- array[n] |= tmp << (8*i); } }
+ for (i = 7; i >= 0; i--) {
+ tmp = bn_buff[63 - (n*8 + i)];
+ array[n] |= tmp << (8*i);
+ }
+ }
return 1;
}
/* Init a 512-bit BIGNUM from the UINT64*_ (8 * 64) interleaved array */
-static int interleaved_array_to_bn_512(BIGNUM* b, UINT64 *array)
+static int
+interleaved_array_to_bn_512(BIGNUM* b, UINT64 *array)
{
unsigned char tmp[64];
- int n=8;
+ int n = 8;
int i;
+
while (n-- > 0) {
- for (i = 7; i>=0; i--) {
- tmp[63-(n*8+i)] = (unsigned char)(array[n]>>(8*i)); } }
+ for (i = 7; i >= 0; i--) {
+ tmp[63 - (n * 8 + i)] =
+ (unsigned char)(array[n] >> (8 * i));
+ }
+ }
BN_bin2bn(tmp, 64, b);
- return 0;
+ return 0;
}
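
bn_extract_to_array_512() and interleaved_array_to_bn_512() convert between a BIGNUM of at most 512 bits and eight 64-bit limbs, going through a 64-byte big-endian buffer so that array[0] ends up as the least significant qword. The standalone sketch below reproduces just the limb packing on plain bytes, with no BIGNUMs involved, to make the indexing easier to follow:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	unsigned char buf[64];		/* big-endian byte dump, as BN_bn2bin gives */
	uint64_t limb[8];		/* limb[0] = least significant qword */
	int n, i;

	memset(buf, 0, sizeof(buf));
	buf[63] = 0x01;			/* the value 0x0201, big endian */
	buf[62] = 0x02;

	for (n = 0; n < 8; n++) {
		limb[n] = 0;
		for (i = 7; i >= 0; i--)
			limb[n] |= (uint64_t)buf[63 - (n * 8 + i)] << (8 * i);
	}
	printf("limb[0] = %llx\n", (unsigned long long)limb[0]);	/* 201 */
	return 0;
}
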
-
/* The main 512bit precompute call */
-static int mod_exp_pre_compute_data_512(UINT64 *m, struct mod_ctx_512 *data)
- {
- BIGNUM two_768, two_640, two_128, two_512, tmp, _m, tmp2;
-
- /* We need a BN_CTX for the modulo functions */
- BN_CTX* ctx;
- /* Some tmps */
- UINT64 _t[8];
- int i, j, ret = 0;
-
- /* Init _m with m */
- BN_init(&_m);
- interleaved_array_to_bn_512(&_m, m);
- memset(_t, 0, 64);
-
- /* Inits */
- BN_init(&two_768);
- BN_init(&two_640);
- BN_init(&two_128);
- BN_init(&two_512);
- BN_init(&tmp);
- BN_init(&tmp2);
-
- /* Create our context */
- if ((ctx=BN_CTX_new()) == NULL) { goto err; }
+static int
+mod_exp_pre_compute_data_512(UINT64 *m, struct mod_ctx_512 *data)
+{
+ BIGNUM two_768, two_640, two_128, two_512, tmp, _m, tmp2;
+
+ /* We need a BN_CTX for the modulo functions */
+ BN_CTX* ctx;
+ /* Some tmps */
+ UINT64 _t[8];
+ int i, j, ret = 0;
+
+ /* Init _m with m */
+ BN_init(&_m);
+ interleaved_array_to_bn_512(&_m, m);
+ memset(_t, 0, 64);
+
+ /* Inits */
+ BN_init(&two_768);
+ BN_init(&two_640);
+ BN_init(&two_128);
+ BN_init(&two_512);
+ BN_init(&tmp);
+ BN_init(&tmp2);
+
+ /* Create our context */
+ if ((ctx = BN_CTX_new()) == NULL) {
+ goto err;
+ }
BN_CTX_start(ctx);
- /*
- * For production, if you care, these only need to be set once,
- * and may be made constants.
- */
- BN_lshift(&two_768, BN_value_one(), 768);
- BN_lshift(&two_640, BN_value_one(), 640);
- BN_lshift(&two_128, BN_value_one(), 128);
- BN_lshift(&two_512, BN_value_one(), 512);
-
- if (0 == (m[7] & 0x8000000000000000)) {
- exit(1);
- }
- if (0 == (m[0] & 0x1)) { /* Odd modulus required for Mont */
- exit(1);
- }
-
- /* Precompute m1 */
- BN_mod(&tmp, &two_768, &_m, ctx);
- if (!bn_extract_to_array_512(&tmp, 8, &data->m1[0])) {
- goto err; }
-
- /* Precompute m2 */
- BN_mod(&tmp, &two_640, &_m, ctx);
- if (!bn_extract_to_array_512(&tmp, 8, &data->m2[0])) {
- goto err;
- }
-
- /*
- * Precompute k1, a 128b number = ((-1)* m-1 ) mod 2128; k1 should
- * be non-negative.
- */
- BN_mod_inverse(&tmp, &_m, &two_128, ctx);
- if (!BN_is_zero(&tmp)) { BN_sub(&tmp, &two_128, &tmp); }
- if (!bn_extract_to_array_512(&tmp, 2, &data->k1[0])) {
- goto err; }
-
- /* Precompute t */
- for (i=0; i<8; i++) {
- BN_zero(&tmp);
- if (i & 1) { BN_add(&tmp, &two_512, &tmp); }
- if (i & 2) { BN_add(&tmp, &two_512, &tmp); }
- if (i & 4) { BN_add(&tmp, &two_640, &tmp); }
-
- BN_nnmod(&tmp2, &tmp, &_m, ctx);
- if (!bn_extract_to_array_512(&tmp2, 8, _t)) {
- goto err; }
- for (j=0; j<8; j++) data->t[j][i] = _t[j]; }
-
- /* Precompute m */
- for (i=0; i<8; i++) {
- data->m[i] = m[i]; }
-
- ret = 1;
+ /*
+ * For production, if you care, these only need to be set once,
+ * and may be made constants.
+ */
+ BN_lshift(&two_768, BN_value_one(), 768);
+ BN_lshift(&two_640, BN_value_one(), 640);
+ BN_lshift(&two_128, BN_value_one(), 128);
+ BN_lshift(&two_512, BN_value_one(), 512);
+
+ if (0 == (m[7] & 0x8000000000000000)) {
+ exit(1);
+ }
+ if (0 == (m[0] & 0x1)) {
+ /* Odd modulus required for Mont */
+ exit(1);
+ }
+
+ /* Precompute m1 */
+ BN_mod(&tmp, &two_768, &_m, ctx);
+ if (!bn_extract_to_array_512(&tmp, 8, &data->m1[0])) {
+ goto err;
+ }
+
+ /* Precompute m2 */
+ BN_mod(&tmp, &two_640, &_m, ctx);
+ if (!bn_extract_to_array_512(&tmp, 8, &data->m2[0])) {
+ goto err;
+ }
+
+ /*
+ * Precompute k1, a 128b number = ((-1)* m-1 ) mod 2128; k1 should
+ * be non-negative.
+ */
+ BN_mod_inverse(&tmp, &_m, &two_128, ctx);
+ if (!BN_is_zero(&tmp)) {
+ BN_sub(&tmp, &two_128, &tmp);
+ }
+ if (!bn_extract_to_array_512(&tmp, 2, &data->k1[0])) {
+ goto err;
+ }
+
+ /* Precompute t */
+ for (i = 0; i < 8; i++) {
+ BN_zero(&tmp);
+ if (i & 1) {
+ BN_add(&tmp, &two_512, &tmp);
+ }
+ if (i & 2) {
+ BN_add(&tmp, &two_512, &tmp);
+ }
+ if (i & 4) {
+ BN_add(&tmp, &two_640, &tmp);
+ }
+
+ BN_nnmod(&tmp2, &tmp, &_m, ctx);
+ if (!bn_extract_to_array_512(&tmp2, 8, _t)) {
+ goto err;
+ }
+ for (j = 0; j < 8; j++)
+ data->t[j][i] = _t[j];
+ }
+
+ /* Precompute m */
+ for (i = 0; i < 8; i++) {
+ data->m[i] = m[i];
+ }
+
+ ret = 1;
err:
- /* Cleanup */
+ /* Cleanup */
if (ctx != NULL) {
- BN_CTX_end(ctx); BN_CTX_free(ctx); }
- BN_free(&two_768);
- BN_free(&two_640);
- BN_free(&two_128);
- BN_free(&two_512);
- BN_free(&tmp);
- BN_free(&tmp2);
- BN_free(&_m);
-
- return ret;
-}
+ BN_CTX_end(ctx);
+ BN_CTX_free(ctx);
+ }
+ BN_free(&two_768);
+ BN_free(&two_640);
+ BN_free(&two_128);
+ BN_free(&two_512);
+ BN_free(&tmp);
+ BN_free(&tmp2);
+ BN_free(&_m);
+ return ret;
+}
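
The precompute step stores the modulus m itself, the reductions of 2^768 and 2^640 mod m (the struct comment further up labels m1 as "2^278 % m", but the code reduces 2^768, so that comment looks like a digit transposition), a table t[][] of small reduced multiples, and k1 = (-m^-1) mod 2^128, the Montgomery-style constant that lets the assembly fold reductions in without divisions. As an illustration only, the single-word analogue of k1 can be computed by Hensel lifting; the sketch below is not taken from the diff:

#include <stdint.h>
#include <stdio.h>

static uint64_t
mont_n0(uint64_t m)
{
	uint64_t inv = m;	/* for odd m, m is its own inverse mod 8 */
	int i;

	for (i = 0; i < 5; i++)	/* 3 -> 6 -> 12 -> 24 -> 48 -> 96 bits */
		inv *= 2 - m * inv;
	return -inv;		/* -m^-1 mod 2^64 */
}

int
main(void)
{
	uint64_t m = 0xffffffff00000001ULL;	/* any odd modulus word */
	uint64_t n0 = mont_n0(m);

	/* check: m * (-n0) == 1 mod 2^64, so this prints 1 */
	printf("m * -n0 = %llx\n", (unsigned long long)(m * -n0));
	return 0;
}
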
-static int e_rsax_rsa_mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx)
- {
- BIGNUM *r1,*m1,*vrfy;
- BIGNUM local_dmp1,local_dmq1,local_c,local_r1;
- BIGNUM *dmp1,*dmq1,*c,*pr1;
- int ret=0;
+static int
+e_rsax_rsa_mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx)
+{
+ BIGNUM *r1, *m1, *vrfy;
+ BIGNUM local_dmp1, local_dmq1, local_c, local_r1;
+ BIGNUM *dmp1, *dmq1, *c, *pr1;
+ int ret = 0;
BN_CTX_start(ctx);
r1 = BN_CTX_get(ctx);
@@ -494,8 +533,7 @@ static int e_rsax_rsa_mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx
* intialization uses the BN_FLG_CONSTTIME flag
* (unless RSA_FLAG_NO_CONSTTIME is set)
*/
- if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME))
- {
+ if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME)) {
BN_init(&local_p);
p = &local_p;
BN_with_flags(p, rsa->p, BN_FLG_CONSTTIME);
@@ -503,100 +541,97 @@ static int e_rsax_rsa_mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx
BN_init(&local_q);
q = &local_q;
BN_with_flags(q, rsa->q, BN_FLG_CONSTTIME);
- }
- else
- {
+ } else {
p = rsa->p;
q = rsa->q;
- }
+ }
- if (rsa->flags & RSA_FLAG_CACHE_PRIVATE)
- {
- if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_p, CRYPTO_LOCK_RSA, p, ctx))
+ if (rsa->flags & RSA_FLAG_CACHE_PRIVATE) {
+ if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_p,
+ CRYPTO_LOCK_RSA, p, ctx))
error = 1;
- if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_q, CRYPTO_LOCK_RSA, q, ctx))
+ if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_q,
+ CRYPTO_LOCK_RSA, q, ctx))
error = 1;
- }
+ }
/* clean up */
- if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME))
- {
+ if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME)) {
BN_free(&local_p);
BN_free(&local_q);
- }
- if ( error )
+ }
+ if (error )
goto err;
}
if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
- if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, CRYPTO_LOCK_RSA, rsa->n, ctx))
+ if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n,
+ CRYPTO_LOCK_RSA, rsa->n, ctx))
goto err;
/* compute I mod q */
- if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME))
- {
+ if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME)) {
c = &local_c;
BN_with_flags(c, I, BN_FLG_CONSTTIME);
- if (!BN_mod(r1,c,rsa->q,ctx)) goto err;
- }
- else
- {
- if (!BN_mod(r1,I,rsa->q,ctx)) goto err;
- }
+ if (!BN_mod(r1, c,rsa->q, ctx))
+ goto err;
+ } else {
+ if (!BN_mod(r1, I,rsa->q, ctx))
+ goto err;
+ }
/* compute r1^dmq1 mod q */
- if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME))
- {
+ if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME)) {
dmq1 = &local_dmq1;
BN_with_flags(dmq1, rsa->dmq1, BN_FLG_CONSTTIME);
- }
- else
+ } else
dmq1 = rsa->dmq1;
- if (!e_rsax_bn_mod_exp(m1,r1,dmq1,rsa->q,ctx,
- rsa->_method_mod_q, e_rsax_get_ctx(rsa, 0, rsa->q) )) goto err;
+ if (!e_rsax_bn_mod_exp(m1, r1, dmq1, rsa->q, ctx, rsa->_method_mod_q,
+ e_rsax_get_ctx(rsa, 0, rsa->q)))
+ goto err;
/* compute I mod p */
- if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME))
- {
+ if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME)) {
c = &local_c;
BN_with_flags(c, I, BN_FLG_CONSTTIME);
- if (!BN_mod(r1,c,rsa->p,ctx)) goto err;
- }
- else
- {
- if (!BN_mod(r1,I,rsa->p,ctx)) goto err;
- }
+ if (!BN_mod(r1, c,rsa->p, ctx))
+ goto err;
+ } else {
+ if (!BN_mod(r1, I,rsa->p, ctx))
+ goto err;
+ }
/* compute r1^dmp1 mod p */
- if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME))
- {
+ if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME)) {
dmp1 = &local_dmp1;
BN_with_flags(dmp1, rsa->dmp1, BN_FLG_CONSTTIME);
- }
- else
+ } else
dmp1 = rsa->dmp1;
- if (!e_rsax_bn_mod_exp(r0,r1,dmp1,rsa->p,ctx,
- rsa->_method_mod_p, e_rsax_get_ctx(rsa, 1, rsa->p) )) goto err;
+ if (!e_rsax_bn_mod_exp(r0, r1, dmp1, rsa->p, ctx, rsa->_method_mod_p,
+ e_rsax_get_ctx(rsa, 1, rsa->p)))
+ goto err;
- if (!BN_sub(r0,r0,m1)) goto err;
+ if (!BN_sub(r0, r0, m1))
+ goto err;
/* This will help stop the size of r0 increasing, which does
* affect the multiply if it optimised for a power of 2 size */
if (BN_is_negative(r0))
- if (!BN_add(r0,r0,rsa->p)) goto err;
+ if (!BN_add(r0, r0, rsa->p))
+ goto err;
- if (!BN_mul(r1,r0,rsa->iqmp,ctx)) goto err;
+ if (!BN_mul(r1, r0, rsa->iqmp, ctx))
+ goto err;
/* Turn BN_FLG_CONSTTIME flag on before division operation */
- if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME))
- {
+ if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME)) {
pr1 = &local_r1;
BN_with_flags(pr1, r1, BN_FLG_CONSTTIME);
- }
- else
+ } else
pr1 = r1;
- if (!BN_mod(r0,pr1,rsa->p,ctx)) goto err;
+ if (!BN_mod(r0, pr1, rsa->p, ctx))
+ goto err;
/* If p < q it is occasionally possible for the correction of
* adding 'p' if r0 is negative above to leave the result still
@@ -606,25 +641,30 @@ static int e_rsax_rsa_mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx
* they ensure p > q [steve]
*/
if (BN_is_negative(r0))
- if (!BN_add(r0,r0,rsa->p)) goto err;
- if (!BN_mul(r1,r0,rsa->q,ctx)) goto err;
- if (!BN_add(r0,r1,m1)) goto err;
-
- if (rsa->e && rsa->n)
- {
- if (!e_rsax_bn_mod_exp(vrfy,r0,rsa->e,rsa->n,ctx,rsa->_method_mod_n, e_rsax_get_ctx(rsa, 2, rsa->n) ))
- goto err;
+ if (!BN_add(r0, r0, rsa->p))
+ goto err;
+ if (!BN_mul(r1, r0, rsa->q, ctx))
+ goto err;
+ if (!BN_add(r0, r1, m1))
+ goto err;
+
+ if (rsa->e && rsa->n) {
+ if (!e_rsax_bn_mod_exp(vrfy, r0, rsa->e, rsa->n, ctx,
+ rsa->_method_mod_n, e_rsax_get_ctx(rsa, 2, rsa->n)))
+ goto err;
/* If 'I' was greater than (or equal to) rsa->n, the operation
* will be equivalent to using 'I mod n'. However, the result of
* the verify will *always* be less than 'n' so we don't check
* for absolute equality, just congruency. */
- if (!BN_sub(vrfy, vrfy, I)) goto err;
- if (!BN_mod(vrfy, vrfy, rsa->n, ctx)) goto err;
+ if (!BN_sub(vrfy, vrfy, I))
+ goto err;
+ if (!BN_mod(vrfy, vrfy, rsa->n, ctx))
+ goto err;
if (BN_is_negative(vrfy))
- if (!BN_add(vrfy, vrfy, rsa->n)) goto err;
- if (!BN_is_zero(vrfy))
- {
+ if (!BN_add(vrfy, vrfy, rsa->n))
+ goto err;
+ if (!BN_is_zero(vrfy)) {
/* 'I' and 'vrfy' aren't congruent mod n. Don't leak
* miscalculated CRT output, just do a raw (slower)
* mod_exp and return that instead. */
@@ -632,23 +672,22 @@ static int e_rsax_rsa_mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx
BIGNUM local_d;
BIGNUM *d = NULL;
- if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME))
- {
+ if (!(rsa->flags & RSA_FLAG_NO_CONSTTIME)) {
d = &local_d;
BN_with_flags(d, rsa->d, BN_FLG_CONSTTIME);
- }
- else
+ } else
d = rsa->d;
- if (!e_rsax_bn_mod_exp(r0,I,d,rsa->n,ctx,
- rsa->_method_mod_n, e_rsax_get_ctx(rsa, 2, rsa->n) )) goto err;
- }
+ if (!e_rsax_bn_mod_exp(r0, I,d, rsa->n, ctx,
+ rsa->_method_mod_n, e_rsax_get_ctx(rsa, 2, rsa->n)))
+ goto err;
}
- ret=1;
+ }
+ ret = 1;
err:
BN_CTX_end(ctx);
return ret;
- }
+}
#endif /* !OPENSSL_NO_RSA */
#endif /* !COMPILE_RSAX */
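
e_rsax_rsa_mod_exp() above is the usual RSA private-key CRT path: reduce the input mod q and mod p, exponentiate with dmq1 and dmp1, recombine with iqmp = q^-1 mod p, and, when e and n are available, re-encrypt and check congruence with the input mod n, falling back to a plain modular exponentiation if the CRT result does not verify. A toy walk-through of the recombination with small numbers (all values chosen purely for illustration) is below:

#include <stdint.h>
#include <stdio.h>

static uint64_t
modpow(uint64_t b, uint64_t e, uint64_t m)
{
	uint64_t r = 1;

	b %= m;
	while (e > 0) {
		if (e & 1)
			r = r * b % m;
		b = b * b % m;
		e >>= 1;
	}
	return r;
}

int
main(void)
{
	uint64_t p = 11, q = 13, n = p * q;
	uint64_t d = 103, dp = d % (p - 1), dq = d % (q - 1);
	uint64_t iqmp = 6;		/* q^-1 mod p: 13 * 6 = 78 = 7 * 11 + 1 */
	uint64_t c = 9;			/* "ciphertext" */
	uint64_t m1, r0, h, m;

	m1 = modpow(c, dq, q);		/* c^dq mod q */
	r0 = modpow(c, dp, p);		/* c^dp mod p */
	h = (r0 + p - m1 % p) % p;	/* (r0 - m1) mod p, kept non-negative */
	h = h * iqmp % p;
	m = m1 + h * q;			/* recombined result */

	/* both values agree (113 here) */
	printf("CRT: %llu, direct: %llu\n",
	    (unsigned long long)m, (unsigned long long)modpow(c, d, n));
	return 0;
}
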
diff --git a/lib/libcrypto/engine/eng_table.c b/lib/libcrypto/engine/eng_table.c
index 5781af1eb93..44f3e892b80 100644
--- a/lib/libcrypto/engine/eng_table.c
+++ b/lib/libcrypto/engine/eng_table.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: eng_table.c,v 1.5 2014/06/12 15:49:29 deraadt Exp $ */
+/* $OpenBSD: eng_table.c,v 1.6 2014/06/22 12:05:09 jsing Exp $ */
/* ====================================================================
* Copyright (c) 2001 The OpenSSL Project. All rights reserved.
*
@@ -7,7 +7,7 @@
* are met:
*
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
@@ -59,8 +59,7 @@
#include "eng_int.h"
/* The type of the items in the table */
-typedef struct st_engine_pile
- {
+typedef struct st_engine_pile {
/* The 'nid' of this algorithm/mode */
int nid;
/* ENGINEs that implement this algorithm/mode. */
@@ -69,284 +68,287 @@ typedef struct st_engine_pile
ENGINE *funct;
/* Zero if 'sk' is newer than the cached 'funct', non-zero otherwise */
int uptodate;
- } ENGINE_PILE;
+} ENGINE_PILE;
DECLARE_LHASH_OF(ENGINE_PILE);
/* The type exposed in eng_int.h */
-struct st_engine_table
- {
+struct st_engine_table {
LHASH_OF(ENGINE_PILE) piles;
- }; /* ENGINE_TABLE */
+}; /* ENGINE_TABLE */
-
-typedef struct st_engine_pile_doall
- {
+typedef struct st_engine_pile_doall {
engine_table_doall_cb *cb;
void *arg;
- } ENGINE_PILE_DOALL;
-
+} ENGINE_PILE_DOALL;
/* Global flags (ENGINE_TABLE_FLAG_***). */
static unsigned int table_flags = 0;
/* API function manipulating 'table_flags' */
-unsigned int ENGINE_get_table_flags(void)
- {
+unsigned int
+ENGINE_get_table_flags(void)
+{
return table_flags;
- }
+}
-void ENGINE_set_table_flags(unsigned int flags)
- {
+void
+ENGINE_set_table_flags(unsigned int flags)
+{
table_flags = flags;
- }
+}
/* Internal functions for the "piles" hash table */
-static unsigned long engine_pile_hash(const ENGINE_PILE *c)
- {
+static unsigned long
+engine_pile_hash(const ENGINE_PILE *c)
+{
return c->nid;
- }
+}
-static int engine_pile_cmp(const ENGINE_PILE *a, const ENGINE_PILE *b)
- {
+static int
+engine_pile_cmp(const ENGINE_PILE *a, const ENGINE_PILE *b)
+{
return a->nid - b->nid;
- }
+}
static IMPLEMENT_LHASH_HASH_FN(engine_pile, ENGINE_PILE)
static IMPLEMENT_LHASH_COMP_FN(engine_pile, ENGINE_PILE)
-static int int_table_check(ENGINE_TABLE **t, int create)
- {
+static int
+int_table_check(ENGINE_TABLE **t, int create)
+{
LHASH_OF(ENGINE_PILE) *lh;
- if(*t) return 1;
- if(!create) return 0;
- if((lh = lh_ENGINE_PILE_new()) == NULL)
+ if (*t)
+ return 1;
+ if (!create)
+ return 0;
+ if ((lh = lh_ENGINE_PILE_new()) == NULL)
return 0;
*t = (ENGINE_TABLE *)lh;
return 1;
- }
+}
/* Privately exposed (via eng_int.h) functions for adding and/or removing
* ENGINEs from the implementation table */
-int engine_table_register(ENGINE_TABLE **table, ENGINE_CLEANUP_CB *cleanup,
- ENGINE *e, const int *nids, int num_nids, int setdefault)
- {
+int
+engine_table_register(ENGINE_TABLE **table, ENGINE_CLEANUP_CB *cleanup,
+ ENGINE *e, const int *nids, int num_nids, int setdefault)
+{
int ret = 0, added = 0;
ENGINE_PILE tmplate, *fnd;
+
CRYPTO_w_lock(CRYPTO_LOCK_ENGINE);
- if(!(*table))
+ if (!(*table))
added = 1;
- if(!int_table_check(table, 1))
+ if (!int_table_check(table, 1))
goto end;
- if(added)
+ if (added)
/* The cleanup callback needs to be added */
engine_cleanup_add_first(cleanup);
- while(num_nids--)
- {
+ while (num_nids--) {
tmplate.nid = *nids;
fnd = lh_ENGINE_PILE_retrieve(&(*table)->piles, &tmplate);
- if(!fnd)
- {
+ if (!fnd) {
fnd = malloc(sizeof(ENGINE_PILE));
- if(!fnd) goto end;
+ if (!fnd)
+ goto end;
fnd->uptodate = 1;
fnd->nid = *nids;
fnd->sk = sk_ENGINE_new_null();
- if(!fnd->sk)
- {
+ if (!fnd->sk) {
free(fnd);
goto end;
- }
+ }
fnd->funct = NULL;
(void)lh_ENGINE_PILE_insert(&(*table)->piles, fnd);
- }
+ }
/* A registration shouldn't add duplciate entries */
(void)sk_ENGINE_delete_ptr(fnd->sk, e);
/* if 'setdefault', this ENGINE goes to the head of the list */
- if(!sk_ENGINE_push(fnd->sk, e))
+ if (!sk_ENGINE_push(fnd->sk, e))
goto end;
/* "touch" this ENGINE_PILE */
fnd->uptodate = 0;
- if(setdefault)
- {
- if(!engine_unlocked_init(e))
- {
+ if (setdefault) {
+ if (!engine_unlocked_init(e)) {
ENGINEerr(ENGINE_F_ENGINE_TABLE_REGISTER,
- ENGINE_R_INIT_FAILED);
+ ENGINE_R_INIT_FAILED);
goto end;
- }
- if(fnd->funct)
+ }
+ if (fnd->funct)
engine_unlocked_finish(fnd->funct, 0);
fnd->funct = e;
fnd->uptodate = 1;
- }
- nids++;
}
+ nids++;
+ }
ret = 1;
end:
CRYPTO_w_unlock(CRYPTO_LOCK_ENGINE);
return ret;
- }
-static void int_unregister_cb_doall_arg(ENGINE_PILE *pile, ENGINE *e)
- {
+}
+
+static void
+int_unregister_cb_doall_arg(ENGINE_PILE *pile, ENGINE *e)
+{
int n;
+
/* Iterate the 'c->sk' stack removing any occurance of 'e' */
- while((n = sk_ENGINE_find(pile->sk, e)) >= 0)
- {
+ while ((n = sk_ENGINE_find(pile->sk, e)) >= 0) {
(void)sk_ENGINE_delete(pile->sk, n);
pile->uptodate = 0;
- }
- if(pile->funct == e)
- {
+ }
+ if (pile->funct == e) {
engine_unlocked_finish(e, 0);
pile->funct = NULL;
- }
}
+}
static IMPLEMENT_LHASH_DOALL_ARG_FN(int_unregister_cb, ENGINE_PILE, ENGINE)
-void engine_table_unregister(ENGINE_TABLE **table, ENGINE *e)
- {
+void
+engine_table_unregister(ENGINE_TABLE **table, ENGINE *e)
+{
CRYPTO_w_lock(CRYPTO_LOCK_ENGINE);
- if(int_table_check(table, 0))
+ if (int_table_check(table, 0))
lh_ENGINE_PILE_doall_arg(&(*table)->piles,
- LHASH_DOALL_ARG_FN(int_unregister_cb),
- ENGINE, e);
+ LHASH_DOALL_ARG_FN(int_unregister_cb), ENGINE, e);
CRYPTO_w_unlock(CRYPTO_LOCK_ENGINE);
- }
+}
-static void int_cleanup_cb_doall(ENGINE_PILE *p)
- {
+static void
+int_cleanup_cb_doall(ENGINE_PILE *p)
+{
sk_ENGINE_free(p->sk);
- if(p->funct)
+ if (p->funct)
engine_unlocked_finish(p->funct, 0);
free(p);
- }
+}
static IMPLEMENT_LHASH_DOALL_FN(int_cleanup_cb, ENGINE_PILE)
-void engine_table_cleanup(ENGINE_TABLE **table)
- {
+void
+engine_table_cleanup(ENGINE_TABLE **table)
+{
CRYPTO_w_lock(CRYPTO_LOCK_ENGINE);
- if(*table)
- {
+ if (*table) {
lh_ENGINE_PILE_doall(&(*table)->piles,
- LHASH_DOALL_FN(int_cleanup_cb));
+ LHASH_DOALL_FN(int_cleanup_cb));
lh_ENGINE_PILE_free(&(*table)->piles);
*table = NULL;
- }
- CRYPTO_w_unlock(CRYPTO_LOCK_ENGINE);
}
+ CRYPTO_w_unlock(CRYPTO_LOCK_ENGINE);
+}
/* return a functional reference for a given 'nid' */
#ifndef ENGINE_TABLE_DEBUG
-ENGINE *engine_table_select(ENGINE_TABLE **table, int nid)
+ENGINE *
+engine_table_select(ENGINE_TABLE **table, int nid)
#else
-ENGINE *engine_table_select_tmp(ENGINE_TABLE **table, int nid, const char *f, int l)
+ENGINE *
+engine_table_select_tmp(ENGINE_TABLE **table, int nid, const char *f, int l)
#endif
- {
+{
ENGINE *ret = NULL;
- ENGINE_PILE tmplate, *fnd=NULL;
+ ENGINE_PILE tmplate, *fnd = NULL;
int initres, loop = 0;
- if(!(*table))
- {
+ if (!(*table)) {
#ifdef ENGINE_TABLE_DEBUG
fprintf(stderr, "engine_table_dbg: %s:%d, nid=%d, nothing "
- "registered!\n", f, l, nid);
+ "registered!\n", f, l, nid);
#endif
return NULL;
- }
+ }
ERR_set_mark();
CRYPTO_w_lock(CRYPTO_LOCK_ENGINE);
/* Check again inside the lock otherwise we could race against cleanup
* operations. But don't worry about a fprintf(stderr). */
- if(!int_table_check(table, 0)) goto end;
+ if (!int_table_check(table, 0))
+ goto end;
tmplate.nid = nid;
fnd = lh_ENGINE_PILE_retrieve(&(*table)->piles, &tmplate);
- if(!fnd) goto end;
- if(fnd->funct && engine_unlocked_init(fnd->funct))
- {
+ if (!fnd)
+ goto end;
+ if (fnd->funct && engine_unlocked_init(fnd->funct)) {
#ifdef ENGINE_TABLE_DEBUG
fprintf(stderr, "engine_table_dbg: %s:%d, nid=%d, using "
- "ENGINE '%s' cached\n", f, l, nid, fnd->funct->id);
+ "ENGINE '%s' cached\n", f, l, nid, fnd->funct->id);
#endif
ret = fnd->funct;
goto end;
- }
- if(fnd->uptodate)
- {
+ }
+ if (fnd->uptodate) {
ret = fnd->funct;
goto end;
- }
+ }
trynext:
ret = sk_ENGINE_value(fnd->sk, loop++);
- if(!ret)
- {
+ if (!ret) {
#ifdef ENGINE_TABLE_DEBUG
fprintf(stderr, "engine_table_dbg: %s:%d, nid=%d, no "
- "registered implementations would initialise\n",
- f, l, nid);
+ "registered implementations would initialise\n", f, l, nid);
#endif
goto end;
- }
+ }
/* Try to initialise the ENGINE? */
- if((ret->funct_ref > 0) || !(table_flags & ENGINE_TABLE_FLAG_NOINIT))
+ if ((ret->funct_ref > 0) || !(table_flags & ENGINE_TABLE_FLAG_NOINIT))
initres = engine_unlocked_init(ret);
else
initres = 0;
- if(initres)
- {
+ if (initres) {
/* Update 'funct' */
- if((fnd->funct != ret) && engine_unlocked_init(ret))
- {
+ if ((fnd->funct != ret) && engine_unlocked_init(ret)) {
/* If there was a previous default we release it. */
- if(fnd->funct)
+ if (fnd->funct)
engine_unlocked_finish(fnd->funct, 0);
fnd->funct = ret;
#ifdef ENGINE_TABLE_DEBUG
fprintf(stderr, "engine_table_dbg: %s:%d, nid=%d, "
- "setting default to '%s'\n", f, l, nid, ret->id);
+ "setting default to '%s'\n", f, l, nid, ret->id);
#endif
- }
+ }
#ifdef ENGINE_TABLE_DEBUG
fprintf(stderr, "engine_table_dbg: %s:%d, nid=%d, using "
- "newly initialised '%s'\n", f, l, nid, ret->id);
+ "newly initialised '%s'\n", f, l, nid, ret->id);
#endif
goto end;
- }
+ }
goto trynext;
end:
/* If it failed, it is unlikely to succeed again until some future
* registrations have taken place. In all cases, we cache. */
- if(fnd) fnd->uptodate = 1;
+ if (fnd)
+ fnd->uptodate = 1;
#ifdef ENGINE_TABLE_DEBUG
- if(ret)
+ if (ret)
fprintf(stderr, "engine_table_dbg: %s:%d, nid=%d, caching "
- "ENGINE '%s'\n", f, l, nid, ret->id);
+ "ENGINE '%s'\n", f, l, nid, ret->id);
else
fprintf(stderr, "engine_table_dbg: %s:%d, nid=%d, caching "
- "'no matching ENGINE'\n", f, l, nid);
+ "'no matching ENGINE'\n", f, l, nid);
#endif
CRYPTO_w_unlock(CRYPTO_LOCK_ENGINE);
/* Whatever happened, any failed init()s are not failures in this
* context, so clear our error state. */
ERR_pop_to_mark();
return ret;
- }
+}
/* Table enumeration */
-static void int_cb_doall_arg(ENGINE_PILE *pile, ENGINE_PILE_DOALL *dall)
- {
+static void
+int_cb_doall_arg(ENGINE_PILE *pile, ENGINE_PILE_DOALL *dall)
+{
dall->cb(pile->nid, pile->sk, pile->funct, dall->arg);
- }
-static IMPLEMENT_LHASH_DOALL_ARG_FN(int_cb, ENGINE_PILE,ENGINE_PILE_DOALL)
+}
+static IMPLEMENT_LHASH_DOALL_ARG_FN(int_cb, ENGINE_PILE, ENGINE_PILE_DOALL)
-void engine_table_doall(ENGINE_TABLE *table, engine_table_doall_cb *cb,
- void *arg)
- {
+void
+engine_table_doall(ENGINE_TABLE *table, engine_table_doall_cb *cb, void *arg)
+{
ENGINE_PILE_DOALL dall;
+
dall.cb = cb;
dall.arg = arg;
lh_ENGINE_PILE_doall_arg(&table->piles, LHASH_DOALL_ARG_FN(int_cb),
- ENGINE_PILE_DOALL, &dall);
- }
+ ENGINE_PILE_DOALL, &dall);
+}
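
On the eng_table.c side, each algorithm nid owns an ENGINE_PILE holding the stack of registered ENGINEs plus a cached functional default, and engine_table_select() hands out a functional reference, trying the cached engine first and otherwise walking the stack until one initialises. This machinery is driven indirectly from the public API; a sketch of the caller-visible flow (the engine e is assumed to exist and to implement RSA) might be:

#include <openssl/engine.h>

/*
 * Sketch only: register an engine's RSA implementation and later fetch
 * the selected default. ENGINE_register_RSA() feeds the RSA table that
 * engine_table_register() maintains; ENGINE_get_default_RSA() goes
 * through engine_table_select() and returns a functional reference.
 */
static int
register_and_use(ENGINE *e)
{
	ENGINE *chosen;

	if (!ENGINE_register_RSA(e))
		return 0;
	if ((chosen = ENGINE_get_default_RSA()) == NULL)
		return 0;
	/* ... use it, e.g. via RSA_new_method(chosen) ... */
	ENGINE_finish(chosen);		/* drop the functional reference */
	return 1;
}
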