summary refs log tree commit diff
diff options
context:
space:
mode:
author Theo Buehler <tb@cvs.openbsd.org> 2021-04-20 17:38:03 +0000
committer Theo Buehler <tb@cvs.openbsd.org> 2021-04-20 17:38:03 +0000
commit 35d10c1fd9772e9cb57842876e23f1b5aa6bb3b3 (patch)
tree 537a3dbdf771ef9e9b91bcf65ae83e843b30b46c
parent b865d25575023451abc9c66364356beeb597911c (diff)
Fix indent of EC_METHODs as requested by jsing.
While there zap trailing whitespace from a KNF approximation gone wrong.
-rw-r--r--  lib/libcrypto/ec/ecp_mont.c       32
-rw-r--r--  lib/libcrypto/ec/ecp_nist.c       20
-rw-r--r--  lib/libcrypto/ec/ecp_nistp224.c   72
-rw-r--r--  lib/libcrypto/ec/ecp_nistp256.c  110
-rw-r--r--  lib/libcrypto/ec/ecp_nistp521.c   84
-rw-r--r--  lib/libcrypto/ec/ecp_smpl.c       66
6 files changed, 192 insertions, 192 deletions
diff --git a/lib/libcrypto/ec/ecp_mont.c b/lib/libcrypto/ec/ecp_mont.c
index 7160692e9e3..f371e3ec8f8 100644
--- a/lib/libcrypto/ec/ecp_mont.c
+++ b/lib/libcrypto/ec/ecp_mont.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ecp_mont.c,v 1.18 2021/04/20 17:28:18 tb Exp $ */
+/* $OpenBSD: ecp_mont.c,v 1.19 2021/04/20 17:38:02 tb Exp $ */
/*
* Originally written by Bodo Moeller for the OpenSSL project.
*/
@@ -80,20 +80,20 @@ EC_GFp_mont_method(void)
.group_get_curve = ec_GFp_simple_group_get_curve,
.group_get_degree = ec_GFp_simple_group_get_degree,
.group_check_discriminant =
- ec_GFp_simple_group_check_discriminant,
+ ec_GFp_simple_group_check_discriminant,
.point_init = ec_GFp_simple_point_init,
.point_finish = ec_GFp_simple_point_finish,
.point_clear_finish = ec_GFp_simple_point_clear_finish,
.point_copy = ec_GFp_simple_point_copy,
.point_set_to_infinity = ec_GFp_simple_point_set_to_infinity,
.point_set_Jprojective_coordinates =
- ec_GFp_simple_set_Jprojective_coordinates,
+ ec_GFp_simple_set_Jprojective_coordinates,
.point_get_Jprojective_coordinates =
- ec_GFp_simple_get_Jprojective_coordinates,
+ ec_GFp_simple_get_Jprojective_coordinates,
.point_set_affine_coordinates =
- ec_GFp_simple_point_set_affine_coordinates,
+ ec_GFp_simple_point_set_affine_coordinates,
.point_get_affine_coordinates =
- ec_GFp_simple_point_get_affine_coordinates,
+ ec_GFp_simple_point_get_affine_coordinates,
.add = ec_GFp_simple_add,
.dbl = ec_GFp_simple_dbl,
.invert = ec_GFp_simple_invert,
@@ -117,7 +117,7 @@ EC_GFp_mont_method(void)
}
-int
+int
ec_GFp_mont_group_init(EC_GROUP * group)
{
int ok;
@@ -129,7 +129,7 @@ ec_GFp_mont_group_init(EC_GROUP * group)
}
-void
+void
ec_GFp_mont_group_finish(EC_GROUP * group)
{
BN_MONT_CTX_free(group->field_data1);
@@ -140,7 +140,7 @@ ec_GFp_mont_group_finish(EC_GROUP * group)
}
-void
+void
ec_GFp_mont_group_clear_finish(EC_GROUP * group)
{
BN_MONT_CTX_free(group->field_data1);
@@ -151,7 +151,7 @@ ec_GFp_mont_group_clear_finish(EC_GROUP * group)
}
-int
+int
ec_GFp_mont_group_copy(EC_GROUP * dest, const EC_GROUP * src)
{
BN_MONT_CTX_free(dest->field_data1);
@@ -185,7 +185,7 @@ ec_GFp_mont_group_copy(EC_GROUP * dest, const EC_GROUP * src)
}
-int
+int
ec_GFp_mont_group_set_curve(EC_GROUP *group, const BIGNUM *p, const BIGNUM *a,
const BIGNUM *b, BN_CTX *ctx)
{
@@ -237,7 +237,7 @@ ec_GFp_mont_group_set_curve(EC_GROUP *group, const BIGNUM *p, const BIGNUM *a,
}
-int
+int
ec_GFp_mont_field_mul(const EC_GROUP *group, BIGNUM *r, const BIGNUM *a,
const BIGNUM *b, BN_CTX *ctx)
{
@@ -249,7 +249,7 @@ ec_GFp_mont_field_mul(const EC_GROUP *group, BIGNUM *r, const BIGNUM *a,
}
-int
+int
ec_GFp_mont_field_sqr(const EC_GROUP *group, BIGNUM *r, const BIGNUM *a,
BN_CTX *ctx)
{
@@ -261,7 +261,7 @@ ec_GFp_mont_field_sqr(const EC_GROUP *group, BIGNUM *r, const BIGNUM *a,
}
-int
+int
ec_GFp_mont_field_encode(const EC_GROUP *group, BIGNUM *r, const BIGNUM *a,
BN_CTX *ctx)
{
@@ -273,7 +273,7 @@ ec_GFp_mont_field_encode(const EC_GROUP *group, BIGNUM *r, const BIGNUM *a,
}
-int
+int
ec_GFp_mont_field_decode(const EC_GROUP *group, BIGNUM *r, const BIGNUM *a,
BN_CTX *ctx)
{
@@ -285,7 +285,7 @@ ec_GFp_mont_field_decode(const EC_GROUP *group, BIGNUM *r, const BIGNUM *a,
}
-int
+int
ec_GFp_mont_field_set_to_one(const EC_GROUP *group, BIGNUM *r, BN_CTX *ctx)
{
if (group->field_data2 == NULL) {
diff --git a/lib/libcrypto/ec/ecp_nist.c b/lib/libcrypto/ec/ecp_nist.c
index c34c708025d..14ebe7d126e 100644
--- a/lib/libcrypto/ec/ecp_nist.c
+++ b/lib/libcrypto/ec/ecp_nist.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ecp_nist.c,v 1.16 2021/04/20 17:28:18 tb Exp $ */
+/* $OpenBSD: ecp_nist.c,v 1.17 2021/04/20 17:38:02 tb Exp $ */
/*
* Written by Nils Larsch for the OpenSSL project.
*/
@@ -81,20 +81,20 @@ EC_GFp_nist_method(void)
.group_get_curve = ec_GFp_simple_group_get_curve,
.group_get_degree = ec_GFp_simple_group_get_degree,
.group_check_discriminant =
- ec_GFp_simple_group_check_discriminant,
+ ec_GFp_simple_group_check_discriminant,
.point_init = ec_GFp_simple_point_init,
.point_finish = ec_GFp_simple_point_finish,
.point_clear_finish = ec_GFp_simple_point_clear_finish,
.point_copy = ec_GFp_simple_point_copy,
.point_set_to_infinity = ec_GFp_simple_point_set_to_infinity,
.point_set_Jprojective_coordinates =
- ec_GFp_simple_set_Jprojective_coordinates,
+ ec_GFp_simple_set_Jprojective_coordinates,
.point_get_Jprojective_coordinates =
- ec_GFp_simple_get_Jprojective_coordinates,
+ ec_GFp_simple_get_Jprojective_coordinates,
.point_set_affine_coordinates =
- ec_GFp_simple_point_set_affine_coordinates,
+ ec_GFp_simple_point_set_affine_coordinates,
.point_get_affine_coordinates =
- ec_GFp_simple_point_get_affine_coordinates,
+ ec_GFp_simple_point_get_affine_coordinates,
.add = ec_GFp_simple_add,
.dbl = ec_GFp_simple_dbl,
.invert = ec_GFp_simple_invert,
@@ -114,7 +114,7 @@ EC_GFp_nist_method(void)
return &ret;
}
-int
+int
ec_GFp_nist_group_copy(EC_GROUP * dest, const EC_GROUP * src)
{
dest->field_mod_func = src->field_mod_func;
@@ -122,7 +122,7 @@ ec_GFp_nist_group_copy(EC_GROUP * dest, const EC_GROUP * src)
return ec_GFp_simple_group_copy(dest, src);
}
-int
+int
ec_GFp_nist_group_set_curve(EC_GROUP *group, const BIGNUM *p,
const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx)
{
@@ -162,7 +162,7 @@ ec_GFp_nist_group_set_curve(EC_GROUP *group, const BIGNUM *p,
}
-int
+int
ec_GFp_nist_field_mul(const EC_GROUP *group, BIGNUM *r, const BIGNUM *a,
const BIGNUM *b, BN_CTX *ctx)
{
@@ -189,7 +189,7 @@ ec_GFp_nist_field_mul(const EC_GROUP *group, BIGNUM *r, const BIGNUM *a,
}
-int
+int
ec_GFp_nist_field_sqr(const EC_GROUP * group, BIGNUM * r, const BIGNUM * a,
BN_CTX * ctx)
{
diff --git a/lib/libcrypto/ec/ecp_nistp224.c b/lib/libcrypto/ec/ecp_nistp224.c
index 10ade9dbb53..4ed45df8962 100644
--- a/lib/libcrypto/ec/ecp_nistp224.c
+++ b/lib/libcrypto/ec/ecp_nistp224.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ecp_nistp224.c,v 1.25 2021/04/20 17:28:18 tb Exp $ */
+/* $OpenBSD: ecp_nistp224.c,v 1.26 2021/04/20 17:38:02 tb Exp $ */
/*
* Written by Emilia Kasper (Google) for the OpenSSL project.
*/
@@ -278,7 +278,7 @@ EC_GFp_nistp224_method(void)
}
/* Helper functions to convert field elements to/from internal representation */
-static void
+static void
bin28_to_felem(felem out, const u8 in[28])
{
out[0] = *((const uint64_t *) (in)) & 0x00ffffffffffffff;
@@ -287,7 +287,7 @@ bin28_to_felem(felem out, const u8 in[28])
out[3] = (*((const uint64_t *) (in + 21))) & 0x00ffffffffffffff;
}
-static void
+static void
felem_to_bin28(u8 out[28], const felem in)
{
unsigned i;
@@ -300,7 +300,7 @@ felem_to_bin28(u8 out[28], const felem in)
}
/* To preserve endianness when using BN_bn2bin and BN_bin2bn */
-static void
+static void
flip_endian(u8 * out, const u8 * in, unsigned len)
{
unsigned i;
@@ -309,7 +309,7 @@ flip_endian(u8 * out, const u8 * in, unsigned len)
}
/* From OpenSSL BIGNUM to internal representation */
-static int
+static int
BN_to_felem(felem out, const BIGNUM * bn)
{
felem_bytearray b_in;
@@ -353,7 +353,7 @@ felem_to_BN(BIGNUM * out, const felem in)
*
*/
-static void
+static void
felem_one(felem out)
{
out[0] = 1;
@@ -362,7 +362,7 @@ felem_one(felem out)
out[3] = 0;
}
-static void
+static void
felem_assign(felem out, const felem in)
{
out[0] = in[0];
@@ -372,7 +372,7 @@ felem_assign(felem out, const felem in)
}
/* Sum two field elements: out += in */
-static void
+static void
felem_sum(felem out, const felem in)
{
out[0] += in[0];
@@ -383,7 +383,7 @@ felem_sum(felem out, const felem in)
/* Get negative value: out = -in */
/* Assumes in[i] < 2^57 */
-static void
+static void
felem_neg(felem out, const felem in)
{
static const limb two58p2 = (((limb) 1) << 58) + (((limb) 1) << 2);
@@ -400,7 +400,7 @@ felem_neg(felem out, const felem in)
/* Subtract field elements: out -= in */
/* Assumes in[i] < 2^57 */
-static void
+static void
felem_diff(felem out, const felem in)
{
static const limb two58p2 = (((limb) 1) << 58) + (((limb) 1) << 2);
@@ -422,7 +422,7 @@ felem_diff(felem out, const felem in)
/* Subtract in unreduced 128-bit mode: out -= in */
/* Assumes in[i] < 2^119 */
-static void
+static void
widefelem_diff(widefelem out, const widefelem in)
{
static const widelimb two120 = ((widelimb) 1) << 120;
@@ -451,7 +451,7 @@ widefelem_diff(widefelem out, const widefelem in)
/* Subtract in mixed mode: out128 -= in64 */
/* in[i] < 2^63 */
-static void
+static void
felem_diff_128_64(widefelem out, const felem in)
{
static const widelimb two64p8 = (((widelimb) 1) << 64) +
@@ -475,7 +475,7 @@ felem_diff_128_64(widefelem out, const felem in)
/* Multiply a field element by a scalar: out = out * scalar
* The scalars we actually use are small, so results fit without overflow */
-static void
+static void
felem_scalar(felem out, const limb scalar)
{
out[0] *= scalar;
@@ -486,7 +486,7 @@ felem_scalar(felem out, const limb scalar)
/* Multiply an unreduced field element by a scalar: out = out * scalar
* The scalars we actually use are small, so results fit without overflow */
-static void
+static void
widefelem_scalar(widefelem out, const widelimb scalar)
{
out[0] *= scalar;
@@ -499,7 +499,7 @@ widefelem_scalar(widefelem out, const widelimb scalar)
}
/* Square a field element: out = in^2 */
-static void
+static void
felem_square(widefelem out, const felem in)
{
limb tmp0, tmp1, tmp2;
@@ -517,7 +517,7 @@ felem_square(widefelem out, const felem in)
}
/* Multiply two field elements: out = in1 * in2 */
-static void
+static void
felem_mul(widefelem out, const felem in1, const felem in2)
{
out[0] = ((widelimb) in1[0]) * in2[0];
@@ -535,7 +535,7 @@ felem_mul(widefelem out, const felem in1, const felem in2)
/* Reduce seven 128-bit coefficients to four 64-bit coefficients.
* Requires in[i] < 2^126,
* ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, out[3] <= 2^56 + 2^16 */
-static void
+static void
felem_reduce(felem out, const widefelem in)
{
static const widelimb two127p15 = (((widelimb) 1) << 127) +
@@ -599,7 +599,7 @@ felem_reduce(felem out, const widefelem in)
out[3] = output[3];
}
-static void
+static void
felem_square_reduce(felem out, const felem in)
{
widefelem tmp;
@@ -607,7 +607,7 @@ felem_square_reduce(felem out, const felem in)
felem_reduce(out, tmp);
}
-static void
+static void
felem_mul_reduce(felem out, const felem in1, const felem in2)
{
widefelem tmp;
@@ -617,7 +617,7 @@ felem_mul_reduce(felem out, const felem in1, const felem in2)
/* Reduce to unique minimal representation.
* Requires 0 <= in < 2*p (always call felem_reduce first) */
-static void
+static void
felem_contract(felem out, const felem in)
{
static const int64_t two56 = ((limb) 1) << 56;
@@ -674,7 +674,7 @@ felem_contract(felem out, const felem in)
* We know that field elements are reduced to in < 2^225,
* so we only need to check three cases: 0, 2^224 - 2^96 + 1,
* and 2^225 - 2^97 + 2 */
-static limb
+static limb
felem_is_zero(const felem in)
{
limb zero, two224m96p1, two225m97p2;
@@ -690,7 +690,7 @@ felem_is_zero(const felem in)
return (zero | two224m96p1 | two225m97p2);
}
-static limb
+static limb
felem_is_zero_int(const felem in)
{
return (int) (felem_is_zero(in) & ((limb) 1));
@@ -698,7 +698,7 @@ felem_is_zero_int(const felem in)
/* Invert a field element */
/* Computation chain copied from djb's code */
-static void
+static void
felem_inv(felem out, const felem in)
{
felem ftmp, ftmp2, ftmp3, ftmp4;
@@ -897,7 +897,7 @@ point_double(felem x_out, felem y_out, felem z_out,
* (while not equal to the point at infinity).
* This case never happens during single point multiplication,
* so there is no timing leak for ECDH or ECDSA signing. */
-static void
+static void
point_add(felem x3, felem y3, felem z3,
const felem x1, const felem y1, const felem z1,
const int mixed, const felem x2, const felem y2, const felem z2)
@@ -1057,7 +1057,7 @@ point_add(felem x3, felem y3, felem z3,
/* select_point selects the |idx|th point from a precomputation table and
* copies it to out. */
-static void
+static void
select_point(const u64 idx, unsigned int size, const felem pre_comp[ /* size */ ][3], felem out[3])
{
unsigned i, j;
@@ -1078,7 +1078,7 @@ select_point(const u64 idx, unsigned int size, const felem pre_comp[ /* size */
}
/* get_bit returns the |i|th bit in |in| */
-static char
+static char
get_bit(const felem_bytearray in, unsigned i)
{
if (i >= 224)
@@ -1091,7 +1091,7 @@ get_bit(const felem_bytearray in, unsigned i)
* the scalars in scalars[]. If g_scalar is non-NULL, we also add this multiple
* of the generator, using certain (large) precomputed multiples in g_pre_comp.
* Output point (X, Y, Z) is stored in x_out, y_out, z_out */
-static void
+static void
batch_mul(felem x_out, felem y_out, felem z_out,
const felem_bytearray scalars[], const unsigned num_points, const u8 * g_scalar,
const int mixed, const felem pre_comp[][17][3], const felem g_pre_comp[2][16][3])
@@ -1211,7 +1211,7 @@ nistp224_pre_comp_dup(void *src_)
return src_;
}
-static void
+static void
nistp224_pre_comp_free(void *pre_)
{
int i;
@@ -1227,7 +1227,7 @@ nistp224_pre_comp_free(void *pre_)
free(pre);
}
-static void
+static void
nistp224_pre_comp_clear_free(void *pre_)
{
int i;
@@ -1247,7 +1247,7 @@ nistp224_pre_comp_clear_free(void *pre_)
/* OPENSSL EC_METHOD FUNCTIONS
*/
-int
+int
ec_GFp_nistp224_group_init(EC_GROUP * group)
{
int ret;
@@ -1256,7 +1256,7 @@ ec_GFp_nistp224_group_init(EC_GROUP * group)
return ret;
}
-int
+int
ec_GFp_nistp224_group_set_curve(EC_GROUP * group, const BIGNUM * p,
const BIGNUM * a, const BIGNUM * b, BN_CTX * ctx)
{
@@ -1290,7 +1290,7 @@ ec_GFp_nistp224_group_set_curve(EC_GROUP * group, const BIGNUM * p,
/* Takes the Jacobian coordinates (X, Y, Z) of a point and returns
* (X', Y') = (X/Z^2, Y/Z^3) */
-int
+int
ec_GFp_nistp224_point_get_affine_coordinates(const EC_GROUP * group,
const EC_POINT * point, BIGNUM * x, BIGNUM * y, BN_CTX * ctx)
{
@@ -1330,7 +1330,7 @@ ec_GFp_nistp224_point_get_affine_coordinates(const EC_GROUP * group,
return 1;
}
-static void
+static void
make_points_affine(size_t num, felem points[ /* num */ ][3], felem tmp_felems[ /* num+1 */ ])
{
/*
@@ -1353,7 +1353,7 @@ make_points_affine(size_t num, felem points[ /* num */ ][3], felem tmp_felems[ /
/* Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL values
* Result is stored in r (r can equal one of the inputs). */
-int
+int
ec_GFp_nistp224_points_mul(const EC_GROUP * group, EC_POINT * r,
const BIGNUM * scalar, size_t num, const EC_POINT * points[],
const BIGNUM * scalars[], BN_CTX * ctx)
@@ -1548,7 +1548,7 @@ ec_GFp_nistp224_points_mul(const EC_GROUP * group, EC_POINT * r,
return ret;
}
-int
+int
ec_GFp_nistp224_precompute_mult(EC_GROUP * group, BN_CTX * ctx)
{
int ret = 0;
@@ -1675,7 +1675,7 @@ ec_GFp_nistp224_precompute_mult(EC_GROUP * group, BN_CTX * ctx)
return ret;
}
-int
+int
ec_GFp_nistp224_have_precompute_mult(const EC_GROUP * group)
{
if (EC_EX_DATA_get_data(group->extra_data, nistp224_pre_comp_dup,
diff --git a/lib/libcrypto/ec/ecp_nistp256.c b/lib/libcrypto/ec/ecp_nistp256.c
index 674143cc3ee..57b003aefa7 100644
--- a/lib/libcrypto/ec/ecp_nistp256.c
+++ b/lib/libcrypto/ec/ecp_nistp256.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ecp_nistp256.c,v 1.24 2021/04/20 17:28:18 tb Exp $ */
+/* $OpenBSD: ecp_nistp256.c,v 1.25 2021/04/20 17:38:02 tb Exp $ */
/*
* Written by Adam Langley (Google) for the OpenSSL project
*/
@@ -115,7 +115,7 @@ static const u64 bottom63bits = 0x7ffffffffffffffful;
/* bin32_to_felem takes a little-endian byte array and converts it into felem
* form. This assumes that the CPU is little-endian. */
-static void
+static void
bin32_to_felem(felem out, const u8 in[32])
{
out[0] = *((u64 *) & in[0]);
@@ -126,7 +126,7 @@ bin32_to_felem(felem out, const u8 in[32])
/* smallfelem_to_bin32 takes a smallfelem and serialises into a little endian,
* 32 byte array. This assumes that the CPU is little-endian. */
-static void
+static void
smallfelem_to_bin32(u8 out[32], const smallfelem in)
{
*((u64 *) & out[0]) = in[0];
@@ -136,7 +136,7 @@ smallfelem_to_bin32(u8 out[32], const smallfelem in)
}
/* To preserve endianness when using BN_bn2bin and BN_bin2bn */
-static void
+static void
flip_endian(u8 * out, const u8 * in, unsigned len)
{
unsigned i;
@@ -145,7 +145,7 @@ flip_endian(u8 * out, const u8 * in, unsigned len)
}
/* BN_to_felem converts an OpenSSL BIGNUM into an felem */
-static int
+static int
BN_to_felem(felem out, const BIGNUM * bn)
{
felem_bytearray b_in;
@@ -183,7 +183,7 @@ smallfelem_to_BN(BIGNUM * out, const smallfelem in)
/* Field operations
* ---------------- */
-static void
+static void
smallfelem_one(smallfelem out)
{
out[0] = 1;
@@ -192,7 +192,7 @@ smallfelem_one(smallfelem out)
out[3] = 0;
}
-static void
+static void
smallfelem_assign(smallfelem out, const smallfelem in)
{
out[0] = in[0];
@@ -201,7 +201,7 @@ smallfelem_assign(smallfelem out, const smallfelem in)
out[3] = in[3];
}
-static void
+static void
felem_assign(felem out, const felem in)
{
out[0] = in[0];
@@ -211,7 +211,7 @@ felem_assign(felem out, const felem in)
}
/* felem_sum sets out = out + in. */
-static void
+static void
felem_sum(felem out, const felem in)
{
out[0] += in[0];
@@ -221,7 +221,7 @@ felem_sum(felem out, const felem in)
}
/* felem_small_sum sets out = out + in. */
-static void
+static void
felem_small_sum(felem out, const smallfelem in)
{
out[0] += in[0];
@@ -231,7 +231,7 @@ felem_small_sum(felem out, const smallfelem in)
}
/* felem_scalar sets out = out * scalar */
-static void
+static void
felem_scalar(felem out, const u64 scalar)
{
out[0] *= scalar;
@@ -241,7 +241,7 @@ felem_scalar(felem out, const u64 scalar)
}
/* longfelem_scalar sets out = out * scalar */
-static void
+static void
longfelem_scalar(longfelem out, const u64 scalar)
{
out[0] *= scalar;
@@ -265,7 +265,7 @@ static const felem zero105 = {two105m41m9, two105, two105m41p9, two105m41p9};
* On exit:
* out[i] < out[i] + 2^105
*/
-static void
+static void
smallfelem_neg(felem out, const smallfelem small)
{
/* In order to prevent underflow, we subtract from 0 mod p. */
@@ -281,7 +281,7 @@ smallfelem_neg(felem out, const smallfelem small)
* On exit:
* out[i] < out[i] + 2^105
*/
-static void
+static void
felem_diff(felem out, const felem in)
{
/* In order to prevent underflow, we add 0 mod p before subtracting. */
@@ -310,7 +310,7 @@ static const felem zero107 = {two107m43m11, two107, two107m43p11, two107m43p11};
* On exit:
* out[i] < out[i] + 2^107
*/
-static void
+static void
felem_diff_zero107(felem out, const felem in)
{
/* In order to prevent underflow, we add 0 mod p before subtracting. */
@@ -331,7 +331,7 @@ felem_diff_zero107(felem out, const felem in)
* On exit:
* out[i] < out[i] + 2^70 + 2^40
*/
-static void
+static void
longfelem_diff(longfelem out, const longfelem in)
{
static const limb two70m8p6 = (((limb) 1) << 70) - (((limb) 1) << 8) + (((limb) 1) << 6);
@@ -377,7 +377,7 @@ static const felem zero110 = {two64m0, two110p32m0, two64m46, two64m32};
* On exit:
* out[i] < 2^64
*/
-static void
+static void
felem_shrink(smallfelem out, const felem in)
{
felem tmp;
@@ -468,7 +468,7 @@ felem_shrink(smallfelem out, const felem in)
}
/* smallfelem_expand converts a smallfelem to an felem */
-static void
+static void
smallfelem_expand(felem out, const smallfelem in)
{
out[0] = in[0];
@@ -483,7 +483,7 @@ smallfelem_expand(felem out, const smallfelem in)
* On exit:
* out[i] < 7 * 2^64 < 2^67
*/
-static void
+static void
smallfelem_square(longfelem out, const smallfelem small)
{
limb a;
@@ -562,7 +562,7 @@ smallfelem_square(longfelem out, const smallfelem small)
* On exit:
* out[i] < 7 * 2^64 < 2^67
*/
-static void
+static void
felem_square(longfelem out, const felem in)
{
u64 small[4];
@@ -577,7 +577,7 @@ felem_square(longfelem out, const felem in)
* On exit:
* out[i] < 7 * 2^64 < 2^67
*/
-static void
+static void
smallfelem_mul(longfelem out, const smallfelem small1, const smallfelem small2)
{
limb a;
@@ -693,7 +693,7 @@ smallfelem_mul(longfelem out, const smallfelem small1, const smallfelem small2)
* On exit:
* out[i] < 7 * 2^64 < 2^67
*/
-static void
+static void
felem_mul(longfelem out, const felem in1, const felem in2)
{
smallfelem small1, small2;
@@ -709,7 +709,7 @@ felem_mul(longfelem out, const felem in1, const felem in2)
* On exit:
* out[i] < 7 * 2^64 < 2^67
*/
-static void
+static void
felem_small_mul(longfelem out, const smallfelem small1, const felem in2)
{
smallfelem small2;
@@ -736,7 +736,7 @@ static const felem zero100 = {two100m36m4, two100, two100m36p4, two100m36p4};
* out[2] <= out[2] + in[7] + 2*in[6] + 2^33*in[7]
* out[3] <= out[3] + 2^32*in[4] + 3*in[7]
*/
-static void
+static void
felem_reduce_(felem out, const longfelem in)
{
int128_t c;
@@ -779,7 +779,7 @@ felem_reduce_(felem out, const longfelem in)
* On exit:
* out[i] < 2^101
*/
-static void
+static void
felem_reduce(felem out, const longfelem in)
{
out[0] = zero100[0] + in[0];
@@ -794,7 +794,7 @@ felem_reduce(felem out, const longfelem in)
* out[1] > 2^100 - 2^64 - 7*2^96 > 0 out[2] > 2^100 - 2^36 + 2^4 -
* 5*2^64 - 5*2^96 > 0 out[3] > 2^100 - 2^36 + 2^4 - 7*2^64 - 5*2^96
* - 3*2^96 > 0
- *
+ *
* out[0] < 2^100 + 2^64 + 7*2^64 + 5*2^96 < 2^101 out[1] < 2^100 +
* 3*2^64 + 5*2^64 + 3*2^97 < 2^101 out[2] < 2^100 + 5*2^64 + 2^64 +
* 3*2^65 + 2^97 < 2^101 out[3] < 2^100 + 7*2^64 + 7*2^96 + 3*2^64 <
@@ -808,7 +808,7 @@ felem_reduce(felem out, const longfelem in)
* On exit:
* out[i] < 2^106
*/
-static void
+static void
felem_reduce_zero105(felem out, const longfelem in)
{
out[0] = zero105[0] + in[0];
@@ -823,7 +823,7 @@ felem_reduce_zero105(felem out, const longfelem in)
* out[1] > 2^105 - 2^71 - 2^103 > 0 out[2] > 2^105 - 2^41 + 2^9 -
* 2^71 - 2^103 > 0 out[3] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 -
* 2^103 > 0
- *
+ *
* out[0] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106 out[1] < 2^105 + 2^71 +
* 2^71 + 2^103 < 2^106 out[2] < 2^105 + 2^71 + 2^71 + 2^71 + 2^103 <
* 2^106 out[3] < 2^105 + 2^71 + 2^103 + 2^71 < 2^106
@@ -832,7 +832,7 @@ felem_reduce_zero105(felem out, const longfelem in)
/* subtract_u64 sets *result = *result - v and *carry to one if the subtraction
* underflowed. */
-static void
+static void
subtract_u64(u64 * result, u64 * carry, u64 v)
{
uint128_t r = *result;
@@ -845,7 +845,7 @@ subtract_u64(u64 * result, u64 * carry, u64 v)
* On entry:
* in[i] < 2^109
*/
-static void
+static void
felem_contract(smallfelem out, const felem in)
{
unsigned i;
@@ -909,7 +909,7 @@ felem_contract(smallfelem out, const felem in)
subtract_u64(&out[3], &carry, result & kPrime[3]);
}
-static void
+static void
smallfelem_square_contract(smallfelem out, const smallfelem in)
{
longfelem longtmp;
@@ -920,7 +920,7 @@ smallfelem_square_contract(smallfelem out, const smallfelem in)
felem_contract(out, tmp);
}
-static void
+static void
smallfelem_mul_contract(smallfelem out, const smallfelem in1, const smallfelem in2)
{
longfelem longtmp;
@@ -936,7 +936,7 @@ smallfelem_mul_contract(smallfelem out, const smallfelem in1, const smallfelem i
* On entry:
* small[i] < 2^64
*/
-static limb
+static limb
smallfelem_is_zero(const smallfelem small)
{
limb result;
@@ -972,7 +972,7 @@ smallfelem_is_zero(const smallfelem small)
return result;
}
-static int
+static int
smallfelem_is_zero_int(const smallfelem small)
{
return (int) (smallfelem_is_zero(small) & ((limb) 1));
@@ -985,7 +985,7 @@ smallfelem_is_zero_int(const smallfelem small)
* a^{p-1} = 1 (mod p)
* a^{p-2} = a^{-1} (mod p)
*/
-static void
+static void
felem_inv(felem out, const felem in)
{
felem ftmp, ftmp2;
@@ -1080,7 +1080,7 @@ felem_inv(felem out, const felem in)
felem_reduce(out, tmp); /* 2^256 - 2^224 + 2^192 + 2^96 - 3 */
}
-static void
+static void
smallfelem_inv_contract(smallfelem out, const smallfelem in)
{
felem tmp;
@@ -1233,7 +1233,7 @@ copy_small_conditional(felem out, const smallfelem in, limb mask)
* are equal, (while not equal to the point at infinity). This case never
* happens during single point multiplication, so there is no timing leak for
* ECDH or ECDSA signing. */
-static void
+static void
point_add(felem x3, felem y3, felem z3,
const felem x1, const felem y1, const felem z1,
const int mixed, const smallfelem x2, const smallfelem y2, const smallfelem z2)
@@ -1393,7 +1393,7 @@ point_add(felem x3, felem y3, felem z3,
/* point_add_small is the same as point_add, except that it operates on
* smallfelems */
-static void
+static void
point_add_small(smallfelem x3, smallfelem y3, smallfelem z3,
smallfelem x1, smallfelem y1, smallfelem z1,
smallfelem x2, smallfelem y2, smallfelem z2)
@@ -1545,7 +1545,7 @@ static const smallfelem gmul[2][16][3] =
/* select_point selects the |idx|th point from a precomputation table and
* copies it to out. */
-static void
+static void
select_point(const u64 idx, unsigned int size, const smallfelem pre_comp[16][3], smallfelem out[3])
{
unsigned i, j;
@@ -1566,7 +1566,7 @@ select_point(const u64 idx, unsigned int size, const smallfelem pre_comp[16][3],
}
/* get_bit returns the |i|th bit in |in| */
-static char
+static char
get_bit(const felem_bytearray in, int i)
{
if ((i < 0) || (i >= 256))
@@ -1579,7 +1579,7 @@ get_bit(const felem_bytearray in, int i)
* the scalars in scalars[]. If g_scalar is non-NULL, we also add this multiple
* of the generator, using certain (large) precomputed multiples in g_pre_comp.
* Output point (X, Y, Z) is stored in x_out, y_out, z_out */
-static void
+static void
batch_mul(felem x_out, felem y_out, felem z_out,
const felem_bytearray scalars[], const unsigned num_points, const u8 * g_scalar,
const int mixed, const smallfelem pre_comp[][17][3], const smallfelem g_pre_comp[2][16][3])
@@ -1698,20 +1698,20 @@ EC_GFp_nistp256_method(void)
.group_get_curve = ec_GFp_simple_group_get_curve,
.group_get_degree = ec_GFp_simple_group_get_degree,
.group_check_discriminant =
- ec_GFp_simple_group_check_discriminant,
+ ec_GFp_simple_group_check_discriminant,
.point_init = ec_GFp_simple_point_init,
.point_finish = ec_GFp_simple_point_finish,
.point_clear_finish = ec_GFp_simple_point_clear_finish,
.point_copy = ec_GFp_simple_point_copy,
.point_set_to_infinity = ec_GFp_simple_point_set_to_infinity,
.point_set_Jprojective_coordinates =
- ec_GFp_simple_set_Jprojective_coordinates,
+ ec_GFp_simple_set_Jprojective_coordinates,
.point_get_Jprojective_coordinates =
- ec_GFp_simple_get_Jprojective_coordinates,
+ ec_GFp_simple_get_Jprojective_coordinates,
.point_set_affine_coordinates =
- ec_GFp_simple_point_set_affine_coordinates,
+ ec_GFp_simple_point_set_affine_coordinates,
.point_get_affine_coordinates =
- ec_GFp_nistp256_point_get_affine_coordinates,
+ ec_GFp_nistp256_point_get_affine_coordinates,
.add = ec_GFp_simple_add,
.dbl = ec_GFp_simple_dbl,
.invert = ec_GFp_simple_invert,
@@ -1760,7 +1760,7 @@ nistp256_pre_comp_dup(void *src_)
return src_;
}
-static void
+static void
nistp256_pre_comp_free(void *pre_)
{
int i;
@@ -1776,7 +1776,7 @@ nistp256_pre_comp_free(void *pre_)
free(pre);
}
-static void
+static void
nistp256_pre_comp_clear_free(void *pre_)
{
int i;
@@ -1796,7 +1796,7 @@ nistp256_pre_comp_clear_free(void *pre_)
/* OPENSSL EC_METHOD FUNCTIONS
*/
-int
+int
ec_GFp_nistp256_group_init(EC_GROUP * group)
{
int ret;
@@ -1805,7 +1805,7 @@ ec_GFp_nistp256_group_init(EC_GROUP * group)
return ret;
}
-int
+int
ec_GFp_nistp256_group_set_curve(EC_GROUP * group, const BIGNUM * p,
const BIGNUM * a, const BIGNUM * b, BN_CTX * ctx)
{
@@ -1839,7 +1839,7 @@ ec_GFp_nistp256_group_set_curve(EC_GROUP * group, const BIGNUM * p,
/* Takes the Jacobian coordinates (X, Y, Z) of a point and returns
* (X', Y') = (X/Z^2, Y/Z^3) */
-int
+int
ec_GFp_nistp256_point_get_affine_coordinates(const EC_GROUP * group,
const EC_POINT * point, BIGNUM * x, BIGNUM * y, BN_CTX * ctx)
{
@@ -1880,7 +1880,7 @@ ec_GFp_nistp256_point_get_affine_coordinates(const EC_GROUP * group,
return 1;
}
-static void
+static void
make_points_affine(size_t num, smallfelem points[ /* num */ ][3], smallfelem tmp_smallfelems[ /* num+1 */ ])
{
/*
@@ -1903,7 +1903,7 @@ make_points_affine(size_t num, smallfelem points[ /* num */ ][3], smallfelem tmp
/* Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL values
* Result is stored in r (r can equal one of the inputs). */
-int
+int
ec_GFp_nistp256_points_mul(const EC_GROUP * group, EC_POINT * r,
const BIGNUM * scalar, size_t num, const EC_POINT * points[],
const BIGNUM * scalars[], BN_CTX * ctx)
@@ -2101,7 +2101,7 @@ ec_GFp_nistp256_points_mul(const EC_GROUP * group, EC_POINT * r,
return ret;
}
-int
+int
ec_GFp_nistp256_precompute_mult(EC_GROUP * group, BN_CTX * ctx)
{
int ret = 0;
@@ -2222,7 +2222,7 @@ ec_GFp_nistp256_precompute_mult(EC_GROUP * group, BN_CTX * ctx)
return ret;
}
-int
+int
ec_GFp_nistp256_have_precompute_mult(const EC_GROUP * group)
{
if (EC_EX_DATA_get_data(group->extra_data, nistp256_pre_comp_dup,
diff --git a/lib/libcrypto/ec/ecp_nistp521.c b/lib/libcrypto/ec/ecp_nistp521.c
index 3d38c723961..db2060668c2 100644
--- a/lib/libcrypto/ec/ecp_nistp521.c
+++ b/lib/libcrypto/ec/ecp_nistp521.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ecp_nistp521.c,v 1.25 2021/04/20 17:28:18 tb Exp $ */
+/* $OpenBSD: ecp_nistp521.c,v 1.26 2021/04/20 17:38:02 tb Exp $ */
/*
* Written by Adam Langley (Google) for the OpenSSL project
*/
@@ -130,7 +130,7 @@ static const limb bottom58bits = 0x3ffffffffffffff;
/* bin66_to_felem takes a little-endian byte array and converts it into felem
* form. This assumes that the CPU is little-endian. */
-static void
+static void
bin66_to_felem(felem out, const u8 in[66])
{
out[0] = (*((limb *) & in[0])) & bottom58bits;
@@ -146,7 +146,7 @@ bin66_to_felem(felem out, const u8 in[66])
/* felem_to_bin66 takes an felem and serialises into a little endian, 66 byte
* array. This assumes that the CPU is little-endian. */
-static void
+static void
felem_to_bin66(u8 out[66], const felem in)
{
memset(out, 0, 66);
@@ -162,7 +162,7 @@ felem_to_bin66(u8 out[66], const felem in)
}
/* To preserve endianness when using BN_bn2bin and BN_bin2bn */
-static void
+static void
flip_endian(u8 * out, const u8 * in, unsigned len)
{
unsigned i;
@@ -171,7 +171,7 @@ flip_endian(u8 * out, const u8 * in, unsigned len)
}
/* BN_to_felem converts an OpenSSL BIGNUM into an felem */
-static int
+static int
BN_to_felem(felem out, const BIGNUM * bn)
{
felem_bytearray b_in;
@@ -209,7 +209,7 @@ felem_to_BN(BIGNUM * out, const felem in)
/* Field operations
* ---------------- */
-static void
+static void
felem_one(felem out)
{
out[0] = 1;
@@ -223,7 +223,7 @@ felem_one(felem out)
out[8] = 0;
}
-static void
+static void
felem_assign(felem out, const felem in)
{
out[0] = in[0];
@@ -238,7 +238,7 @@ felem_assign(felem out, const felem in)
}
/* felem_sum64 sets out = out + in. */
-static void
+static void
felem_sum64(felem out, const felem in)
{
out[0] += in[0];
@@ -253,7 +253,7 @@ felem_sum64(felem out, const felem in)
}
/* felem_scalar sets out = in * scalar */
-static void
+static void
felem_scalar(felem out, const felem in, limb scalar)
{
out[0] = in[0] * scalar;
@@ -268,7 +268,7 @@ felem_scalar(felem out, const felem in, limb scalar)
}
/* felem_scalar64 sets out = out * scalar */
-static void
+static void
felem_scalar64(felem out, limb scalar)
{
out[0] *= scalar;
@@ -283,7 +283,7 @@ felem_scalar64(felem out, limb scalar)
}
/* felem_scalar128 sets out = out * scalar */
-static void
+static void
felem_scalar128(largefelem out, limb scalar)
{
out[0] *= scalar;
@@ -303,7 +303,7 @@ felem_scalar128(largefelem out, limb scalar)
* On exit:
* out[i] < 2^62
*/
-static void
+static void
felem_neg(felem out, const felem in)
{
/* In order to prevent underflow, we subtract from 0 mod p. */
@@ -327,7 +327,7 @@ felem_neg(felem out, const felem in)
* On exit:
* out[i] < out[i] + 2^62
*/
-static void
+static void
felem_diff64(felem out, const felem in)
{
/* In order to prevent underflow, we add 0 mod p before subtracting. */
@@ -351,7 +351,7 @@ felem_diff64(felem out, const felem in)
* On exit:
* out[i] < out[i] + 2^63
*/
-static void
+static void
felem_diff_128_64(largefelem out, const felem in)
{
/* In order to prevent underflow, we add 0 mod p before subtracting. */
@@ -375,7 +375,7 @@ felem_diff_128_64(largefelem out, const felem in)
* On exit:
* out[i] < out[i] + 2^127 - 2^69
*/
-static void
+static void
felem_diff128(largefelem out, const largefelem in)
{
/* In order to prevent underflow, we add 0 mod p before subtracting. */
@@ -399,7 +399,7 @@ felem_diff128(largefelem out, const largefelem in)
* On exit:
* out[i] < 17 * max(in[i]) * max(in[i])
*/
-static void
+static void
felem_square(largefelem out, const felem in)
{
felem inx2, inx4;
@@ -493,7 +493,7 @@ felem_square(largefelem out, const felem in)
* On exit:
* out[i] < 17 * max(in1[i]) * max(in2[i])
*/
-static void
+static void
felem_mul(largefelem out, const felem in1, const felem in2)
{
felem in2x2;
@@ -608,7 +608,7 @@ static const limb bottom52bits = 0xfffffffffffff;
* On exit:
* out[i] < 2^59 + 2^14
*/
-static void
+static void
felem_reduce(felem out, const largefelem in)
{
u64 overflow1, overflow2;
@@ -680,7 +680,7 @@ felem_reduce(felem out, const largefelem in)
*/
}
-static void
+static void
felem_square_reduce(felem out, const felem in)
{
largefelem tmp;
@@ -688,7 +688,7 @@ felem_square_reduce(felem out, const felem in)
felem_reduce(out, tmp);
}
-static void
+static void
felem_mul_reduce(felem out, const felem in1, const felem in2)
{
largefelem tmp;
@@ -703,7 +703,7 @@ felem_mul_reduce(felem out, const felem in1, const felem in2)
* a^{p-1} = 1 (mod p)
* a^{p-2} = a^{-1} (mod p)
*/
-static void
+static void
felem_inv(felem out, const felem in)
{
felem ftmp, ftmp2, ftmp3, ftmp4;
@@ -817,7 +817,7 @@ static const felem kPrime =
* On entry:
* in[i] < 2^59 + 2^14
*/
-static limb
+static limb
felem_is_zero(const felem in)
{
felem ftmp;
@@ -886,7 +886,7 @@ felem_is_zero(const felem in)
return is_zero;
}
-static int
+static int
felem_is_zero_int(const felem in)
{
return (int) (felem_is_zero(in) & ((limb) 1));
@@ -896,7 +896,7 @@ felem_is_zero_int(const felem in)
* On entry:
* in[i] < 2^59 + 2^14
*/
-static void
+static void
felem_contract(felem out, const felem in)
{
limb is_p, is_greater, sign;
@@ -1153,7 +1153,7 @@ copy_conditional(felem out, const felem in, limb mask)
* are equal (while not equal to the point at infinity). This case never
* happens during single point multiplication, so there is no timing leak for
* ECDH or ECDSA signing. */
-static void
+static void
point_add(felem x3, felem y3, felem z3,
const felem x1, const felem y1, const felem z1,
const int mixed, const felem x2, const felem y2, const felem z2)
@@ -1450,7 +1450,7 @@ static const felem gmul[16][3] =
/* select_point selects the |idx|th point from a precomputation table and
* copies it to out. */
-static void
+static void
select_point(const limb idx, unsigned int size, const felem pre_comp[ /* size */ ][3],
felem out[3])
{
@@ -1472,7 +1472,7 @@ select_point(const limb idx, unsigned int size, const felem pre_comp[ /* size */
}
/* get_bit returns the |i|th bit in |in| */
-static char
+static char
get_bit(const felem_bytearray in, int i)
{
if (i < 0)
@@ -1485,7 +1485,7 @@ get_bit(const felem_bytearray in, int i)
* the scalars in scalars[]. If g_scalar is non-NULL, we also add this multiple
* of the generator, using certain (large) precomputed multiples in g_pre_comp.
* Output point (X, Y, Z) is stored in x_out, y_out, z_out */
-static void
+static void
batch_mul(felem x_out, felem y_out, felem z_out,
const felem_bytearray scalars[], const unsigned num_points, const u8 * g_scalar,
const int mixed, const felem pre_comp[][17][3], const felem g_pre_comp[16][3])
@@ -1588,20 +1588,20 @@ EC_GFp_nistp521_method(void)
.group_get_curve = ec_GFp_simple_group_get_curve,
.group_get_degree = ec_GFp_simple_group_get_degree,
.group_check_discriminant =
- ec_GFp_simple_group_check_discriminant,
+ ec_GFp_simple_group_check_discriminant,
.point_init = ec_GFp_simple_point_init,
.point_finish = ec_GFp_simple_point_finish,
.point_clear_finish = ec_GFp_simple_point_clear_finish,
.point_copy = ec_GFp_simple_point_copy,
.point_set_to_infinity = ec_GFp_simple_point_set_to_infinity,
.point_set_Jprojective_coordinates =
- ec_GFp_simple_set_Jprojective_coordinates,
+ ec_GFp_simple_set_Jprojective_coordinates,
.point_get_Jprojective_coordinates =
- ec_GFp_simple_get_Jprojective_coordinates,
+ ec_GFp_simple_get_Jprojective_coordinates,
.point_set_affine_coordinates =
- ec_GFp_simple_point_set_affine_coordinates,
+ ec_GFp_simple_point_set_affine_coordinates,
.point_get_affine_coordinates =
- ec_GFp_nistp521_point_get_affine_coordinates,
+ ec_GFp_nistp521_point_get_affine_coordinates,
.add = ec_GFp_simple_add,
.dbl = ec_GFp_simple_dbl,
.invert = ec_GFp_simple_invert,
@@ -1651,7 +1651,7 @@ nistp521_pre_comp_dup(void *src_)
return src_;
}
-static void
+static void
nistp521_pre_comp_free(void *pre_)
{
int i;
@@ -1667,7 +1667,7 @@ nistp521_pre_comp_free(void *pre_)
free(pre);
}
-static void
+static void
nistp521_pre_comp_clear_free(void *pre_)
{
int i;
@@ -1687,7 +1687,7 @@ nistp521_pre_comp_clear_free(void *pre_)
/* OPENSSL EC_METHOD FUNCTIONS
*/
-int
+int
ec_GFp_nistp521_group_init(EC_GROUP * group)
{
int ret;
@@ -1696,7 +1696,7 @@ ec_GFp_nistp521_group_init(EC_GROUP * group)
return ret;
}
-int
+int
ec_GFp_nistp521_group_set_curve(EC_GROUP * group, const BIGNUM * p,
const BIGNUM * a, const BIGNUM * b, BN_CTX * ctx)
{
@@ -1730,7 +1730,7 @@ ec_GFp_nistp521_group_set_curve(EC_GROUP * group, const BIGNUM * p,
/* Takes the Jacobian coordinates (X, Y, Z) of a point and returns
* (X', Y') = (X/Z^2, Y/Z^3) */
-int
+int
ec_GFp_nistp521_point_get_affine_coordinates(const EC_GROUP * group,
const EC_POINT * point, BIGNUM * x, BIGNUM * y, BN_CTX * ctx)
{
@@ -1770,7 +1770,7 @@ ec_GFp_nistp521_point_get_affine_coordinates(const EC_GROUP * group,
return 1;
}
-static void
+static void
make_points_affine(size_t num, felem points[ /* num */ ][3], felem tmp_felems[ /* num+1 */ ])
{
/*
@@ -1793,7 +1793,7 @@ make_points_affine(size_t num, felem points[ /* num */ ][3], felem tmp_felems[ /
/* Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL values
* Result is stored in r (r can equal one of the inputs). */
-int
+int
ec_GFp_nistp521_points_mul(const EC_GROUP * group, EC_POINT * r,
const BIGNUM * scalar, size_t num, const EC_POINT * points[],
const BIGNUM * scalars[], BN_CTX * ctx)
@@ -1990,7 +1990,7 @@ ec_GFp_nistp521_points_mul(const EC_GROUP * group, EC_POINT * r,
return ret;
}
-int
+int
ec_GFp_nistp521_precompute_mult(EC_GROUP * group, BN_CTX * ctx)
{
int ret = 0;
@@ -2097,7 +2097,7 @@ ec_GFp_nistp521_precompute_mult(EC_GROUP * group, BN_CTX * ctx)
return ret;
}
-int
+int
ec_GFp_nistp521_have_precompute_mult(const EC_GROUP * group)
{
if (EC_EX_DATA_get_data(group->extra_data, nistp521_pre_comp_dup,
diff --git a/lib/libcrypto/ec/ecp_smpl.c b/lib/libcrypto/ec/ecp_smpl.c
index 92234274594..96ab5bd44eb 100644
--- a/lib/libcrypto/ec/ecp_smpl.c
+++ b/lib/libcrypto/ec/ecp_smpl.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ecp_smpl.c,v 1.31 2021/04/20 17:28:18 tb Exp $ */
+/* $OpenBSD: ecp_smpl.c,v 1.32 2021/04/20 17:38:02 tb Exp $ */
/* Includes code written by Lenka Fibikova <fibikova@exp-math.uni-essen.de>
* for the OpenSSL project.
* Includes code written by Bodo Moeller for the OpenSSL project.
@@ -81,20 +81,20 @@ EC_GFp_simple_method(void)
.group_get_curve = ec_GFp_simple_group_get_curve,
.group_get_degree = ec_GFp_simple_group_get_degree,
.group_check_discriminant =
- ec_GFp_simple_group_check_discriminant,
+ ec_GFp_simple_group_check_discriminant,
.point_init = ec_GFp_simple_point_init,
.point_finish = ec_GFp_simple_point_finish,
.point_clear_finish = ec_GFp_simple_point_clear_finish,
.point_copy = ec_GFp_simple_point_copy,
.point_set_to_infinity = ec_GFp_simple_point_set_to_infinity,
.point_set_Jprojective_coordinates =
- ec_GFp_simple_set_Jprojective_coordinates,
+ ec_GFp_simple_set_Jprojective_coordinates,
.point_get_Jprojective_coordinates =
- ec_GFp_simple_get_Jprojective_coordinates,
+ ec_GFp_simple_get_Jprojective_coordinates,
.point_set_affine_coordinates =
- ec_GFp_simple_point_set_affine_coordinates,
+ ec_GFp_simple_point_set_affine_coordinates,
.point_get_affine_coordinates =
- ec_GFp_simple_point_get_affine_coordinates,
+ ec_GFp_simple_point_get_affine_coordinates,
.add = ec_GFp_simple_add,
.dbl = ec_GFp_simple_dbl,
.invert = ec_GFp_simple_invert,
@@ -129,7 +129,7 @@ EC_GFp_simple_method(void)
*/
-int
+int
ec_GFp_simple_group_init(EC_GROUP * group)
{
BN_init(&group->field);
@@ -140,7 +140,7 @@ ec_GFp_simple_group_init(EC_GROUP * group)
}
-void
+void
ec_GFp_simple_group_finish(EC_GROUP * group)
{
BN_free(&group->field);
@@ -149,7 +149,7 @@ ec_GFp_simple_group_finish(EC_GROUP * group)
}
-void
+void
ec_GFp_simple_group_clear_finish(EC_GROUP * group)
{
BN_clear_free(&group->field);
@@ -158,7 +158,7 @@ ec_GFp_simple_group_clear_finish(EC_GROUP * group)
}
-int
+int
ec_GFp_simple_group_copy(EC_GROUP * dest, const EC_GROUP * src)
{
if (!BN_copy(&dest->field, &src->field))
@@ -174,7 +174,7 @@ ec_GFp_simple_group_copy(EC_GROUP * dest, const EC_GROUP * src)
}
-int
+int
ec_GFp_simple_group_set_curve(EC_GROUP * group,
const BIGNUM * p, const BIGNUM * a, const BIGNUM * b, BN_CTX * ctx)
{
@@ -231,7 +231,7 @@ ec_GFp_simple_group_set_curve(EC_GROUP * group,
}
-int
+int
ec_GFp_simple_group_get_curve(const EC_GROUP * group, BIGNUM * p, BIGNUM * a, BIGNUM * b, BN_CTX * ctx)
{
int ret = 0;
@@ -275,14 +275,14 @@ ec_GFp_simple_group_get_curve(const EC_GROUP * group, BIGNUM * p, BIGNUM * a, BI
}
-int
+int
ec_GFp_simple_group_get_degree(const EC_GROUP * group)
{
return BN_num_bits(&group->field);
}
-int
+int
ec_GFp_simple_group_check_discriminant(const EC_GROUP * group, BN_CTX * ctx)
{
int ret = 0;
@@ -358,7 +358,7 @@ ec_GFp_simple_group_check_discriminant(const EC_GROUP * group, BN_CTX * ctx)
}
-int
+int
ec_GFp_simple_point_init(EC_POINT * point)
{
BN_init(&point->X);
@@ -370,7 +370,7 @@ ec_GFp_simple_point_init(EC_POINT * point)
}
-void
+void
ec_GFp_simple_point_finish(EC_POINT * point)
{
BN_free(&point->X);
@@ -379,7 +379,7 @@ ec_GFp_simple_point_finish(EC_POINT * point)
}
-void
+void
ec_GFp_simple_point_clear_finish(EC_POINT * point)
{
BN_clear_free(&point->X);
@@ -389,7 +389,7 @@ ec_GFp_simple_point_clear_finish(EC_POINT * point)
}
-int
+int
ec_GFp_simple_point_copy(EC_POINT * dest, const EC_POINT * src)
{
if (!BN_copy(&dest->X, &src->X))
@@ -404,7 +404,7 @@ ec_GFp_simple_point_copy(EC_POINT * dest, const EC_POINT * src)
}
-int
+int
ec_GFp_simple_point_set_to_infinity(const EC_GROUP * group, EC_POINT * point)
{
point->Z_is_one = 0;
@@ -633,7 +633,7 @@ ec_GFp_simple_point_get_affine_coordinates(const EC_GROUP * group, const EC_POIN
return ret;
}
-int
+int
ec_GFp_simple_add(const EC_GROUP * group, EC_POINT * r, const EC_POINT * a, const EC_POINT * b, BN_CTX * ctx)
{
int (*field_mul) (const EC_GROUP *, BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);
@@ -822,7 +822,7 @@ ec_GFp_simple_add(const EC_GROUP * group, EC_POINT * r, const EC_POINT * a, cons
}
-int
+int
ec_GFp_simple_dbl(const EC_GROUP * group, EC_POINT * r, const EC_POINT * a, BN_CTX * ctx)
{
int (*field_mul) (const EC_GROUP *, BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);
@@ -964,7 +964,7 @@ ec_GFp_simple_dbl(const EC_GROUP * group, EC_POINT * r, const EC_POINT * a, BN_C
}
-int
+int
ec_GFp_simple_invert(const EC_GROUP * group, EC_POINT * point, BN_CTX * ctx)
{
if (EC_POINT_is_at_infinity(group, point) > 0 || BN_is_zero(&point->Y))
@@ -975,14 +975,14 @@ ec_GFp_simple_invert(const EC_GROUP * group, EC_POINT * point, BN_CTX * ctx)
}
-int
+int
ec_GFp_simple_is_at_infinity(const EC_GROUP * group, const EC_POINT * point)
{
return BN_is_zero(&point->Z);
}
-int
+int
ec_GFp_simple_is_on_curve(const EC_GROUP * group, const EC_POINT * point, BN_CTX * ctx)
{
int (*field_mul) (const EC_GROUP *, BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);
@@ -1085,7 +1085,7 @@ ec_GFp_simple_is_on_curve(const EC_GROUP * group, const EC_POINT * point, BN_CTX
}
-int
+int
ec_GFp_simple_cmp(const EC_GROUP * group, const EC_POINT * a, const EC_POINT * b, BN_CTX * ctx)
{
/*
@@ -1187,7 +1187,7 @@ ec_GFp_simple_cmp(const EC_GROUP * group, const EC_POINT * a, const EC_POINT * b
}
-int
+int
ec_GFp_simple_make_affine(const EC_GROUP * group, EC_POINT * point, BN_CTX * ctx)
{
BN_CTX *new_ctx = NULL;
@@ -1225,7 +1225,7 @@ ec_GFp_simple_make_affine(const EC_GROUP * group, EC_POINT * point, BN_CTX * ctx
}
-int
+int
ec_GFp_simple_points_make_affine(const EC_GROUP * group, size_t num, EC_POINT * points[], BN_CTX * ctx)
{
BN_CTX *new_ctx = NULL;
@@ -1271,11 +1271,11 @@ ec_GFp_simple_points_make_affine(const EC_GROUP * group, size_t num, EC_POINT *
/*
* The array is used as a binary tree, exactly as in heapsort:
- *
+ *
* heap[1] heap[2] heap[3] heap[4] heap[5]
* heap[6] heap[7] heap[8]heap[9] heap[10]heap[11]
* heap[12]heap[13] heap[14] heap[15]
- *
+ *
* We put the Z's in the last line; then we set each other node to the
* product of its two child-nodes (where empty or 0 entries are
* treated as ones); then we invert heap[1]; then we invert each
@@ -1400,13 +1400,13 @@ ec_GFp_simple_points_make_affine(const EC_GROUP * group, size_t num, EC_POINT *
}
-int
+int
ec_GFp_simple_field_mul(const EC_GROUP * group, BIGNUM * r, const BIGNUM * a, const BIGNUM * b, BN_CTX * ctx)
{
return BN_mod_mul(r, a, b, &group->field, ctx);
}
-int
+int
ec_GFp_simple_field_sqr(const EC_GROUP * group, BIGNUM * r, const BIGNUM * a, BN_CTX * ctx)
{
return BN_mod_sqr(r, a, &group->field, ctx);
@@ -1416,7 +1416,7 @@ ec_GFp_simple_field_sqr(const EC_GROUP * group, BIGNUM * r, const BIGNUM * a, BN
* Apply randomization of EC point projective coordinates:
*
* (X, Y, Z) = (lambda^2 * X, lambda^3 * Y, lambda * Z)
- *
+ *
* where lambda is in the interval [1, group->field).
*/
int
@@ -1686,7 +1686,7 @@ ec_GFp_simple_mul_ct(const EC_GROUP *group, EC_POINT *r, const BIGNUM *scalar,
}
/* one final cswap to move the right value into r */
EC_POINT_CSWAP(pbit, r, s, group_top, Z_is_one);
-
+
ret = 1;
err: