While there, zap trailing whitespace from a KNF approximation gone wrong.
-/* $OpenBSD: ecp_mont.c,v 1.18 2021/04/20 17:28:18 tb Exp $ */
+/* $OpenBSD: ecp_mont.c,v 1.19 2021/04/20 17:38:02 tb Exp $ */
/*
* Originally written by Bodo Moeller for the OpenSSL project.
*/
.group_get_curve = ec_GFp_simple_group_get_curve,
.group_get_degree = ec_GFp_simple_group_get_degree,
.group_check_discriminant =
- ec_GFp_simple_group_check_discriminant,
+ ec_GFp_simple_group_check_discriminant,
.point_init = ec_GFp_simple_point_init,
.point_finish = ec_GFp_simple_point_finish,
.point_clear_finish = ec_GFp_simple_point_clear_finish,
.point_copy = ec_GFp_simple_point_copy,
.point_set_to_infinity = ec_GFp_simple_point_set_to_infinity,
.point_set_Jprojective_coordinates =
- ec_GFp_simple_set_Jprojective_coordinates,
+ ec_GFp_simple_set_Jprojective_coordinates,
.point_get_Jprojective_coordinates =
- ec_GFp_simple_get_Jprojective_coordinates,
+ ec_GFp_simple_get_Jprojective_coordinates,
.point_set_affine_coordinates =
- ec_GFp_simple_point_set_affine_coordinates,
+ ec_GFp_simple_point_set_affine_coordinates,
.point_get_affine_coordinates =
- ec_GFp_simple_point_get_affine_coordinates,
+ ec_GFp_simple_point_get_affine_coordinates,
.add = ec_GFp_simple_add,
.dbl = ec_GFp_simple_dbl,
.invert = ec_GFp_simple_invert,
}
-int
+int
ec_GFp_mont_group_init(EC_GROUP * group)
{
int ok;
}
-void
+void
ec_GFp_mont_group_finish(EC_GROUP * group)
{
BN_MONT_CTX_free(group->field_data1);
}
-void
+void
ec_GFp_mont_group_clear_finish(EC_GROUP * group)
{
BN_MONT_CTX_free(group->field_data1);
}
-int
+int
ec_GFp_mont_group_copy(EC_GROUP * dest, const EC_GROUP * src)
{
BN_MONT_CTX_free(dest->field_data1);
}
-int
+int
ec_GFp_mont_group_set_curve(EC_GROUP *group, const BIGNUM *p, const BIGNUM *a,
const BIGNUM *b, BN_CTX *ctx)
{
}
-int
+int
ec_GFp_mont_field_mul(const EC_GROUP *group, BIGNUM *r, const BIGNUM *a,
const BIGNUM *b, BN_CTX *ctx)
{
}
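For orientation, not part of the change itself: the Montgomery method keeps field elements in Montgomery form, caching a BN_MONT_CTX for the curve prime in group->field_data1 (freed in the finish functions above), so field_mul and field_sqr become single Montgomery multiplications and field_encode/field_decode are form conversions. A minimal sketch using only the public BN API, names hypothetical:

#include <openssl/bn.h>

/* Hypothetical helper, not in ecp_mont.c: r = a * b mod p via Montgomery. */
static int
mont_field_mul_sketch(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
    const BIGNUM *p, BN_CTX *ctx)
{
	BN_MONT_CTX *mont = NULL;
	BIGNUM *am, *bm;
	int ret = 0;

	BN_CTX_start(ctx);
	if ((am = BN_CTX_get(ctx)) == NULL || (bm = BN_CTX_get(ctx)) == NULL)
		goto err;
	if ((mont = BN_MONT_CTX_new()) == NULL)
		goto err;
	if (!BN_MONT_CTX_set(mont, p, ctx))		/* precompute for p */
		goto err;
	if (!BN_to_montgomery(am, a, mont, ctx) ||	/* "encode" a and b */
	    !BN_to_montgomery(bm, b, mont, ctx) ||
	    !BN_mod_mul_montgomery(am, am, bm, mont, ctx) ||
	    !BN_from_montgomery(r, am, mont, ctx))	/* "decode" result */
		goto err;
	ret = 1;
 err:
	BN_MONT_CTX_free(mont);
	BN_CTX_end(ctx);
	return ret;
}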
-int
+int
ec_GFp_mont_field_sqr(const EC_GROUP *group, BIGNUM *r, const BIGNUM *a,
BN_CTX *ctx)
{
}
-int
+int
ec_GFp_mont_field_encode(const EC_GROUP *group, BIGNUM *r, const BIGNUM *a,
BN_CTX *ctx)
{
}
-int
+int
ec_GFp_mont_field_decode(const EC_GROUP *group, BIGNUM *r, const BIGNUM *a,
BN_CTX *ctx)
{
}
-int
+int
ec_GFp_mont_field_set_to_one(const EC_GROUP *group, BIGNUM *r, BN_CTX *ctx)
{
if (group->field_data2 == NULL) {
-/* $OpenBSD: ecp_nist.c,v 1.16 2021/04/20 17:28:18 tb Exp $ */
+/* $OpenBSD: ecp_nist.c,v 1.17 2021/04/20 17:38:02 tb Exp $ */
/*
* Written by Nils Larsch for the OpenSSL project.
*/
.group_get_curve = ec_GFp_simple_group_get_curve,
.group_get_degree = ec_GFp_simple_group_get_degree,
.group_check_discriminant =
- ec_GFp_simple_group_check_discriminant,
+ ec_GFp_simple_group_check_discriminant,
.point_init = ec_GFp_simple_point_init,
.point_finish = ec_GFp_simple_point_finish,
.point_clear_finish = ec_GFp_simple_point_clear_finish,
.point_copy = ec_GFp_simple_point_copy,
.point_set_to_infinity = ec_GFp_simple_point_set_to_infinity,
.point_set_Jprojective_coordinates =
- ec_GFp_simple_set_Jprojective_coordinates,
+ ec_GFp_simple_set_Jprojective_coordinates,
.point_get_Jprojective_coordinates =
- ec_GFp_simple_get_Jprojective_coordinates,
+ ec_GFp_simple_get_Jprojective_coordinates,
.point_set_affine_coordinates =
- ec_GFp_simple_point_set_affine_coordinates,
+ ec_GFp_simple_point_set_affine_coordinates,
.point_get_affine_coordinates =
- ec_GFp_simple_point_get_affine_coordinates,
+ ec_GFp_simple_point_get_affine_coordinates,
.add = ec_GFp_simple_add,
.dbl = ec_GFp_simple_dbl,
.invert = ec_GFp_simple_invert,
return &ret;
}
-int
+int
ec_GFp_nist_group_copy(EC_GROUP * dest, const EC_GROUP * src)
{
dest->field_mod_func = src->field_mod_func;
return ec_GFp_simple_group_copy(dest, src);
}
-int
+int
ec_GFp_nist_group_set_curve(EC_GROUP *group, const BIGNUM *p,
const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx)
{
}
-int
+int
ec_GFp_nist_field_mul(const EC_GROUP *group, BIGNUM *r, const BIGNUM *a,
const BIGNUM *b, BN_CTX *ctx)
{
}
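Again for context only: the ec_GFp_nist_* method multiplies with plain BN_mul() and then applies the curve-specific fast reduction stored in group->field_mod_func (copied in ec_GFp_nist_group_copy above), e.g. BN_nist_mod_224() for P-224. A rough sketch against the internal EC_GROUP fields visible in this diff, error paths compressed:

static int
nist_field_mul_sketch(const EC_GROUP *group, BIGNUM *r, const BIGNUM *a,
    const BIGNUM *b, BN_CTX *ctx)
{
	BIGNUM *tmp;
	int ret = 0;

	BN_CTX_start(ctx);
	if ((tmp = BN_CTX_get(ctx)) == NULL)
		goto err;
	if (!BN_mul(tmp, a, b, ctx))		/* full double-width product */
		goto err;
	/* curve-specific reduction, e.g. BN_nist_mod_224 for P-224 */
	if (!group->field_mod_func(r, tmp, &group->field, ctx))
		goto err;
	ret = 1;
 err:
	BN_CTX_end(ctx);
	return ret;
}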
-int
+int
ec_GFp_nist_field_sqr(const EC_GROUP * group, BIGNUM * r, const BIGNUM * a,
BN_CTX * ctx)
{
-/* $OpenBSD: ecp_nistp224.c,v 1.25 2021/04/20 17:28:18 tb Exp $ */
+/* $OpenBSD: ecp_nistp224.c,v 1.26 2021/04/20 17:38:02 tb Exp $ */
/*
* Written by Emilia Kasper (Google) for the OpenSSL project.
*/
}
/* Helper functions to convert field elements to/from internal representation */
-static void
+static void
bin28_to_felem(felem out, const u8 in[28])
{
out[0] = *((const uint64_t *) (in)) & 0x00ffffffffffffff;
out[3] = (*((const uint64_t *) (in + 21))) & 0x00ffffffffffffff;
}
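A note on the representation, inferred from the two limbs shown: a P-224 felem is four 64-bit limbs holding 56 bits each, loaded little-endian from the 28-byte input at byte offsets 0, 7, 14 and 21. The two elided middle limbs presumably follow the same pattern:

	/* assumed middle limbs, mirroring offsets 0 and 21 shown above */
	out[1] = (*((const uint64_t *) (in + 7))) & 0x00ffffffffffffff;
	out[2] = (*((const uint64_t *) (in + 14))) & 0x00ffffffffffffff;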
-static void
+static void
felem_to_bin28(u8 out[28], const felem in)
{
unsigned i;
}
/* To preserve endianness when using BN_bn2bin and BN_bin2bn */
-static void
+static void
flip_endian(u8 * out, const u8 * in, unsigned len)
{
unsigned i;
}
/* From OpenSSL BIGNUM to internal representation */
-static int
+static int
BN_to_felem(felem out, const BIGNUM * bn)
{
felem_bytearray b_in;
*
*/
-static void
+static void
felem_one(felem out)
{
out[0] = 1;
out[3] = 0;
}
-static void
+static void
felem_assign(felem out, const felem in)
{
out[0] = in[0];
}
/* Sum two field elements: out += in */
-static void
+static void
felem_sum(felem out, const felem in)
{
out[0] += in[0];
/* Get negative value: out = -in */
/* Assumes in[i] < 2^57 */
-static void
+static void
felem_neg(felem out, const felem in)
{
static const limb two58p2 = (((limb) 1) << 58) + (((limb) 1) << 2);
/* Subtract field elements: out -= in */
/* Assumes in[i] < 2^57 */
-static void
+static void
felem_diff(felem out, const felem in)
{
static const limb two58p2 = (((limb) 1) << 58) + (((limb) 1) << 2);
/* Subtract in unreduced 128-bit mode: out -= in */
/* Assumes in[i] < 2^119 */
-static void
+static void
widefelem_diff(widefelem out, const widefelem in)
{
static const widelimb two120 = ((widelimb) 1) << 120;
/* Subtract in mixed mode: out128 -= in64 */
/* in[i] < 2^63 */
-static void
+static void
felem_diff_128_64(widefelem out, const felem in)
{
static const widelimb two64p8 = (((widelimb) 1) << 64) +
/* Multiply a field element by a scalar: out = out * scalar
* The scalars we actually use are small, so results fit without overflow */
-static void
+static void
felem_scalar(felem out, const limb scalar)
{
out[0] *= scalar;
/* Multiply an unreduced field element by a scalar: out = out * scalar
* The scalars we actually use are small, so results fit without overflow */
-static void
+static void
widefelem_scalar(widefelem out, const widelimb scalar)
{
out[0] *= scalar;
}
/* Square a field element: out = in^2 */
-static void
+static void
felem_square(widefelem out, const felem in)
{
limb tmp0, tmp1, tmp2;
}
/* Multiply two field elements: out = in1 * in2 */
-static void
+static void
felem_mul(widefelem out, const felem in1, const felem in2)
{
out[0] = ((widelimb) in1[0]) * in2[0];
/* Reduce seven 128-bit coefficients to four 64-bit coefficients.
* Requires in[i] < 2^126,
* ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, out[3] <= 2^56 + 2^16 */
-static void
+static void
felem_reduce(felem out, const widefelem in)
{
static const widelimb two127p15 = (((widelimb) 1) << 127) +
out[3] = output[3];
}
-static void
+static void
felem_square_reduce(felem out, const felem in)
{
widefelem tmp;
felem_reduce(out, tmp);
}
-static void
+static void
felem_mul_reduce(felem out, const felem in1, const felem in2)
{
widefelem tmp;
/* Reduce to unique minimal representation.
* Requires 0 <= in < 2*p (always call felem_reduce first) */
-static void
+static void
felem_contract(felem out, const felem in)
{
static const int64_t two56 = ((limb) 1) << 56;
* We know that field elements are reduced to in < 2^225,
* so we only need to check three cases: 0, 2^224 - 2^96 + 1,
* and 2^225 - 2^97 + 2 */
-static limb
+static limb
felem_is_zero(const felem in)
{
limb zero, two224m96p1, two225m97p2;
return (zero | two224m96p1 | two225m97p2);
}
-static limb
+static limb
felem_is_zero_int(const felem in)
{
return (int) (felem_is_zero(in) & ((limb) 1));
/* Invert a field element */
/* Computation chain copied from djb's code */
-static void
+static void
felem_inv(felem out, const felem in)
{
felem ftmp, ftmp2, ftmp3, ftmp4;
* (while not equal to the point at infinity).
* This case never happens during single point multiplication,
* so there is no timing leak for ECDH or ECDSA signing. */
-static void
+static void
point_add(felem x3, felem y3, felem z3,
const felem x1, const felem y1, const felem z1,
const int mixed, const felem x2, const felem y2, const felem z2)
/* select_point selects the |idx|th point from a precomputation table and
* copies it to out. */
-static void
+static void
select_point(const u64 idx, unsigned int size, const felem pre_comp[ /* size */ ][3], felem out[3])
{
unsigned i, j;
}
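select_point must not branch or index memory on the secret idx, so it visits every table entry and keeps only the wanted one under a mask. A generic sketch of that pattern (not the file's exact code; a table width of 12 words is assumed for x, y, z of four limbs each):

#include <stdint.h>
#include <string.h>

static void
select_sketch(uint64_t idx, unsigned int size,
    const uint64_t table[][12], uint64_t out[12])
{
	unsigned int i, j;
	uint64_t mask;

	memset(out, 0, 12 * sizeof(uint64_t));
	for (i = 0; i < size; i++) {
		/* mask becomes all-ones iff i == idx, without branching */
		mask = i ^ idx;
		mask = (mask | (0 - mask)) >> 63;	/* 0 if equal, else 1 */
		mask -= 1;				/* ~0 if equal, else 0 */
		for (j = 0; j < 12; j++)
			out[j] |= table[i][j] & mask;
	}
}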
/* get_bit returns the |i|th bit in |in| */
-static char
+static char
get_bit(const felem_bytearray in, unsigned i)
{
if (i >= 224)
* the scalars in scalars[]. If g_scalar is non-NULL, we also add this multiple
* of the generator, using certain (large) precomputed multiples in g_pre_comp.
* Output point (X, Y, Z) is stored in x_out, y_out, z_out */
-static void
+static void
batch_mul(felem x_out, felem y_out, felem z_out,
const felem_bytearray scalars[], const unsigned num_points, const u8 * g_scalar,
const int mixed, const felem pre_comp[][17][3], const felem g_pre_comp[2][16][3])
return src_;
}
-static void
+static void
nistp224_pre_comp_free(void *pre_)
{
int i;
free(pre);
}
-static void
+static void
nistp224_pre_comp_clear_free(void *pre_)
{
int i;
/* OPENSSL EC_METHOD FUNCTIONS
*/
-int
+int
ec_GFp_nistp224_group_init(EC_GROUP * group)
{
int ret;
return ret;
}
-int
+int
ec_GFp_nistp224_group_set_curve(EC_GROUP * group, const BIGNUM * p,
const BIGNUM * a, const BIGNUM * b, BN_CTX * ctx)
{
/* Takes the Jacobian coordinates (X, Y, Z) of a point and returns
* (X', Y') = (X/Z^2, Y/Z^3) */
-int
+int
ec_GFp_nistp224_point_get_affine_coordinates(const EC_GROUP * group,
const EC_POINT * point, BIGNUM * x, BIGNUM * y, BN_CTX * ctx)
{
return 1;
}
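The conversion described in the comment above is, at the BIGNUM level, one modular inversion plus a few multiplications; the nistp224 code performs the same steps on felems (felem_inv, felem_square, felem_mul) before converting back to BIGNUMs. Illustration only, with p the curve prime:

/* Hypothetical: affine (x, y) from Jacobian (X, Y, Z) modulo the prime p. */
static int
jacobian_to_affine_sketch(BIGNUM *x, BIGNUM *y, const BIGNUM *X,
    const BIGNUM *Y, const BIGNUM *Z, const BIGNUM *p, BN_CTX *ctx)
{
	BIGNUM *zinv, *zinv2;
	int ret = 0;

	BN_CTX_start(ctx);
	if ((zinv = BN_CTX_get(ctx)) == NULL ||
	    (zinv2 = BN_CTX_get(ctx)) == NULL)
		goto err;
	if (BN_mod_inverse(zinv, Z, p, ctx) == NULL)	/* zinv = Z^-1 */
		goto err;
	if (!BN_mod_sqr(zinv2, zinv, p, ctx) ||		/* Z^-2 */
	    !BN_mod_mul(x, X, zinv2, p, ctx) ||		/* x = X / Z^2 */
	    !BN_mod_mul(zinv2, zinv2, zinv, p, ctx) ||	/* Z^-3 */
	    !BN_mod_mul(y, Y, zinv2, p, ctx))		/* y = Y / Z^3 */
		goto err;
	ret = 1;
 err:
	BN_CTX_end(ctx);
	return ret;
}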
-static void
+static void
make_points_affine(size_t num, felem points[ /* num */ ][3], felem tmp_felems[ /* num+1 */ ])
{
/*
/* Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL values
* Result is stored in r (r can equal one of the inputs). */
-int
+int
ec_GFp_nistp224_points_mul(const EC_GROUP * group, EC_POINT * r,
const BIGNUM * scalar, size_t num, const EC_POINT * points[],
const BIGNUM * scalars[], BN_CTX * ctx)
return ret;
}
-int
+int
ec_GFp_nistp224_precompute_mult(EC_GROUP * group, BN_CTX * ctx)
{
int ret = 0;
return ret;
}
-int
+int
ec_GFp_nistp224_have_precompute_mult(const EC_GROUP * group)
{
if (EC_EX_DATA_get_data(group->extra_data, nistp224_pre_comp_dup,
-/* $OpenBSD: ecp_nistp256.c,v 1.24 2021/04/20 17:28:18 tb Exp $ */
+/* $OpenBSD: ecp_nistp256.c,v 1.25 2021/04/20 17:38:02 tb Exp $ */
/*
* Written by Adam Langley (Google) for the OpenSSL project
*/
/* bin32_to_felem takes a little-endian byte array and converts it into felem
* form. This assumes that the CPU is little-endian. */
-static void
+static void
bin32_to_felem(felem out, const u8 in[32])
{
out[0] = *((u64 *) & in[0]);
/* smallfelem_to_bin32 takes a smallfelem and serialises into a little endian,
* 32 byte array. This assumes that the CPU is little-endian. */
-static void
+static void
smallfelem_to_bin32(u8 out[32], const smallfelem in)
{
*((u64 *) & out[0]) = in[0];
}
/* To preserve endianness when using BN_bn2bin and BN_bin2bn */
-static void
+static void
flip_endian(u8 * out, const u8 * in, unsigned len)
{
unsigned i;
}
/* BN_to_felem converts an OpenSSL BIGNUM into an felem */
-static int
+static int
BN_to_felem(felem out, const BIGNUM * bn)
{
felem_bytearray b_in;
/* Field operations
* ---------------- */
-static void
+static void
smallfelem_one(smallfelem out)
{
out[0] = 1;
out[3] = 0;
}
-static void
+static void
smallfelem_assign(smallfelem out, const smallfelem in)
{
out[0] = in[0];
out[3] = in[3];
}
-static void
+static void
felem_assign(felem out, const felem in)
{
out[0] = in[0];
}
/* felem_sum sets out = out + in. */
-static void
+static void
felem_sum(felem out, const felem in)
{
out[0] += in[0];
}
/* felem_small_sum sets out = out + in. */
-static void
+static void
felem_small_sum(felem out, const smallfelem in)
{
out[0] += in[0];
}
/* felem_scalar sets out = out * scalar */
-static void
+static void
felem_scalar(felem out, const u64 scalar)
{
out[0] *= scalar;
}
/* longfelem_scalar sets out = out * scalar */
-static void
+static void
longfelem_scalar(longfelem out, const u64 scalar)
{
out[0] *= scalar;
* On exit:
* out[i] < out[i] + 2^105
*/
-static void
+static void
smallfelem_neg(felem out, const smallfelem small)
{
/* In order to prevent underflow, we subtract from 0 mod p. */
* On exit:
* out[i] < out[i] + 2^105
*/
-static void
+static void
felem_diff(felem out, const felem in)
{
/* In order to prevent underflow, we add 0 mod p before subtracting. */
* On exit:
* out[i] < out[i] + 2^107
*/
-static void
+static void
felem_diff_zero107(felem out, const felem in)
{
/* In order to prevent underflow, we add 0 mod p before subtracting. */
* On exit:
* out[i] < out[i] + 2^70 + 2^40
*/
-static void
+static void
longfelem_diff(longfelem out, const longfelem in)
{
static const limb two70m8p6 = (((limb) 1) << 70) - (((limb) 1) << 8) + (((limb) 1) << 6);
* On exit:
* out[i] < 2^64
*/
-static void
+static void
felem_shrink(smallfelem out, const felem in)
{
felem tmp;
}
/* smallfelem_expand converts a smallfelem to an felem */
-static void
+static void
smallfelem_expand(felem out, const smallfelem in)
{
out[0] = in[0];
* On exit:
* out[i] < 7 * 2^64 < 2^67
*/
-static void
+static void
smallfelem_square(longfelem out, const smallfelem small)
{
limb a;
* On exit:
* out[i] < 7 * 2^64 < 2^67
*/
-static void
+static void
felem_square(longfelem out, const felem in)
{
u64 small[4];
* On exit:
* out[i] < 7 * 2^64 < 2^67
*/
-static void
+static void
smallfelem_mul(longfelem out, const smallfelem small1, const smallfelem small2)
{
limb a;
* On exit:
* out[i] < 7 * 2^64 < 2^67
*/
-static void
+static void
felem_mul(longfelem out, const felem in1, const felem in2)
{
smallfelem small1, small2;
* On exit:
* out[i] < 7 * 2^64 < 2^67
*/
-static void
+static void
felem_small_mul(longfelem out, const smallfelem small1, const felem in2)
{
smallfelem small2;
* out[2] <= out[2] + in[7] + 2*in[6] + 2^33*in[7]
* out[3] <= out[3] + 2^32*in[4] + 3*in[7]
*/
-static void
+static void
felem_reduce_(felem out, const longfelem in)
{
int128_t c;
* On exit:
* out[i] < 2^101
*/
-static void
+static void
felem_reduce(felem out, const longfelem in)
{
out[0] = zero100[0] + in[0];
* out[1] > 2^100 - 2^64 - 7*2^96 > 0 out[2] > 2^100 - 2^36 + 2^4 -
* 5*2^64 - 5*2^96 > 0 out[3] > 2^100 - 2^36 + 2^4 - 7*2^64 - 5*2^96
* - 3*2^96 > 0
- *
+ *
* out[0] < 2^100 + 2^64 + 7*2^64 + 5*2^96 < 2^101 out[1] < 2^100 +
* 3*2^64 + 5*2^64 + 3*2^97 < 2^101 out[2] < 2^100 + 5*2^64 + 2^64 +
* 3*2^65 + 2^97 < 2^101 out[3] < 2^100 + 7*2^64 + 7*2^96 + 3*2^64 <
* On exit:
* out[i] < 2^106
*/
-static void
+static void
felem_reduce_zero105(felem out, const longfelem in)
{
out[0] = zero105[0] + in[0];
* out[1] > 2^105 - 2^71 - 2^103 > 0 out[2] > 2^105 - 2^41 + 2^9 -
* 2^71 - 2^103 > 0 out[3] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 -
* 2^103 > 0
- *
+ *
* out[0] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106 out[1] < 2^105 + 2^71 +
* 2^71 + 2^103 < 2^106 out[2] < 2^105 + 2^71 + 2^71 + 2^71 + 2^103 <
* 2^106 out[3] < 2^105 + 2^71 + 2^103 + 2^71 < 2^106
/* subtract_u64 sets *result = *result - v and *carry to one if the subtraction
* underflowed. */
-static void
+static void
subtract_u64(u64 * result, u64 * carry, u64 v)
{
uint128_t r = *result;
* On entry:
* in[i] < 2^109
*/
-static void
+static void
felem_contract(smallfelem out, const felem in)
{
unsigned i;
subtract_u64(&out[3], &carry, result & kPrime[3]);
}
-static void
+static void
smallfelem_square_contract(smallfelem out, const smallfelem in)
{
longfelem longtmp;
felem_contract(out, tmp);
}
-static void
+static void
smallfelem_mul_contract(smallfelem out, const smallfelem in1, const smallfelem in2)
{
longfelem longtmp;
* On entry:
* small[i] < 2^64
*/
-static limb
+static limb
smallfelem_is_zero(const smallfelem small)
{
limb result;
return result;
}
-static int
+static int
smallfelem_is_zero_int(const smallfelem small)
{
return (int) (smallfelem_is_zero(small) & ((limb) 1));
* a^{p-1} = 1 (mod p)
* a^{p-2} = a^{-1} (mod p)
*/
-static void
+static void
felem_inv(felem out, const felem in)
{
felem ftmp, ftmp2;
felem_reduce(out, tmp); /* 2^256 - 2^224 + 2^192 + 2^96 - 3 */
}
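The identity the comment above uses, a^(p-2) = a^(-1) (mod p) for prime p, is easy to state with generic BIGNUM calls; felem_inv instead hard-codes an addition chain for this particular prime so the exponentiation is branch-free and constant-time. Sketch of the generic version:

/* Illustration of Fermat inversion: inv = a^(p-2) mod p, p prime. */
static int
fermat_inverse_sketch(BIGNUM *inv, const BIGNUM *a, const BIGNUM *p,
    BN_CTX *ctx)
{
	BIGNUM *e;
	int ret = 0;

	BN_CTX_start(ctx);
	if ((e = BN_CTX_get(ctx)) == NULL)
		goto err;
	if (!BN_copy(e, p) || !BN_sub_word(e, 2))	/* e = p - 2 */
		goto err;
	if (!BN_mod_exp(inv, a, e, p, ctx))		/* inv = a^e mod p */
		goto err;
	ret = 1;
 err:
	BN_CTX_end(ctx);
	return ret;
}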
-static void
+static void
smallfelem_inv_contract(smallfelem out, const smallfelem in)
{
felem tmp;
* are equal, (while not equal to the point at infinity). This case never
* happens during single point multiplication, so there is no timing leak for
* ECDH or ECDSA signing. */
-static void
+static void
point_add(felem x3, felem y3, felem z3,
const felem x1, const felem y1, const felem z1,
const int mixed, const smallfelem x2, const smallfelem y2, const smallfelem z2)
/* point_add_small is the same as point_add, except that it operates on
* smallfelems */
-static void
+static void
point_add_small(smallfelem x3, smallfelem y3, smallfelem z3,
smallfelem x1, smallfelem y1, smallfelem z1,
smallfelem x2, smallfelem y2, smallfelem z2)
/* select_point selects the |idx|th point from a precomputation table and
* copies it to out. */
-static void
+static void
select_point(const u64 idx, unsigned int size, const smallfelem pre_comp[16][3], smallfelem out[3])
{
unsigned i, j;
}
/* get_bit returns the |i|th bit in |in| */
-static char
+static char
get_bit(const felem_bytearray in, int i)
{
if ((i < 0) || (i >= 256))
* the scalars in scalars[]. If g_scalar is non-NULL, we also add this multiple
* of the generator, using certain (large) precomputed multiples in g_pre_comp.
* Output point (X, Y, Z) is stored in x_out, y_out, z_out */
-static void
+static void
batch_mul(felem x_out, felem y_out, felem z_out,
const felem_bytearray scalars[], const unsigned num_points, const u8 * g_scalar,
const int mixed, const smallfelem pre_comp[][17][3], const smallfelem g_pre_comp[2][16][3])
.group_get_curve = ec_GFp_simple_group_get_curve,
.group_get_degree = ec_GFp_simple_group_get_degree,
.group_check_discriminant =
- ec_GFp_simple_group_check_discriminant,
+ ec_GFp_simple_group_check_discriminant,
.point_init = ec_GFp_simple_point_init,
.point_finish = ec_GFp_simple_point_finish,
.point_clear_finish = ec_GFp_simple_point_clear_finish,
.point_copy = ec_GFp_simple_point_copy,
.point_set_to_infinity = ec_GFp_simple_point_set_to_infinity,
.point_set_Jprojective_coordinates =
- ec_GFp_simple_set_Jprojective_coordinates,
+ ec_GFp_simple_set_Jprojective_coordinates,
.point_get_Jprojective_coordinates =
- ec_GFp_simple_get_Jprojective_coordinates,
+ ec_GFp_simple_get_Jprojective_coordinates,
.point_set_affine_coordinates =
- ec_GFp_simple_point_set_affine_coordinates,
+ ec_GFp_simple_point_set_affine_coordinates,
.point_get_affine_coordinates =
- ec_GFp_nistp256_point_get_affine_coordinates,
+ ec_GFp_nistp256_point_get_affine_coordinates,
.add = ec_GFp_simple_add,
.dbl = ec_GFp_simple_dbl,
.invert = ec_GFp_simple_invert,
return src_;
}
-static void
+static void
nistp256_pre_comp_free(void *pre_)
{
int i;
free(pre);
}
-static void
+static void
nistp256_pre_comp_clear_free(void *pre_)
{
int i;
/* OPENSSL EC_METHOD FUNCTIONS
*/
-int
+int
ec_GFp_nistp256_group_init(EC_GROUP * group)
{
int ret;
return ret;
}
-int
+int
ec_GFp_nistp256_group_set_curve(EC_GROUP * group, const BIGNUM * p,
const BIGNUM * a, const BIGNUM * b, BN_CTX * ctx)
{
/* Takes the Jacobian coordinates (X, Y, Z) of a point and returns
* (X', Y') = (X/Z^2, Y/Z^3) */
-int
+int
ec_GFp_nistp256_point_get_affine_coordinates(const EC_GROUP * group,
const EC_POINT * point, BIGNUM * x, BIGNUM * y, BN_CTX * ctx)
{
return 1;
}
-static void
+static void
make_points_affine(size_t num, smallfelem points[ /* num */ ][3], smallfelem tmp_smallfelems[ /* num+1 */ ])
{
/*
/* Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL values
* Result is stored in r (r can equal one of the inputs). */
-int
+int
ec_GFp_nistp256_points_mul(const EC_GROUP * group, EC_POINT * r,
const BIGNUM * scalar, size_t num, const EC_POINT * points[],
const BIGNUM * scalars[], BN_CTX * ctx)
return ret;
}
-int
+int
ec_GFp_nistp256_precompute_mult(EC_GROUP * group, BN_CTX * ctx)
{
int ret = 0;
return ret;
}
-int
+int
ec_GFp_nistp256_have_precompute_mult(const EC_GROUP * group)
{
if (EC_EX_DATA_get_data(group->extra_data, nistp256_pre_comp_dup,
-/* $OpenBSD: ecp_nistp521.c,v 1.25 2021/04/20 17:28:18 tb Exp $ */
+/* $OpenBSD: ecp_nistp521.c,v 1.26 2021/04/20 17:38:02 tb Exp $ */
/*
* Written by Adam Langley (Google) for the OpenSSL project
*/
/* bin66_to_felem takes a little-endian byte array and converts it into felem
* form. This assumes that the CPU is little-endian. */
-static void
+static void
bin66_to_felem(felem out, const u8 in[66])
{
out[0] = (*((limb *) & in[0])) & bottom58bits;
/* felem_to_bin66 takes an felem and serialises into a little endian, 66 byte
* array. This assumes that the CPU is little-endian. */
-static void
+static void
felem_to_bin66(u8 out[66], const felem in)
{
memset(out, 0, 66);
}
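A note on the constants above, inferred from the masks and array bounds rather than stated in the diff: P-521 felems appear to use nine limbs of 58 bits, serialised through 66-byte arrays.

	/*
	 * 9 limbs * 58 bits = 522 bits >= 521	(nine limbs cover the field)
	 * ceil(521 / 8) = 66 bytes		(hence the u8[66] buffers)
	 * bottom58bits = (1 << 58) - 1		(per-limb mask, by its name)
	 */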
/* To preserve endianness when using BN_bn2bin and BN_bin2bn */
-static void
+static void
flip_endian(u8 * out, const u8 * in, unsigned len)
{
unsigned i;
}
/* BN_to_felem converts an OpenSSL BIGNUM into an felem */
-static int
+static int
BN_to_felem(felem out, const BIGNUM * bn)
{
felem_bytearray b_in;
/* Field operations
* ---------------- */
-static void
+static void
felem_one(felem out)
{
out[0] = 1;
out[8] = 0;
}
-static void
+static void
felem_assign(felem out, const felem in)
{
out[0] = in[0];
}
/* felem_sum64 sets out = out + in. */
-static void
+static void
felem_sum64(felem out, const felem in)
{
out[0] += in[0];
}
/* felem_scalar sets out = in * scalar */
-static void
+static void
felem_scalar(felem out, const felem in, limb scalar)
{
out[0] = in[0] * scalar;
}
/* felem_scalar64 sets out = out * scalar */
-static void
+static void
felem_scalar64(felem out, limb scalar)
{
out[0] *= scalar;
}
/* felem_scalar128 sets out = out * scalar */
-static void
+static void
felem_scalar128(largefelem out, limb scalar)
{
out[0] *= scalar;
* On exit:
* out[i] < 2^62
*/
-static void
+static void
felem_neg(felem out, const felem in)
{
/* In order to prevent underflow, we subtract from 0 mod p. */
* On exit:
* out[i] < out[i] + 2^62
*/
-static void
+static void
felem_diff64(felem out, const felem in)
{
/* In order to prevent underflow, we add 0 mod p before subtracting. */
* On exit:
* out[i] < out[i] + 2^63
*/
-static void
+static void
felem_diff_128_64(largefelem out, const felem in)
{
/* In order to prevent underflow, we add 0 mod p before subtracting. */
* On exit:
* out[i] < out[i] + 2^127 - 2^69
*/
-static void
+static void
felem_diff128(largefelem out, const largefelem in)
{
/* In order to prevent underflow, we add 0 mod p before subtracting. */
* On exit:
* out[i] < 17 * max(in[i]) * max(in[i])
*/
-static void
+static void
felem_square(largefelem out, const felem in)
{
felem inx2, inx4;
* On exit:
* out[i] < 17 * max(in1[i]) * max(in2[i])
*/
-static void
+static void
felem_mul(largefelem out, const felem in1, const felem in2)
{
felem in2x2;
* On exit:
* out[i] < 2^59 + 2^14
*/
-static void
+static void
felem_reduce(felem out, const largefelem in)
{
u64 overflow1, overflow2;
*/
}
-static void
+static void
felem_square_reduce(felem out, const felem in)
{
largefelem tmp;
felem_reduce(out, tmp);
}
-static void
+static void
felem_mul_reduce(felem out, const felem in1, const felem in2)
{
largefelem tmp;
* a^{p-1} = 1 (mod p)
* a^{p-2} = a^{-1} (mod p)
*/
-static void
+static void
felem_inv(felem out, const felem in)
{
felem ftmp, ftmp2, ftmp3, ftmp4;
* On entry:
* in[i] < 2^59 + 2^14
*/
-static limb
+static limb
felem_is_zero(const felem in)
{
felem ftmp;
return is_zero;
}
-static int
+static int
felem_is_zero_int(const felem in)
{
return (int) (felem_is_zero(in) & ((limb) 1));
* On entry:
* in[i] < 2^59 + 2^14
*/
-static void
+static void
felem_contract(felem out, const felem in)
{
limb is_p, is_greater, sign;
* are equal (while not equal to the point at infinity). This case never
* happens during single point multiplication, so there is no timing leak for
* ECDH or ECDSA signing. */
-static void
+static void
point_add(felem x3, felem y3, felem z3,
const felem x1, const felem y1, const felem z1,
const int mixed, const felem x2, const felem y2, const felem z2)
/* select_point selects the |idx|th point from a precomputation table and
* copies it to out. */
-static void
+static void
select_point(const limb idx, unsigned int size, const felem pre_comp[ /* size */ ][3],
felem out[3])
{
}
/* get_bit returns the |i|th bit in |in| */
-static char
+static char
get_bit(const felem_bytearray in, int i)
{
if (i < 0)
* the scalars in scalars[]. If g_scalar is non-NULL, we also add this multiple
* of the generator, using certain (large) precomputed multiples in g_pre_comp.
* Output point (X, Y, Z) is stored in x_out, y_out, z_out */
-static void
+static void
batch_mul(felem x_out, felem y_out, felem z_out,
const felem_bytearray scalars[], const unsigned num_points, const u8 * g_scalar,
const int mixed, const felem pre_comp[][17][3], const felem g_pre_comp[16][3])
.group_get_curve = ec_GFp_simple_group_get_curve,
.group_get_degree = ec_GFp_simple_group_get_degree,
.group_check_discriminant =
- ec_GFp_simple_group_check_discriminant,
+ ec_GFp_simple_group_check_discriminant,
.point_init = ec_GFp_simple_point_init,
.point_finish = ec_GFp_simple_point_finish,
.point_clear_finish = ec_GFp_simple_point_clear_finish,
.point_copy = ec_GFp_simple_point_copy,
.point_set_to_infinity = ec_GFp_simple_point_set_to_infinity,
.point_set_Jprojective_coordinates =
- ec_GFp_simple_set_Jprojective_coordinates,
+ ec_GFp_simple_set_Jprojective_coordinates,
.point_get_Jprojective_coordinates =
- ec_GFp_simple_get_Jprojective_coordinates,
+ ec_GFp_simple_get_Jprojective_coordinates,
.point_set_affine_coordinates =
- ec_GFp_simple_point_set_affine_coordinates,
+ ec_GFp_simple_point_set_affine_coordinates,
.point_get_affine_coordinates =
- ec_GFp_nistp521_point_get_affine_coordinates,
+ ec_GFp_nistp521_point_get_affine_coordinates,
.add = ec_GFp_simple_add,
.dbl = ec_GFp_simple_dbl,
.invert = ec_GFp_simple_invert,
return src_;
}
-static void
+static void
nistp521_pre_comp_free(void *pre_)
{
int i;
free(pre);
}
-static void
+static void
nistp521_pre_comp_clear_free(void *pre_)
{
int i;
/* OPENSSL EC_METHOD FUNCTIONS
*/
-int
+int
ec_GFp_nistp521_group_init(EC_GROUP * group)
{
int ret;
return ret;
}
-int
+int
ec_GFp_nistp521_group_set_curve(EC_GROUP * group, const BIGNUM * p,
const BIGNUM * a, const BIGNUM * b, BN_CTX * ctx)
{
/* Takes the Jacobian coordinates (X, Y, Z) of a point and returns
* (X', Y') = (X/Z^2, Y/Z^3) */
-int
+int
ec_GFp_nistp521_point_get_affine_coordinates(const EC_GROUP * group,
const EC_POINT * point, BIGNUM * x, BIGNUM * y, BN_CTX * ctx)
{
return 1;
}
-static void
+static void
make_points_affine(size_t num, felem points[ /* num */ ][3], felem tmp_felems[ /* num+1 */ ])
{
/*
/* Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL values
* Result is stored in r (r can equal one of the inputs). */
-int
+int
ec_GFp_nistp521_points_mul(const EC_GROUP * group, EC_POINT * r,
const BIGNUM * scalar, size_t num, const EC_POINT * points[],
const BIGNUM * scalars[], BN_CTX * ctx)
return ret;
}
-int
+int
ec_GFp_nistp521_precompute_mult(EC_GROUP * group, BN_CTX * ctx)
{
int ret = 0;
return ret;
}
-int
+int
ec_GFp_nistp521_have_precompute_mult(const EC_GROUP * group)
{
if (EC_EX_DATA_get_data(group->extra_data, nistp521_pre_comp_dup,
-/* $OpenBSD: ecp_smpl.c,v 1.31 2021/04/20 17:28:18 tb Exp $ */
+/* $OpenBSD: ecp_smpl.c,v 1.32 2021/04/20 17:38:02 tb Exp $ */
/* Includes code written by Lenka Fibikova <fibikova@exp-math.uni-essen.de>
* for the OpenSSL project.
* Includes code written by Bodo Moeller for the OpenSSL project.
.group_get_curve = ec_GFp_simple_group_get_curve,
.group_get_degree = ec_GFp_simple_group_get_degree,
.group_check_discriminant =
- ec_GFp_simple_group_check_discriminant,
+ ec_GFp_simple_group_check_discriminant,
.point_init = ec_GFp_simple_point_init,
.point_finish = ec_GFp_simple_point_finish,
.point_clear_finish = ec_GFp_simple_point_clear_finish,
.point_copy = ec_GFp_simple_point_copy,
.point_set_to_infinity = ec_GFp_simple_point_set_to_infinity,
.point_set_Jprojective_coordinates =
- ec_GFp_simple_set_Jprojective_coordinates,
+ ec_GFp_simple_set_Jprojective_coordinates,
.point_get_Jprojective_coordinates =
- ec_GFp_simple_get_Jprojective_coordinates,
+ ec_GFp_simple_get_Jprojective_coordinates,
.point_set_affine_coordinates =
- ec_GFp_simple_point_set_affine_coordinates,
+ ec_GFp_simple_point_set_affine_coordinates,
.point_get_affine_coordinates =
- ec_GFp_simple_point_get_affine_coordinates,
+ ec_GFp_simple_point_get_affine_coordinates,
.add = ec_GFp_simple_add,
.dbl = ec_GFp_simple_dbl,
.invert = ec_GFp_simple_invert,
*/
-int
+int
ec_GFp_simple_group_init(EC_GROUP * group)
{
BN_init(&group->field);
}
-void
+void
ec_GFp_simple_group_finish(EC_GROUP * group)
{
BN_free(&group->field);
}
-void
+void
ec_GFp_simple_group_clear_finish(EC_GROUP * group)
{
BN_clear_free(&group->field);
}
-int
+int
ec_GFp_simple_group_copy(EC_GROUP * dest, const EC_GROUP * src)
{
if (!BN_copy(&dest->field, &src->field))
}
-int
+int
ec_GFp_simple_group_set_curve(EC_GROUP * group,
const BIGNUM * p, const BIGNUM * a, const BIGNUM * b, BN_CTX * ctx)
{
}
-int
+int
ec_GFp_simple_group_get_curve(const EC_GROUP * group, BIGNUM * p, BIGNUM * a, BIGNUM * b, BN_CTX * ctx)
{
int ret = 0;
}
-int
+int
ec_GFp_simple_group_get_degree(const EC_GROUP * group)
{
return BN_num_bits(&group->field);
}
-int
+int
ec_GFp_simple_group_check_discriminant(const EC_GROUP * group, BN_CTX * ctx)
{
int ret = 0;
}
-int
+int
ec_GFp_simple_point_init(EC_POINT * point)
{
BN_init(&point->X);
}
-void
+void
ec_GFp_simple_point_finish(EC_POINT * point)
{
BN_free(&point->X);
}
-void
+void
ec_GFp_simple_point_clear_finish(EC_POINT * point)
{
BN_clear_free(&point->X);
}
-int
+int
ec_GFp_simple_point_copy(EC_POINT * dest, const EC_POINT * src)
{
if (!BN_copy(&dest->X, &src->X))
}
-int
+int
ec_GFp_simple_point_set_to_infinity(const EC_GROUP * group, EC_POINT * point)
{
point->Z_is_one = 0;
return ret;
}
-int
+int
ec_GFp_simple_add(const EC_GROUP * group, EC_POINT * r, const EC_POINT * a, const EC_POINT * b, BN_CTX * ctx)
{
int (*field_mul) (const EC_GROUP *, BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);
}
-int
+int
ec_GFp_simple_dbl(const EC_GROUP * group, EC_POINT * r, const EC_POINT * a, BN_CTX * ctx)
{
int (*field_mul) (const EC_GROUP *, BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);
}
-int
+int
ec_GFp_simple_invert(const EC_GROUP * group, EC_POINT * point, BN_CTX * ctx)
{
if (EC_POINT_is_at_infinity(group, point) > 0 || BN_is_zero(&point->Y))
}
-int
+int
ec_GFp_simple_is_at_infinity(const EC_GROUP * group, const EC_POINT * point)
{
return BN_is_zero(&point->Z);
}
-int
+int
ec_GFp_simple_is_on_curve(const EC_GROUP * group, const EC_POINT * point, BN_CTX * ctx)
{
int (*field_mul) (const EC_GROUP *, BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);
}
-int
+int
ec_GFp_simple_cmp(const EC_GROUP * group, const EC_POINT * a, const EC_POINT * b, BN_CTX * ctx)
{
/*
}
-int
+int
ec_GFp_simple_make_affine(const EC_GROUP * group, EC_POINT * point, BN_CTX * ctx)
{
BN_CTX *new_ctx = NULL;
}
-int
+int
ec_GFp_simple_points_make_affine(const EC_GROUP * group, size_t num, EC_POINT * points[], BN_CTX * ctx)
{
BN_CTX *new_ctx = NULL;
/*
* The array is used as a binary tree, exactly as in heapsort:
- *
+ *
* heap[1] heap[2] heap[3] heap[4] heap[5]
* heap[6] heap[7] heap[8]heap[9] heap[10]heap[11]
* heap[12]heap[13] heap[14] heap[15]
- *
+ *
* We put the Z's in the last line; then we set each other node to the
* product of its two child-nodes (where empty or 0 entries are
* treated as ones); then we invert heap[1]; then we invert each
}
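The heap comment above describes a batched inversion: one modular inversion shared across all the Z coordinates. The same trick in its simpler linear form, as a hypothetical helper (n inversions for the cost of one inversion plus about 3n multiplications; all z[i] must be nonzero mod p):

static int
batch_invert_sketch(BIGNUM *zinv[], BIGNUM *const z[], size_t n,
    const BIGNUM *p, BN_CTX *ctx)
{
	BIGNUM *acc, *tmp;
	size_t i;
	int ret = 0;

	BN_CTX_start(ctx);
	if ((acc = BN_CTX_get(ctx)) == NULL || (tmp = BN_CTX_get(ctx)) == NULL)
		goto err;
	if (!BN_one(acc))
		goto err;
	for (i = 0; i < n; i++) {
		/* zinv[i] temporarily holds the prefix product z[0]..z[i-1] */
		if (!BN_copy(zinv[i], acc) ||
		    !BN_mod_mul(acc, acc, z[i], p, ctx))
			goto err;
	}
	if (BN_mod_inverse(acc, acc, p, ctx) == NULL)	/* the one inversion */
		goto err;
	for (i = n; i-- > 0;) {
		if (!BN_mod_mul(tmp, acc, zinv[i], p, ctx) ||	/* z[i]^-1 */
		    !BN_mod_mul(acc, acc, z[i], p, ctx) ||	/* drop z[i] */
		    !BN_copy(zinv[i], tmp))
			goto err;
	}
	ret = 1;
 err:
	BN_CTX_end(ctx);
	return ret;
}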
-int
+int
ec_GFp_simple_field_mul(const EC_GROUP * group, BIGNUM * r, const BIGNUM * a, const BIGNUM * b, BN_CTX * ctx)
{
return BN_mod_mul(r, a, b, &group->field, ctx);
}
-int
+int
ec_GFp_simple_field_sqr(const EC_GROUP * group, BIGNUM * r, const BIGNUM * a, BN_CTX * ctx)
{
return BN_mod_sqr(r, a, &group->field, ctx);
* Apply randomization of EC point projective coordinates:
*
* (X, Y, Z) = (lambda^2 * X, lambda^3 * Y, lambda * Z)
- *
+ *
* where lambda is in the interval [1, group->field).
*/
int
}
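The blinding in the comment above can be sketched with generic BIGNUM calls: pick a nonzero lambda mod p and rescale the Jacobian coordinates, which leaves the affine point unchanged. Hypothetical helper, not the tree's code:

static int
blind_coordinates_sketch(BIGNUM *X, BIGNUM *Y, BIGNUM *Z, const BIGNUM *p,
    BN_CTX *ctx)
{
	BIGNUM *lambda, *tmp;
	int ret = 0;

	BN_CTX_start(ctx);
	if ((lambda = BN_CTX_get(ctx)) == NULL ||
	    (tmp = BN_CTX_get(ctx)) == NULL)
		goto err;
	do {
		if (!BN_rand_range(lambda, p))		/* lambda in [0, p) */
			goto err;
	} while (BN_is_zero(lambda));			/* force [1, p) */
	if (!BN_mod_mul(Z, Z, lambda, p, ctx) ||	/* Z' = lambda   * Z */
	    !BN_mod_sqr(tmp, lambda, p, ctx) ||
	    !BN_mod_mul(X, X, tmp, p, ctx) ||		/* X' = lambda^2 * X */
	    !BN_mod_mul(tmp, tmp, lambda, p, ctx) ||
	    !BN_mod_mul(Y, Y, tmp, p, ctx))		/* Y' = lambda^3 * Y */
		goto err;
	ret = 1;
 err:
	BN_CTX_end(ctx);
	return ret;
}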
/* one final cswap to move the right value into r */
EC_POINT_CSWAP(pbit, r, s, group_top, Z_is_one);
-
+
ret = 1;
err: