rust-secp256k1-unsafe-fast/depend/secp256k1/src/ecmult_const_impl.h

/**********************************************************************
 * Copyright (c) 2015 Pieter Wuille, Andrew Poelstra                  *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
#ifndef _SECP256K1_ECMULT_CONST_IMPL_
#define _SECP256K1_ECMULT_CONST_IMPL_
#include "scalar.h"
#include "group.h"
#include "ecmult_const.h"
#include "ecmult_impl.h"
#ifdef USE_ENDOMORPHISM
#define WNAF_BITS 128
#else
#define WNAF_BITS 256
#endif
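/* With the endomorphism, the scalar is first split into two ~128-bit halves
 * (see secp256k1_scalar_split_lambda below), so each wNAF ladder only has to
 * cover 128 bits; without it a single 256-bit ladder is used. */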
#define WNAF_SIZE(w) ((WNAF_BITS + (w) - 1) / (w))
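/* For example, for the w = WINDOW_A - 1 = 4 used below (assuming the usual WINDOW_A of 5
 * from ecmult_impl.h), WNAF_SIZE(4) = (128 + 3) / 4 = 32 windows with the endomorphism,
 * or (256 + 3) / 4 = 64 without it. */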
/* This is like `ECMULT_TABLE_GET_GE` but is constant time */
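/* The table `pre` holds the odd multiples 1*A, 3*A, 5*A, ... of some point A, so an odd
 * digit n maps to entry idx_n = |n|/2 (n = +-1 -> entry 0, n = +-3 -> entry 1, and so on).
 * Rather than indexing with idx_n directly, every entry is scanned and conditionally
 * cmov'd into place, and the y coordinate is conditionally negated for negative n, so
 * that neither the memory access pattern nor any branch depends on the secret digit. */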
#define ECMULT_CONST_TABLE_GET_GE(r,pre,n,w) do { \
    int m; \
    int abs_n = (n) * (((n) > 0) * 2 - 1); \
    int idx_n = abs_n / 2; \
    secp256k1_fe neg_y; \
    VERIFY_CHECK(((n) & 1) == 1); \
    VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
    VERIFY_CHECK((n) <=  ((1 << ((w)-1)) - 1)); \
    VERIFY_SETUP(secp256k1_fe_clear(&(r)->x)); \
    VERIFY_SETUP(secp256k1_fe_clear(&(r)->y)); \
    for (m = 0; m < ECMULT_TABLE_SIZE(w); m++) { \
        /* This loop is used to avoid secret data in array indices. See
         * the comment in ecmult_gen_impl.h for rationale. */ \
        secp256k1_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \
        secp256k1_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \
    } \
    (r)->infinity = 0; \
    secp256k1_fe_negate(&neg_y, &(r)->y, 1); \
    secp256k1_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \
} while(0)
/** Convert a number to WNAF notation. Up to the returned skew, and modulo the group order,
 *  the number becomes represented by sum(2^{wi} * wnaf[i], i = 0..WNAF_SIZE(w)) with the
 *  following guarantees:
 *  - each wnaf[i] is an odd integer between -(1 << w) and (1 << w)
 *  - each wnaf[i] is nonzero
 *  - the number of words set is always WNAF_SIZE(w) + 1
 *
 *  The return value is the skew that was added to the scalar (0 without the endomorphism,
 *  1 or 2 with it); the caller is responsible for correcting the result for it.
 *
 *  Adapted from `The Width-w NAF Method Provides Small Memory and Fast Elliptic Scalar
 *  Multiplications Secure against Side Channel Attacks`, Okeya and Takagi. M. Joye (Ed.)
 *  CT-RSA 2003, LNCS 2612, pp. 328-443, 2003. Springer-Verlag Berlin Heidelberg 2003
 *
 *  Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on p. 335
 */
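/* Illustrative example (much smaller than the real WNAF_BITS): with w = 4, the odd value
 * 43 = 0x2B splits into the 4-bit words (11, 2). The top word 2 is even, so 1 is borrowed
 * from below: the digits become (-5, 3), and indeed 3*2^4 - 5 = 43, with every digit odd
 * and nonzero as required. */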
static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w) {
    int global_sign;
    int skew = 0;
    int word = 0;
    /* 1 2 3 */
    int u_last;
    int u;

#ifdef USE_ENDOMORPHISM
    int flip;
    int bit;
    secp256k1_scalar neg_s;
    int not_neg_one;
    /* If we are using the endomorphism, we cannot handle even numbers by negating
     * them, since we are working with 128-bit numbers whose negations would be 256
     * bits, eliminating the performance advantage. Instead we use a technique from
     * Section 4.2 of the Okeya/Takagi paper, which is to add either 1 (for even)
     * or 2 (for odd) to the number we are encoding, and then compensate after the
     * multiplication. */
    /* Negative 128-bit numbers will be negated, since otherwise they are 256-bit */
    flip = secp256k1_scalar_is_high(&s);
    /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */
    bit = flip ^ (s.d[0] & 1);
    /* We check for negative one, since adding 2 to it will cause an overflow */
    secp256k1_scalar_negate(&neg_s, &s);
    not_neg_one = !secp256k1_scalar_is_one(&neg_s);
    secp256k1_scalar_cadd_bit(&s, bit, not_neg_one);
    /* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects
     * that we added two to it and flipped it. In fact for -1 these operations are
     * identical. We only flipped, but since skewing is required (in the sense that
     * the skew must be 1 or 2, never zero) and flipping is not, we need to change
     * our flags to claim that we only skewed. */
    global_sign = secp256k1_scalar_cond_negate(&s, flip);
    global_sign *= not_neg_one * 2 - 1;
    skew = 1 << bit;
#else
    /* Otherwise, we just negate to force oddness */
    int is_even = secp256k1_scalar_is_even(&s);
    global_sign = secp256k1_scalar_cond_negate(&s, is_even);
#endif

    /* 4 */
    u_last = secp256k1_scalar_shr_int(&s, w);
    while (word * w < WNAF_BITS) {
        int sign;
        int even;

        /* 4.1 4.4 */
        u = secp256k1_scalar_shr_int(&s, w);
        /* 4.2 */
        even = ((u & 1) == 0);
        sign = 2 * (u_last > 0) - 1;
        u += sign * even;
        u_last -= sign * even * (1 << w);

        /* 4.3, adapted for global sign change */
        wnaf[word++] = u_last * global_sign;

        u_last = u;
    }
    wnaf[word] = u * global_sign;

    VERIFY_CHECK(secp256k1_scalar_is_zero(&s));
    VERIFY_CHECK(word == WNAF_SIZE(w));
    return skew;
}

static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *scalar) {
    secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
    secp256k1_ge tmpa;
    secp256k1_fe Z;

#ifdef USE_ENDOMORPHISM
    secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
    int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];
    int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)];
    int skew_1;
    int skew_lam;
    secp256k1_scalar q_1, q_lam;
#else
    int wnaf[1 + WNAF_SIZE(WINDOW_A - 1)];
#endif

    int i;
    secp256k1_scalar sc = *scalar;

    /* build wnaf representation for q. */
#ifdef USE_ENDOMORPHISM
    /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
    secp256k1_scalar_split_lambda(&q_1, &q_lam, &sc);
    /* no need for zero correction when using endomorphism since even
     * numbers have one added to them anyway */
    skew_1 = secp256k1_wnaf_const(wnaf_1, q_1, WINDOW_A - 1);
    skew_lam = secp256k1_wnaf_const(wnaf_lam, q_lam, WINDOW_A - 1);
#else
    int is_zero = secp256k1_scalar_is_zero(scalar);
    /* the wNAF ladder cannot handle zero, so bump this to one; we will
     * correct the result after the fact */
    sc.d[0] += is_zero;
    VERIFY_CHECK(!secp256k1_scalar_is_zero(&sc));

    secp256k1_wnaf_const(wnaf, sc, WINDOW_A - 1);
#endif

    /* Calculate odd multiples of a.
     * All multiples are brought to the same Z 'denominator', which is stored
     * in Z. Due to secp256k1's isomorphism we can do all operations pretending
     * that the Z coordinate was 1, use affine addition formulae, and correct
     * the Z coordinate of the result once at the end.
     */
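    /* Concretely: if every precomputed point shares the Jacobian Z coordinate Z, then their
     * (X, Y) pairs are the affine coordinates of the corresponding points on the isomorphic
     * curve y^2 = x^3 + 7*Z^6 (via the map (x, y) -> (Z^2*x, Z^3*y)). Addition commutes with
     * this isomorphism, so we may use affine formulae throughout and undo the map at the end
     * by multiplying the result's Z coordinate by Z (the secp256k1_fe_mul call below). */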
    secp256k1_gej_set_ge(r, a);
    secp256k1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r);
    for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
        secp256k1_fe_normalize_weak(&pre_a[i].y);
    }
#ifdef USE_ENDOMORPHISM
    for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
        secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
    }
#endif

    /* first loop iteration (separated out so we can directly set r, rather
     * than having it start at infinity, get doubled several times, then have
     * its new value added to it) */
#ifdef USE_ENDOMORPHISM
    i = wnaf_1[WNAF_SIZE(WINDOW_A - 1)];
    VERIFY_CHECK(i != 0);
    ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
    secp256k1_gej_set_ge(r, &tmpa);

    i = wnaf_lam[WNAF_SIZE(WINDOW_A - 1)];
    VERIFY_CHECK(i != 0);
    ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A);
    secp256k1_gej_add_ge(r, r, &tmpa);
#else
    i = wnaf[WNAF_SIZE(WINDOW_A - 1)];
    VERIFY_CHECK(i != 0);
    ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
    secp256k1_gej_set_ge(r, &tmpa);
#endif

    /* remaining loop iterations */
    for (i = WNAF_SIZE(WINDOW_A - 1) - 1; i >= 0; i--) {
        int n;
        int j;
        for (j = 0; j < WINDOW_A - 1; ++j) {
            secp256k1_gej_double_nonzero(r, r, NULL);
        }
#ifdef USE_ENDOMORPHISM
        n = wnaf_1[i];
        ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
        VERIFY_CHECK(n != 0);
        secp256k1_gej_add_ge(r, r, &tmpa);

        n = wnaf_lam[i];
        ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A);
        VERIFY_CHECK(n != 0);
        secp256k1_gej_add_ge(r, r, &tmpa);
#else
        n = wnaf[i];
        VERIFY_CHECK(n != 0);
        ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
        secp256k1_gej_add_ge(r, r, &tmpa);
#endif
    }

    secp256k1_fe_mul(&r->z, &r->z, &Z);

#ifdef USE_ENDOMORPHISM
    {
        /* Correct for wNAF skew */
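        /* The ladders above computed (q_1 + skew_1)*A + (q_lam + skew_lam)*lambda(A), since
         * secp256k1_wnaf_const encoded the skewed scalars. Subtract skew_1*A and
         * skew_lam*lambda(A) (each equal to A or 2*A, selected in constant time) to obtain
         * the true result q*A. */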
        secp256k1_ge correction = *a;
        secp256k1_ge_storage correction_1_stor;
        secp256k1_ge_storage correction_lam_stor;
        secp256k1_ge_storage a2_stor;
        secp256k1_gej tmpj;
        secp256k1_gej_set_ge(&tmpj, &correction);
        secp256k1_gej_double_var(&tmpj, &tmpj, NULL);
        secp256k1_ge_set_gej(&correction, &tmpj);
        secp256k1_ge_to_storage(&correction_1_stor, a);
        secp256k1_ge_to_storage(&correction_lam_stor, a);
        secp256k1_ge_to_storage(&a2_stor, &correction);

        /* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */
        secp256k1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2);
        secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2);

        /* Apply the correction */
        secp256k1_ge_from_storage(&correction, &correction_1_stor);
        secp256k1_ge_neg(&correction, &correction);
        secp256k1_gej_add_ge(r, r, &correction);

        secp256k1_ge_from_storage(&correction, &correction_lam_stor);
        secp256k1_ge_neg(&correction, &correction);
        secp256k1_ge_mul_lambda(&correction, &correction);
        secp256k1_gej_add_ge(r, r, &correction);
    }
#else
    /* correct for zero */
    r->infinity |= is_zero;
#endif
}
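
/* Usage sketch (illustrative; mirrors how the ECDH module drives this function): given a
 * public point `pt` and a secret scalar `sec`, a caller would do something like
 *
 *     secp256k1_gej res;
 *     secp256k1_ecmult_const(&res, &pt, &sec);
 *     secp256k1_ge_set_gej(&pt, &res);
 *
 * i.e. one constant-time multiplication of an arbitrary point by a secret scalar. */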
#endif