Work on optimizations

Author: Christian Reitter
Date: 2024-04-28 21:15:47 +02:00
parent 36af387bea
commit 23c6669229
6 changed files with 25 additions and 9 deletions

View File

@@ -91,11 +91,11 @@ static void rustsecp256k1_v0_10_0_ecmult_gen(const rustsecp256k1_v0_10_0_ecmult_
         (void)j;
 #endif
     }
-    #ifndef OPTIMIZE_UNSAFE_SKIP_ZEROING
+#ifndef OPTIMIZE_UNSAFE_SKIP_ZEROING
     n_i = 0;
     rustsecp256k1_v0_10_0_ge_clear(&add);
     rustsecp256k1_v0_10_0_scalar_clear(&gnb);
-    #endif
+#endif
 }
 
 /* Setup blinding values for rustsecp256k1_v0_10_0_ecmult_gen. */
@@ -131,7 +131,9 @@ static void rustsecp256k1_v0_10_0_ecmult_gen_blind(rustsecp256k1_v0_10_0_ecmult_
     /* Randomize the projection to defend against multiplier sidechannels.
        Do this before our own call to rustsecp256k1_v0_10_0_ecmult_gen below. */
     rustsecp256k1_v0_10_0_gej_rescale(&ctx->initial, &s);
+#ifndef OPTIMIZE_UNSAFE_SKIP_ZEROING
     rustsecp256k1_v0_10_0_fe_clear(&s);
+#endif
     rustsecp256k1_v0_10_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
     rustsecp256k1_v0_10_0_scalar_set_b32(&b, nonce32, NULL);
     /* A blinding value of 0 works, but would undermine the projection hardening. */
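Note: the guard above makes the stack-wiping calls compile-time optional. The clears run by default, and building with the macro defined (e.g. CFLAGS=-DOPTIMIZE_UNSAFE_SKIP_ZEROING) compiles them out, trading the defense-in-depth of zeroing secrets for speed. A minimal self-contained sketch of the same pattern follows; secret_t, secret_clear, and use_secret are hypothetical names for illustration, not part of the library:

#include <stddef.h>

typedef struct { unsigned char d[32]; } secret_t; /* hypothetical type */

/* Best-effort wipe: writing through a volatile pointer discourages the
 * compiler from removing stores to a variable that is about to die. */
static void secret_clear(secret_t *s) {
    volatile unsigned char *p = s->d;
    size_t i;
    for (i = 0; i < sizeof(s->d); i++) p[i] = 0;
}

void use_secret(const secret_t *key) {
    secret_t tmp = *key;
    (void)tmp; /* stand-in for real computation with tmp */
#ifndef OPTIMIZE_UNSAFE_SKIP_ZEROING
    secret_clear(&tmp); /* compiled out when the macro is defined */
#endif
}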

View File

@@ -127,7 +127,8 @@ static void rustsecp256k1_v0_10_0_ge_set_ge_zinv(rustsecp256k1_v0_10_0_ge *r, co
     SECP256K1_GE_VERIFY(r);
 }
 
-static void rustsecp256k1_v0_10_0_ge_set_xy(rustsecp256k1_v0_10_0_ge *r, const rustsecp256k1_v0_10_0_fe *x, const rustsecp256k1_v0_10_0_fe *y) {
+// OPTIMIZATION force inline
+SECP256K1_INLINE static void rustsecp256k1_v0_10_0_ge_set_xy(rustsecp256k1_v0_10_0_ge *r, const rustsecp256k1_v0_10_0_fe *x, const rustsecp256k1_v0_10_0_fe *y) {
     SECP256K1_FE_VERIFY(x);
     SECP256K1_FE_VERIFY(y);
@@ -138,7 +139,8 @@ static void rustsecp256k1_v0_10_0_ge_set_xy(rustsecp256k1_v0_10_0_ge *r, const r
     SECP256K1_GE_VERIFY(r);
 }
 
-static int rustsecp256k1_v0_10_0_ge_is_infinity(const rustsecp256k1_v0_10_0_ge *a) {
+// OPTIMIZATION force inline
+SECP256K1_INLINE static int rustsecp256k1_v0_10_0_ge_is_infinity(const rustsecp256k1_v0_10_0_ge *a) {
     SECP256K1_GE_VERIFY(a);
 
     return a->infinity;
@@ -406,7 +408,8 @@ static void rustsecp256k1_v0_10_0_gej_neg(rustsecp256k1_v0_10_0_gej *r, const ru
     SECP256K1_GEJ_VERIFY(r);
 }
 
-static int rustsecp256k1_v0_10_0_gej_is_infinity(const rustsecp256k1_v0_10_0_gej *a) {
+// OPTIMIZATION force inline
+SECP256K1_INLINE static int rustsecp256k1_v0_10_0_gej_is_infinity(const rustsecp256k1_v0_10_0_gej *a) {
     SECP256K1_GEJ_VERIFY(a);
 
     return a->infinity;
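Note: SECP256K1_INLINE is the library's portable spelling of the inline keyword. Upstream libsecp256k1 defines it in util.h roughly as below (an approximation from memory, not the verbatim header). It is only a hint to the compiler, so the "force inline" comments are optimistic unless something like __attribute__((always_inline)) is added:

#if defined(__GNUC__)
# define SECP256K1_INLINE __inline__
#elif defined(_MSC_VER)
# define SECP256K1_INLINE __inline
#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
# define SECP256K1_INLINE inline
#else
# define SECP256K1_INLINE
#endif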

View File

@@ -497,7 +497,8 @@ static void rustsecp256k1_v0_10_0_modinv64_update_de_62(rustsecp256k1_v0_10_0_mo
  *
  * This implements the update_fg function from the explanation.
  */
-static void rustsecp256k1_v0_10_0_modinv64_update_fg_62(rustsecp256k1_v0_10_0_modinv64_signed62 *f, rustsecp256k1_v0_10_0_modinv64_signed62 *g, const rustsecp256k1_v0_10_0_modinv64_trans2x2 *t) {
+// OPTIMIZATION force inline
+static SECP256K1_INLINE void rustsecp256k1_v0_10_0_modinv64_update_fg_62(rustsecp256k1_v0_10_0_modinv64_signed62 *f, rustsecp256k1_v0_10_0_modinv64_signed62 *g, const rustsecp256k1_v0_10_0_modinv64_trans2x2 *t) {
     const uint64_t M62 = UINT64_MAX >> 2;
     const int64_t f0 = f->v[0], f1 = f->v[1], f2 = f->v[2], f3 = f->v[3], f4 = f->v[4];
     const int64_t g0 = g->v[0], g1 = g->v[1], g2 = g->v[2], g3 = g->v[3], g4 = g->v[4];
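Note: this hunk spells the specifiers as static SECP256K1_INLINE, while the other files in this commit use SECP256K1_INLINE static. C allows declaration specifiers in any order, so both forms declare the same function, as the snippet below illustrates (update_helper is a hypothetical name, with a placeholder macro definition so the snippet stands alone):

#define SECP256K1_INLINE inline /* placeholder definition for illustration */

/* Equivalent declarations: specifier order is not significant in C. */
static SECP256K1_INLINE void update_helper(void);
SECP256K1_INLINE static void update_helper(void);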

View File

@@ -147,7 +147,8 @@ static void rustsecp256k1_v0_10_0_scalar_cadd_bit(rustsecp256k1_v0_10_0_scalar *
     VERIFY_CHECK(rustsecp256k1_v0_10_0_u128_hi_u64(&t) == 0);
 }
 
-static void rustsecp256k1_v0_10_0_scalar_set_b32(rustsecp256k1_v0_10_0_scalar *r, const unsigned char *b32, int *overflow) {
+// OPTIMIZATION force inline
+SECP256K1_INLINE static void rustsecp256k1_v0_10_0_scalar_set_b32(rustsecp256k1_v0_10_0_scalar *r, const unsigned char *b32, int *overflow) {
     int over;
     r->d[0] = rustsecp256k1_v0_10_0_read_be64(&b32[24]);
     r->d[1] = rustsecp256k1_v0_10_0_read_be64(&b32[16]);
@@ -161,7 +162,8 @@ static void rustsecp256k1_v0_10_0_scalar_set_b32(rustsecp256k1_v0_10_0_scalar *r
     SECP256K1_SCALAR_VERIFY(r);
 }
 
-static void rustsecp256k1_v0_10_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_10_0_scalar* a) {
+// OPTIMIZATION force inline
+SECP256K1_INLINE static void rustsecp256k1_v0_10_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_10_0_scalar* a) {
     SECP256K1_SCALAR_VERIFY(a);
 
     rustsecp256k1_v0_10_0_write_be64(&bin[0], a->d[3]);
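Note: scalar_set_b32 above loads a 32-byte big-endian value into four 64-bit limbs, with d[0] the least-significant limb taken from bytes 24..31. A portable big-endian 64-bit load of the kind rustsecp256k1_v0_10_0_read_be64 performs looks like this (an illustrative sketch, not the library's verbatim code):

#include <stdint.h>

/* Assemble eight bytes into a uint64_t, most-significant byte first. */
static uint64_t read_be64(const unsigned char *p) {
    return ((uint64_t)p[0] << 56) | ((uint64_t)p[1] << 48) |
           ((uint64_t)p[2] << 40) | ((uint64_t)p[3] << 32) |
           ((uint64_t)p[4] << 24) | ((uint64_t)p[5] << 16) |
           ((uint64_t)p[6] << 8)  |  (uint64_t)p[7];
}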

View File

@@ -27,7 +27,8 @@
 static const rustsecp256k1_v0_10_0_scalar rustsecp256k1_v0_10_0_scalar_one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
 static const rustsecp256k1_v0_10_0_scalar rustsecp256k1_v0_10_0_scalar_zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
 
-static int rustsecp256k1_v0_10_0_scalar_set_b32_seckey(rustsecp256k1_v0_10_0_scalar *r, const unsigned char *bin) {
+// OPTIMIZATION force inline
+SECP256K1_INLINE static int rustsecp256k1_v0_10_0_scalar_set_b32_seckey(rustsecp256k1_v0_10_0_scalar *r, const unsigned char *bin) {
     int overflow;
     rustsecp256k1_v0_10_0_scalar_set_b32(r, bin, &overflow);

View File

@@ -560,7 +560,9 @@ int rustsecp256k1_v0_10_0_ec_pubkey_create(const rustsecp256k1_v0_10_0_context*
     rustsecp256k1_v0_10_0_pubkey_save(pubkey, &p);
     rustsecp256k1_v0_10_0_memczero(pubkey, sizeof(*pubkey), !ret);
+#ifndef OPTIMIZE_UNSAFE_SKIP_ZEROING
     rustsecp256k1_v0_10_0_scalar_clear(&seckey_scalar);
+#endif
     return ret;
 }
@@ -606,7 +608,10 @@ static int rustsecp256k1_v0_10_0_ec_seckey_tweak_add_helper(rustsecp256k1_v0_10_
     rustsecp256k1_v0_10_0_scalar_set_b32(&term, tweak32, &overflow);
     ret = (!overflow) & rustsecp256k1_v0_10_0_eckey_privkey_tweak_add(sec, &term);
+#ifndef OPTIMIZE_UNSAFE_SKIP_ZEROING
+    rustsecp256k1_v0_10_0_scalar_clear(&term);
+#endif
     return ret;
 }
@@ -622,7 +627,9 @@ int rustsecp256k1_v0_10_0_ec_seckey_tweak_add(const rustsecp256k1_v0_10_0_contex
     rustsecp256k1_v0_10_0_scalar_cmov(&sec, &rustsecp256k1_v0_10_0_scalar_zero, !ret);
     rustsecp256k1_v0_10_0_scalar_get_b32(seckey, &sec);
+#ifndef OPTIMIZE_UNSAFE_SKIP_ZEROING
     rustsecp256k1_v0_10_0_scalar_clear(&sec);
+#endif
     return ret;
 }
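Note: the clears guarded in this file exist because an unwiped scalar such as sec or term would leave key material on the stack after the function returns. When the clears are kept, they also have to survive optimization: a plain memset on a local that is about to go out of scope may be elided as a dead store. One well-known way to keep such a wipe alive is to call memset through a volatile function pointer; the sketch below shows that generic technique, not the library's own implementation:

#include <stddef.h>
#include <string.h>

/* The volatile function pointer stops the compiler from proving the
 * callee is memset, so the call and its stores cannot be elided. */
static void *(*const volatile memset_v)(void *, int, size_t) = memset;

static void secure_wipe(void *p, size_t n) {
    memset_v(p, 0, n);
}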