/**********************************************************************
 * Copyright (c) 2013, 2014, 2015 Pieter Wuille, Gregory Maxwell      *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef SECP256K1_ECMULT_GEN_IMPL_H
#define SECP256K1_ECMULT_GEN_IMPL_H

#include "util.h"
#include "scalar.h"
#include "group.h"
#include "ecmult_gen.h"
#include "hash_impl.h"
#ifdef USE_ECMULT_STATIC_PRECOMPUTATION
#include "ecmult_static_context.h"
#endif

#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
    static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = ROUND_TO_ALIGN(sizeof(*((rustsecp256k1_v0_1_1_ecmult_gen_context*) NULL)->prec));
#else
    static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = 0;
#endif

static void rustsecp256k1_v0_1_1_ecmult_gen_context_init(rustsecp256k1_v0_1_1_ecmult_gen_context *ctx) {
    ctx->prec = NULL;
}
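
/* ctx->prec points to a 64 x 16 table of group elements in storage format.
 * Row j corresponds to the j-th 4-bit window of a scalar; entry [j][i]
 * encodes i * 16^j * G plus a per-row multiple of a "nothing up my sleeve"
 * point, chosen so that the extra multiples cancel when one entry from every
 * row is added together (see the build code below). With
 * USE_ECMULT_STATIC_PRECOMPUTATION the table is generated at build time;
 * otherwise it is computed here and stored in the preallocated buffer. */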
static void rustsecp256k1_v0_1_1_ecmult_gen_context_build(rustsecp256k1_v0_1_1_ecmult_gen_context *ctx, void **prealloc) {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
    rustsecp256k1_v0_1_1_ge prec[1024];
    rustsecp256k1_v0_1_1_gej gj;
    rustsecp256k1_v0_1_1_gej nums_gej;
    int i, j;
    size_t const prealloc_size = SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
    void* const base = *prealloc;
#endif

    if (ctx->prec != NULL) {
        return;
    }
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
    ctx->prec = (rustsecp256k1_v0_1_1_ge_storage (*)[64][16])manual_alloc(prealloc, prealloc_size, base, prealloc_size);

    /* get the generator */
    rustsecp256k1_v0_1_1_gej_set_ge(&gj, &rustsecp256k1_v0_1_1_ge_const_g);

    /* Construct a group element with no known corresponding scalar (nothing up my sleeve). */
    {
        static const unsigned char nums_b32[33] = "The scalar for this x is unknown";
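        /* The 32 ASCII bytes above (the trailing NUL makes the array 33 bytes
         * long) are interpreted below as an x coordinate, so no one knows a
         * discrete logarithm for the resulting point. */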
        rustsecp256k1_v0_1_1_fe nums_x;
        rustsecp256k1_v0_1_1_ge nums_ge;
        int r;
        r = rustsecp256k1_v0_1_1_fe_set_b32(&nums_x, nums_b32);
        (void)r;
        VERIFY_CHECK(r);
        r = rustsecp256k1_v0_1_1_ge_set_xo_var(&nums_ge, &nums_x, 0);
        (void)r;
        VERIFY_CHECK(r);
        rustsecp256k1_v0_1_1_gej_set_ge(&nums_gej, &nums_ge);
        /* Add G to make the bits in x uniformly distributed. */
        rustsecp256k1_v0_1_1_gej_add_ge_var(&nums_gej, &nums_gej, &rustsecp256k1_v0_1_1_ge_const_g, NULL);
    }

    /* compute prec. */
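    /* Row j of the table is numsbase + i*gbase for i = 0..15, where
     * gbase = 16^j * G and numsbase is a distinct multiple of the nums point
     * for every row (doubled each iteration, with a correction at j == 62 so
     * that the 64 multiples sum to zero). */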
    {
        rustsecp256k1_v0_1_1_gej precj[1024]; /* Jacobian versions of prec. */
        rustsecp256k1_v0_1_1_gej gbase;
        rustsecp256k1_v0_1_1_gej numsbase;
        gbase = gj; /* 16^j * G */
        numsbase = nums_gej; /* 2^j * nums. */
        for (j = 0; j < 64; j++) {
            /* Set precj[j*16 .. j*16+15] to (numsbase, numsbase + gbase, ..., numsbase + 15*gbase). */
            precj[j*16] = numsbase;
            for (i = 1; i < 16; i++) {
                rustsecp256k1_v0_1_1_gej_add_var(&precj[j*16 + i], &precj[j*16 + i - 1], &gbase, NULL);
            }
            /* Multiply gbase by 16. */
            for (i = 0; i < 4; i++) {
                rustsecp256k1_v0_1_1_gej_double_var(&gbase, &gbase, NULL);
            }
            /* Multiply numsbase by 2. */
            rustsecp256k1_v0_1_1_gej_double_var(&numsbase, &numsbase, NULL);
            if (j == 62) {
                /* In the last iteration, numsbase is (1 - 2^j) * nums instead. */
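                /* The per-row nums multiples then are 2^0, ..., 2^62 and
                 * (1 - 2^63), which sum to zero, so selecting one entry from
                 * every row cancels the nums contribution completely. */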
                rustsecp256k1_v0_1_1_gej_neg(&numsbase, &numsbase);
                rustsecp256k1_v0_1_1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL);
            }
        }
        rustsecp256k1_v0_1_1_ge_set_all_gej_var(prec, precj, 1024);
    }
    for (j = 0; j < 64; j++) {
        for (i = 0; i < 16; i++) {
            rustsecp256k1_v0_1_1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*16 + i]);
        }
    }
#else
    (void)prealloc;
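    /* In this configuration the table was generated at build time and shipped
     * in ecmult_static_context.h, so nothing needs to be computed or
     * allocated here. */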
    ctx->prec = (rustsecp256k1_v0_1_1_ge_storage (*)[64][16])rustsecp256k1_v0_1_1_ecmult_static_context;
#endif
    rustsecp256k1_v0_1_1_ecmult_gen_blind(ctx, NULL);
}

static int rustsecp256k1_v0_1_1_ecmult_gen_context_is_built(const rustsecp256k1_v0_1_1_ecmult_gen_context* ctx) {
    return ctx->prec != NULL;
}

static void rustsecp256k1_v0_1_1_ecmult_gen_context_finalize_memcpy(rustsecp256k1_v0_1_1_ecmult_gen_context *dst, const rustsecp256k1_v0_1_1_ecmult_gen_context *src) {
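    /* The context has been copied bytewise into dst's preallocated buffer, so
     * prec still points into src's memory; rebase it to the same offset
     * relative to dst. */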
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
    if (src->prec != NULL) {
        /* We cast to void* first to suppress a -Wcast-align warning. */
        dst->prec = (rustsecp256k1_v0_1_1_ge_storage (*)[64][16])(void*)((unsigned char*)dst + ((unsigned char*)src->prec - (unsigned char*)src));
    }
#else
    (void)dst, (void)src;
#endif
}

static void rustsecp256k1_v0_1_1_ecmult_gen_context_clear(rustsecp256k1_v0_1_1_ecmult_gen_context *ctx) {
    rustsecp256k1_v0_1_1_scalar_clear(&ctx->blind);
    rustsecp256k1_v0_1_1_gej_clear(&ctx->initial);
    ctx->prec = NULL;
}

static void rustsecp256k1_v0_1_1_ecmult_gen(const rustsecp256k1_v0_1_1_ecmult_gen_context *ctx, rustsecp256k1_v0_1_1_gej *r, const rustsecp256k1_v0_1_1_scalar *gn) {
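    /* Computes r = gn*G. The blinded scalar gn + ctx->blind is split into 64
     * windows of 4 bits; for window j the entry (*ctx->prec)[j][bits] is
     * selected with a full constant-time scan-and-cmov pass and added to r.
     * The per-row nums offsets baked into the table sum to zero, and
     * ctx->initial contributes the missing -blind*G, so the result is gn*G. */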
    rustsecp256k1_v0_1_1_ge add;
    rustsecp256k1_v0_1_1_ge_storage adds;
    rustsecp256k1_v0_1_1_scalar gnb;
    int bits;
    int i, j;
    memset(&adds, 0, sizeof(adds));
    *r = ctx->initial;
    /* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. */
    rustsecp256k1_v0_1_1_scalar_add(&gnb, gn, &ctx->blind);
    add.infinity = 0;
    for (j = 0; j < 64; j++) {
        bits = rustsecp256k1_v0_1_1_scalar_get_bits(&gnb, j * 4, 4);
        for (i = 0; i < 16; i++) {
            /** This uses a conditional move to avoid any secret data in array indexes.
             *   _Any_ use of secret indexes has been demonstrated to result in timing
             *   sidechannels, even when the cache-line access patterns are uniform.
             *  See also:
             *   "A word of warning", CHES 2013 Rump Session, by Daniel J. Bernstein and Peter Schwabe
             *    (https://cryptojedi.org/peter/data/chesrump-20130822.pdf) and
             *   "Cache Attacks and Countermeasures: the Case of AES", RSA 2006,
             *    by Dag Arne Osvik, Adi Shamir, and Eran Tromer
             *    (http://www.tau.ac.il/~tromer/papers/cache.pdf)
             */
            rustsecp256k1_v0_1_1_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits);
        }
        rustsecp256k1_v0_1_1_ge_from_storage(&add, &adds);
        rustsecp256k1_v0_1_1_gej_add_ge(r, r, &add);
    }
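    /* Clear the window value and the temporaries that held secret data before
     * returning. */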
    bits = 0;
    rustsecp256k1_v0_1_1_ge_clear(&add);
    rustsecp256k1_v0_1_1_scalar_clear(&gnb);
}

/* Setup blinding values for rustsecp256k1_v0_1_1_ecmult_gen. */
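/* Picks a fresh blinding scalar b (derived from the previous blind and the
 * optional 32-byte seed via HMAC-SHA256/RFC6979) and sets ctx->blind = -b and
 * ctx->initial = b*G with a randomized Z coordinate, so that
 * rustsecp256k1_v0_1_1_ecmult_gen computes (n-b)*G + b*G instead of n*G. */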
static void rustsecp256k1_v0_1_1_ecmult_gen_blind(rustsecp256k1_v0_1_1_ecmult_gen_context *ctx, const unsigned char *seed32) {
    rustsecp256k1_v0_1_1_scalar b;
    rustsecp256k1_v0_1_1_gej gb;
    rustsecp256k1_v0_1_1_fe s;
    unsigned char nonce32[32];
    rustsecp256k1_v0_1_1_rfc6979_hmac_sha256 rng;
    int retry;
    unsigned char keydata[64] = {0};
    if (seed32 == NULL) {
        /* When seed is NULL, reset the initial point and blinding value. */
        rustsecp256k1_v0_1_1_gej_set_ge(&ctx->initial, &rustsecp256k1_v0_1_1_ge_const_g);
        rustsecp256k1_v0_1_1_gej_neg(&ctx->initial, &ctx->initial);
        rustsecp256k1_v0_1_1_scalar_set_int(&ctx->blind, 1);
    }
    /* The prior blinding value (if not reset) is chained forward by including it in the hash. */
    rustsecp256k1_v0_1_1_scalar_get_b32(nonce32, &ctx->blind);
    /** Using a CSPRNG allows a failure free interface, avoids needing large amounts of random data,
     *   and guards against weak or adversarial seeds. This is a simpler and safer interface than
     *   asking the caller for blinding values directly and expecting them to retry on failure.
     */
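    /* keydata = previous blinding value (32 bytes) || caller-provided seed
     * (32 bytes, when present). */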
    memcpy(keydata, nonce32, 32);
    if (seed32 != NULL) {
        memcpy(keydata + 32, seed32, 32);
    }
    rustsecp256k1_v0_1_1_rfc6979_hmac_sha256_initialize(&rng, keydata, seed32 ? 64 : 32);
    memset(keydata, 0, sizeof(keydata));
    /* Retry for out of range results to achieve uniformity. */
    do {
        rustsecp256k1_v0_1_1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
        retry = !rustsecp256k1_v0_1_1_fe_set_b32(&s, nonce32);
        retry |= rustsecp256k1_v0_1_1_fe_is_zero(&s);
    } while (retry); /* This branch is cryptographically unreachable; it would require the HMAC-SHA256 output to exceed the field prime. */
    /* Randomize the projection to defend against multiplier sidechannels. */
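    /* gej_rescale multiplies Z by s (and X, Y by s^2 and s^3), leaving the
     * represented affine point unchanged while making the Jacobian
     * representation of ctx->initial unpredictable. */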
    rustsecp256k1_v0_1_1_gej_rescale(&ctx->initial, &s);
    rustsecp256k1_v0_1_1_fe_clear(&s);
    do {
        rustsecp256k1_v0_1_1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
        rustsecp256k1_v0_1_1_scalar_set_b32(&b, nonce32, &retry);
        /* A blinding value of 0 works, but would undermine the projection hardening. */
        retry |= rustsecp256k1_v0_1_1_scalar_is_zero(&b);
    } while (retry); /* This branch is cryptographically unreachable; it would require the HMAC-SHA256 output to exceed the group order. */
    rustsecp256k1_v0_1_1_rfc6979_hmac_sha256_finalize(&rng);
    memset(nonce32, 0, 32);
    rustsecp256k1_v0_1_1_ecmult_gen(ctx, &gb, &b);
    rustsecp256k1_v0_1_1_scalar_negate(&b, &b);
    ctx->blind = b;
    ctx->initial = gb;
    rustsecp256k1_v0_1_1_scalar_clear(&b);
    rustsecp256k1_v0_1_1_gej_clear(&gb);
}

#endif /* SECP256K1_ECMULT_GEN_IMPL_H */