/* rust-secp256k1-unsafe-fast/secp256k1-sys/depend/secp256k1/src/tests_exhaustive.c */
/***********************************************************************
* Copyright (c) 2016 Andrew Poelstra *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#ifndef EXHAUSTIVE_TEST_ORDER
/* see group_impl.h for allowable values */
#define EXHAUSTIVE_TEST_ORDER 13
#endif
/* These values of B are all values in [1, 8] that result in a curve with even order. */
#define EXHAUSTIVE_TEST_CURVE_HAS_EVEN_ORDER (SECP256K1_B == 1 || SECP256K1_B == 6 || SECP256K1_B == 8)
#ifdef USE_EXTERNAL_DEFAULT_CALLBACKS
#pragma message("Ignoring USE_EXTERNAL_CALLBACKS in exhaustive_tests.")
#undef USE_EXTERNAL_DEFAULT_CALLBACKS
#endif
#include "secp256k1.c"
#include "../include/secp256k1.h"
#include "assumptions.h"
#include "group.h"
#include "testrand_impl.h"
#include "ecmult_compute_table_impl.h"
#include "ecmult_gen_compute_table_impl.h"
#include "util.h"
static int count = 2;
/** stolen from tests.c */
static void ge_equals_ge(const rustsecp256k1_v0_9_0_ge *a, const rustsecp256k1_v0_9_0_ge *b) {
CHECK(a->infinity == b->infinity);
if (a->infinity) {
return;
}
2023-09-27 18:37:09 +00:00
CHECK(rustsecp256k1_v0_9_0_fe_equal(&a->x, &b->x));
CHECK(rustsecp256k1_v0_9_0_fe_equal(&a->y, &b->y));
}
/* Assert that an affine point equals a Jacobian point without computing a
 * field inverse: checks a.x * b.z^2 == b.x and a.y * b.z^3 == b.y. */
static void ge_equals_gej(const rustsecp256k1_v0_9_0_ge *a, const rustsecp256k1_v0_9_0_gej *b) {
    rustsecp256k1_v0_9_0_fe zz;
    rustsecp256k1_v0_9_0_fe lhs_x, lhs_y;
    CHECK(a->infinity == b->infinity);
    if (a->infinity) {
        return;
    }
    rustsecp256k1_v0_9_0_fe_sqr(&zz, &b->z);
    rustsecp256k1_v0_9_0_fe_mul(&lhs_x, &a->x, &zz);
    rustsecp256k1_v0_9_0_fe_mul(&lhs_y, &a->y, &zz);
    rustsecp256k1_v0_9_0_fe_mul(&lhs_y, &lhs_y, &b->z);
    CHECK(rustsecp256k1_v0_9_0_fe_equal(&lhs_x, &b->x));
    CHECK(rustsecp256k1_v0_9_0_fe_equal(&lhs_y, &b->y));
}
/* Fill *x with a uniformly random field element. */
static void random_fe(rustsecp256k1_v0_9_0_fe *x) {
    unsigned char bin[32];
    for (;;) {
        rustsecp256k1_v0_9_0_testrand256(bin);
        /* set_b32_limit rejects encodings >= p; retry until in range. */
        if (rustsecp256k1_v0_9_0_fe_set_b32_limit(x, bin)) {
            break;
        }
    }
}
/* Fill *nz with a uniformly random non-zero (normalized) field element. */
static void random_fe_non_zero(rustsecp256k1_v0_9_0_fe *nz) {
    int attempt;
    for (attempt = 0; attempt < 10; attempt++) {
        random_fe(nz);
        rustsecp256k1_v0_9_0_fe_normalize(nz);
        if (!rustsecp256k1_v0_9_0_fe_is_zero(nz)) {
            return;
        }
    }
    /* Ten consecutive zero field elements: infinitesimal probability of
     * spurious failure here. */
    CHECK(0);
}
/** END stolen from tests.c */
/* Work-splitting state: iterations are partitioned over num_cores processes;
 * this_core selects the slice this process runs. */
static uint32_t num_cores = 1;
static uint32_t this_core = 0;

/* Advance the per-loop iteration counter and return nonzero if the current
 * iteration is assigned to a different core and should be skipped. */
SECP256K1_INLINE static int skip_section(uint64_t* iter) {
    uint32_t mix;
    if (num_cores == 1) {
        return 0;
    }
    *iter += 0xe7037ed1a0b428dbULL;
    /* Mix the counter's halves, then map the 32-bit value onto [0, num_cores). */
    mix = (uint32_t)*iter ^ (uint32_t)(*iter >> 32);
    return (((uint64_t)mix * num_cores) >> 32) != this_core;
}
static int rustsecp256k1_v0_9_0_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32,
const unsigned char *key32, const unsigned char *algo16,
void *data, unsigned int attempt) {
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_scalar s;
int *idata = data;
(void)msg32;
(void)key32;
(void)algo16;
/* Some nonces cannot be used because they'd cause s and/or r to be zero.
* The signing function has retry logic here that just re-calls the nonce
* function with an increased `attempt`. So if attempt > 0 this means we
* need to change the nonce to avoid an infinite loop. */
if (attempt > 0) {
*idata = (*idata + 1) % EXHAUSTIVE_TEST_ORDER;
}
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_scalar_set_int(&s, *idata);
rustsecp256k1_v0_9_0_scalar_get_b32(nonce32, &s);
return 1;
}
static void test_exhaustive_endomorphism(const rustsecp256k1_v0_9_0_ge *group) {
int i;
2020-09-15 01:39:26 +00:00
for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_ge res;
rustsecp256k1_v0_9_0_ge_mul_lambda(&res, &group[i]);
ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res);
}
}
static void test_exhaustive_addition(const rustsecp256k1_v0_9_0_ge *group, const rustsecp256k1_v0_9_0_gej *groupj) {
int i, j;
2020-09-15 01:39:26 +00:00
uint64_t iter = 0;
/* Sanity-check (and check infinity functions) */
2023-09-27 18:37:09 +00:00
CHECK(rustsecp256k1_v0_9_0_ge_is_infinity(&group[0]));
CHECK(rustsecp256k1_v0_9_0_gej_is_infinity(&groupj[0]));
2020-09-15 01:39:26 +00:00
for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
2023-09-27 18:37:09 +00:00
CHECK(!rustsecp256k1_v0_9_0_ge_is_infinity(&group[i]));
CHECK(!rustsecp256k1_v0_9_0_gej_is_infinity(&groupj[i]));
}
/* Check all addition formulae */
2020-09-15 01:39:26 +00:00
for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) {
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_fe fe_inv;
2020-09-15 01:39:26 +00:00
if (skip_section(&iter)) continue;
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_fe_inv(&fe_inv, &groupj[j].z);
2020-09-15 01:39:26 +00:00
for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_ge zless_gej;
rustsecp256k1_v0_9_0_gej tmp;
/* add_var */
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL);
2020-09-15 01:39:26 +00:00
ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
/* add_ge */
if (j > 0) {
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_gej_add_ge(&tmp, &groupj[i], &group[j]);
2020-09-15 01:39:26 +00:00
ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
}
/* add_ge_var */
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL);
2020-09-15 01:39:26 +00:00
ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
/* add_zinv_var */
zless_gej.infinity = groupj[j].infinity;
zless_gej.x = groupj[j].x;
zless_gej.y = groupj[j].y;
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv);
2020-09-15 01:39:26 +00:00
ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
}
}
/* Check doubling */
2020-09-15 01:39:26 +00:00
for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_gej tmp;
rustsecp256k1_v0_9_0_gej_double(&tmp, &groupj[i]);
2020-09-15 01:39:26 +00:00
ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp);
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_gej_double_var(&tmp, &groupj[i], NULL);
2020-09-15 01:39:26 +00:00
ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp);
}
/* Check negation */
2020-09-15 01:39:26 +00:00
for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_ge tmp;
rustsecp256k1_v0_9_0_gej tmpj;
rustsecp256k1_v0_9_0_ge_neg(&tmp, &group[i]);
2020-09-15 01:39:26 +00:00
ge_equals_ge(&group[EXHAUSTIVE_TEST_ORDER - i], &tmp);
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_gej_neg(&tmpj, &groupj[i]);
2020-09-15 01:39:26 +00:00
ge_equals_gej(&group[EXHAUSTIVE_TEST_ORDER - i], &tmpj);
}
}
/* Exhaustively check ecmult (na*P + ng*G) plus the constant-time and x-only
 * multiplication variants against the group table. */
static void test_exhaustive_ecmult(const rustsecp256k1_v0_9_0_ge *group, const rustsecp256k1_v0_9_0_gej *groupj) {
    int i, j, r_log;
    uint64_t iter = 0;

    /* i*groupj[r_log] + j*G must equal group[(i*r_log + j) mod order]. */
    for (r_log = 1; r_log < EXHAUSTIVE_TEST_ORDER; r_log++) {
        for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) {
            if (skip_section(&iter)) continue;
            for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
                rustsecp256k1_v0_9_0_gej res;
                rustsecp256k1_v0_9_0_scalar na, ng;
                rustsecp256k1_v0_9_0_scalar_set_int(&na, i);
                rustsecp256k1_v0_9_0_scalar_set_int(&ng, j);
                rustsecp256k1_v0_9_0_ecmult(&res, &groupj[r_log], &na, &ng);
                ge_equals_gej(&group[(i * r_log + j) % EXHAUSTIVE_TEST_ORDER], &res);
            }
        }
    }

    for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) {
        for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
            int ret;
            rustsecp256k1_v0_9_0_gej res;
            rustsecp256k1_v0_9_0_fe xn, xd, resx;
            rustsecp256k1_v0_9_0_scalar ng;
            if (skip_section(&iter)) continue;
            rustsecp256k1_v0_9_0_scalar_set_int(&ng, j);
            /* Constant-time multiply: j * group[i]. */
            rustsecp256k1_v0_9_0_ecmult_const(&res, &group[i], &ng);
            ge_equals_gej(&group[(i * j) % EXHAUSTIVE_TEST_ORDER], &res);
            if (i != 0 && j != 0) {
                /* X-only multiply with all curve X coordinates, xd == NULL. */
                ret = rustsecp256k1_v0_9_0_ecmult_const_xonly(&resx, &group[i].x, NULL, &ng, 0);
                CHECK(ret);
                CHECK(rustsecp256k1_v0_9_0_fe_equal(&resx, &group[(i * j) % EXHAUSTIVE_TEST_ORDER].x));
                /* Same, with the X coordinate provided as a fraction xn/xd. */
                random_fe_non_zero(&xd);
                rustsecp256k1_v0_9_0_fe_mul(&xn, &xd, &group[i].x);
                ret = rustsecp256k1_v0_9_0_ecmult_const_xonly(&resx, &xn, &xd, &ng, 0);
                CHECK(ret);
                CHECK(rustsecp256k1_v0_9_0_fe_equal(&resx, &group[(i * j) % EXHAUSTIVE_TEST_ORDER].x));
            }
        }
    }
}
typedef struct {
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_scalar sc[2];
rustsecp256k1_v0_9_0_ge pt[2];
} ecmult_multi_data;
/* ecmult_multi callback: hand back scalar/point pair `idx` from cbdata. */
static int ecmult_multi_callback(rustsecp256k1_v0_9_0_scalar *sc, rustsecp256k1_v0_9_0_ge *pt, size_t idx, void *cbdata) {
    const ecmult_multi_data *data = (const ecmult_multi_data *)cbdata;
    *sc = data->sc[idx];
    *pt = data->pt[idx];
    return 1;
}
static void test_exhaustive_ecmult_multi(const rustsecp256k1_v0_9_0_context *ctx, const rustsecp256k1_v0_9_0_ge *group) {
int i, j, k, x, y;
2020-09-15 01:39:26 +00:00
uint64_t iter = 0;
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_scratch *scratch = rustsecp256k1_v0_9_0_scratch_create(&ctx->error_callback, 4096);
2020-09-15 01:39:26 +00:00
for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) {
for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) {
for (x = 0; x < EXHAUSTIVE_TEST_ORDER; x++) {
if (skip_section(&iter)) continue;
for (y = 0; y < EXHAUSTIVE_TEST_ORDER; y++) {
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_gej tmp;
rustsecp256k1_v0_9_0_scalar g_sc;
ecmult_multi_data data;
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_scalar_set_int(&data.sc[0], i);
rustsecp256k1_v0_9_0_scalar_set_int(&data.sc[1], j);
rustsecp256k1_v0_9_0_scalar_set_int(&g_sc, k);
data.pt[0] = group[x];
data.pt[1] = group[y];
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_ecmult_multi_var(&ctx->error_callback, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2);
2020-09-15 01:39:26 +00:00
ge_equals_gej(&group[(i * x + j * y + k) % EXHAUSTIVE_TEST_ORDER], &tmp);
}
}
}
}
}
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_scratch_destroy(&ctx->error_callback, scratch);
}
/* Compute the ECDSA r value for nonce k: the x coordinate of k*G, serialized
 * and reduced into a scalar; *overflow (if non-NULL) reports reduction wrap. */
static void r_from_k(rustsecp256k1_v0_9_0_scalar *r, const rustsecp256k1_v0_9_0_ge *group, int k, int* overflow) {
    unsigned char x_bin[32];
    rustsecp256k1_v0_9_0_fe x = group[k % EXHAUSTIVE_TEST_ORDER].x;
    rustsecp256k1_v0_9_0_fe_normalize(&x);
    rustsecp256k1_v0_9_0_fe_get_b32(x_bin, &x);
    rustsecp256k1_v0_9_0_scalar_set_b32(r, x_bin, overflow);
}
/* Exhaustively check ECDSA verification: for every (s, r, msg, key) tuple,
 * evaluate the verification equation by hand and confirm the real verifier
 * reaches the same verdict. */
static void test_exhaustive_verify(const rustsecp256k1_v0_9_0_context *ctx, const rustsecp256k1_v0_9_0_ge *group) {
    int s, r, msg, key;
    uint64_t iter = 0;
    for (s = 1; s < EXHAUSTIVE_TEST_ORDER; s++) {
        for (r = 1; r < EXHAUSTIVE_TEST_ORDER; r++) {
            for (msg = 1; msg < EXHAUSTIVE_TEST_ORDER; msg++) {
                for (key = 1; key < EXHAUSTIVE_TEST_ORDER; key++) {
                    rustsecp256k1_v0_9_0_ge nonconst_ge;
                    rustsecp256k1_v0_9_0_ecdsa_signature sig;
                    rustsecp256k1_v0_9_0_pubkey pk;
                    rustsecp256k1_v0_9_0_scalar sk_s, msg_s, r_s, s_s;
                    int k, should_verify;
                    unsigned char msg32[32];

                    if (skip_section(&iter)) continue;

                    rustsecp256k1_v0_9_0_scalar_set_int(&s_s, s);
                    rustsecp256k1_v0_9_0_scalar_set_int(&r_s, r);
                    rustsecp256k1_v0_9_0_scalar_set_int(&msg_s, msg);
                    rustsecp256k1_v0_9_0_scalar_set_int(&sk_s, key);

                    /* Verify by hand: run through every nonce k that yields
                     * this r and accept if *one* satisfies the equation. Note
                     * there could be none, there could be multiple; ECDSA is
                     * weird. */
                    should_verify = 0;
                    for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) {
                        rustsecp256k1_v0_9_0_scalar check_x_s;
                        r_from_k(&check_x_s, group, k, NULL);
                        if (r_s == check_x_s) {
                            rustsecp256k1_v0_9_0_scalar lhs, rhs;
                            /* lhs = s*k, rhs = msg + r*sk. */
                            rustsecp256k1_v0_9_0_scalar_set_int(&lhs, k);
                            rustsecp256k1_v0_9_0_scalar_mul(&lhs, &lhs, &s_s);
                            rustsecp256k1_v0_9_0_scalar_mul(&rhs, &r_s, &sk_s);
                            rustsecp256k1_v0_9_0_scalar_add(&rhs, &rhs, &msg_s);
                            should_verify |= rustsecp256k1_v0_9_0_scalar_eq(&lhs, &rhs);
                        }
                    }
                    /* nb we have a "high s" rule. */
                    should_verify &= !rustsecp256k1_v0_9_0_scalar_is_high(&s_s);

                    /* Verify by calling the real verifier. */
                    rustsecp256k1_v0_9_0_ecdsa_signature_save(&sig, &r_s, &s_s);
                    memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
                    rustsecp256k1_v0_9_0_pubkey_save(&pk, &nonconst_ge);
                    rustsecp256k1_v0_9_0_scalar_get_b32(msg32, &msg_s);
                    CHECK(should_verify ==
                          rustsecp256k1_v0_9_0_ecdsa_verify(ctx, &sig, msg32, &pk));
                }
            }
        }
    }
}
static void test_exhaustive_sign(const rustsecp256k1_v0_9_0_context *ctx, const rustsecp256k1_v0_9_0_ge *group) {
int i, j, k;
2020-09-15 01:39:26 +00:00
uint64_t iter = 0;
/* Loop */
2020-09-15 01:39:26 +00:00
for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { /* message */
for (j = 1; j < EXHAUSTIVE_TEST_ORDER; j++) { /* key */
if (skip_section(&iter)) continue;
for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) { /* nonce */
const int starting_k = k;
int ret;
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_ecdsa_signature sig;
rustsecp256k1_v0_9_0_scalar sk, msg, r, s, expected_r;
unsigned char sk32[32], msg32[32];
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_scalar_set_int(&msg, i);
rustsecp256k1_v0_9_0_scalar_set_int(&sk, j);
rustsecp256k1_v0_9_0_scalar_get_b32(sk32, &sk);
rustsecp256k1_v0_9_0_scalar_get_b32(msg32, &msg);
2023-09-27 18:37:09 +00:00
ret = rustsecp256k1_v0_9_0_ecdsa_sign(ctx, &sig, msg32, sk32, rustsecp256k1_v0_9_0_nonce_function_smallint, &k);
CHECK(ret == 1);
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_ecdsa_signature_load(ctx, &r, &s, &sig);
/* Note that we compute expected_r *after* signing -- this is important
* because our nonce-computing function function might change k during
* signing. */
2020-09-15 01:39:26 +00:00
r_from_k(&expected_r, group, k, NULL);
CHECK(r == expected_r);
2020-09-15 01:39:26 +00:00
CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER ||
(k * (EXHAUSTIVE_TEST_ORDER - s)) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER);
/* Overflow means we've tried every possible nonce */
if (k < starting_k) {
break;
}
}
}
}
/* We would like to verify zero-knowledge here by counting how often every
* possible (s, r) tuple appears, but because the group order is larger
* than the field order, when coercing the x-values to scalar values, some
* appear more often than others, so we are actually not zero-knowledge.
* (This effect also appears in the real code, but the difference is on the
* order of 1/2^128th the field order, so the deviation is not useful to a
* computationally bounded attacker.)
*/
}
/* Optional-module exhaustive tests, pulled in only when the corresponding
 * module is enabled at build time. */
#ifdef ENABLE_MODULE_RECOVERY
#include "modules/recovery/tests_exhaustive_impl.h"
#endif

#ifdef ENABLE_MODULE_EXTRAKEYS
#include "modules/extrakeys/tests_exhaustive_impl.h"
#endif

#ifdef ENABLE_MODULE_SCHNORRSIG
#include "modules/schnorrsig/tests_exhaustive_impl.h"
#endif

#ifdef ENABLE_MODULE_ELLSWIFT
#include "modules/ellswift/tests_exhaustive_impl.h"
#endif
int main(int argc, char** argv) {
int i;
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_gej groupj[EXHAUSTIVE_TEST_ORDER];
rustsecp256k1_v0_9_0_ge group[EXHAUSTIVE_TEST_ORDER];
2020-09-15 01:39:26 +00:00
unsigned char rand32[32];
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_context *ctx;
2020-09-15 01:39:26 +00:00
/* Disable buffering for stdout to improve reliability of getting
* diagnostic information. Happens right at the start of main because
* setbuf must be used before any other operation on the stream. */
setbuf(stdout, NULL);
/* Also disable buffering for stderr because it's not guaranteed that it's
* unbuffered on all systems. */
setbuf(stderr, NULL);
printf("Exhaustive tests for order %lu\n", (unsigned long)EXHAUSTIVE_TEST_ORDER);
/* find iteration count */
if (argc > 1) {
count = strtol(argv[1], NULL, 0);
}
2020-09-15 01:39:26 +00:00
printf("test count = %i\n", count);
/* find random seed */
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_testrand_init(argc > 2 ? argv[2] : NULL);
2020-09-15 01:39:26 +00:00
/* set up split processing */
if (argc > 4) {
num_cores = strtol(argv[3], NULL, 0);
this_core = strtol(argv[4], NULL, 0);
if (num_cores < 1 || this_core >= num_cores) {
fprintf(stderr, "Usage: %s [count] [seed] [numcores] [thiscore]\n", argv[0]);
return 1;
}
2020-09-15 01:39:26 +00:00
printf("running tests for core %lu (out of [0..%lu])\n", (unsigned long)this_core, (unsigned long)num_cores - 1);
}
/* Recreate the ecmult{,_gen} tables using the right generator (as selected via EXHAUSTIVE_TEST_ORDER) */
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_ecmult_gen_compute_table(&rustsecp256k1_v0_9_0_ecmult_gen_prec_table[0][0], &rustsecp256k1_v0_9_0_ge_const_g, ECMULT_GEN_PREC_BITS);
rustsecp256k1_v0_9_0_ecmult_compute_two_tables(rustsecp256k1_v0_9_0_pre_g, rustsecp256k1_v0_9_0_pre_g_128, WINDOW_G, &rustsecp256k1_v0_9_0_ge_const_g);
2020-09-15 01:39:26 +00:00
while (count--) {
/* Build context */
2023-09-27 18:37:09 +00:00
ctx = rustsecp256k1_v0_9_0_context_create(SECP256K1_CONTEXT_NONE);
rustsecp256k1_v0_9_0_testrand256(rand32);
CHECK(rustsecp256k1_v0_9_0_context_randomize(ctx, rand32));
2020-09-15 01:39:26 +00:00
/* Generate the entire group */
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_gej_set_infinity(&groupj[0]);
rustsecp256k1_v0_9_0_ge_set_gej(&group[0], &groupj[0]);
2020-09-15 01:39:26 +00:00
for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_gej_add_ge(&groupj[i], &groupj[i - 1], &rustsecp256k1_v0_9_0_ge_const_g);
rustsecp256k1_v0_9_0_ge_set_gej(&group[i], &groupj[i]);
2020-09-15 01:39:26 +00:00
if (count != 0) {
/* Set a different random z-value for each Jacobian point, except z=1
is used in the last iteration. */
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_fe z;
2020-09-15 01:39:26 +00:00
random_fe(&z);
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_gej_rescale(&groupj[i], &z);
2020-09-15 01:39:26 +00:00
}
2020-09-15 01:39:26 +00:00
/* Verify against ecmult_gen */
{
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_scalar scalar_i;
rustsecp256k1_v0_9_0_gej generatedj;
rustsecp256k1_v0_9_0_ge generated;
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_scalar_set_int(&scalar_i, i);
rustsecp256k1_v0_9_0_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i);
rustsecp256k1_v0_9_0_ge_set_gej(&generated, &generatedj);
2020-09-15 01:39:26 +00:00
CHECK(group[i].infinity == 0);
CHECK(generated.infinity == 0);
2023-09-27 18:37:09 +00:00
CHECK(rustsecp256k1_v0_9_0_fe_equal(&generated.x, &group[i].x));
CHECK(rustsecp256k1_v0_9_0_fe_equal(&generated.y, &group[i].y));
2020-09-15 01:39:26 +00:00
}
}
2020-09-15 01:39:26 +00:00
/* Run the tests */
test_exhaustive_endomorphism(group);
test_exhaustive_addition(group, groupj);
test_exhaustive_ecmult(group, groupj);
2020-09-15 01:39:26 +00:00
test_exhaustive_ecmult_multi(ctx, group);
test_exhaustive_sign(ctx, group);
test_exhaustive_verify(ctx, group);
#ifdef ENABLE_MODULE_RECOVERY
2020-09-15 01:39:26 +00:00
test_exhaustive_recovery(ctx, group);
#endif
#ifdef ENABLE_MODULE_EXTRAKEYS
test_exhaustive_extrakeys(ctx, group);
#endif
#ifdef ENABLE_MODULE_SCHNORRSIG
test_exhaustive_schnorrsig(ctx);
#endif
2023-09-27 18:37:09 +00:00
#ifdef ENABLE_MODULE_ELLSWIFT
/* The ellswift algorithm does have additional edge cases when operating on
* curves of even order, which are not included in the code as secp256k1 is
* of odd order. Skip the ellswift tests if the used exhaustive tests curve
* is even-ordered accordingly. */
#if !EXHAUSTIVE_TEST_CURVE_HAS_EVEN_ORDER
test_exhaustive_ellswift(ctx, group);
#endif
#endif
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_context_destroy(ctx);
2020-09-15 01:39:26 +00:00
}
2023-09-27 18:37:09 +00:00
rustsecp256k1_v0_9_0_testrand_finish();
2020-09-15 01:39:26 +00:00
printf("no problems found\n");
return 0;
}