From 55fab77029b0bc6bbbaeb38411888b50932b6fb0 Mon Sep 17 00:00:00 2001
From: Steven Roose
Date: Mon, 21 Oct 2019 17:06:23 +0200
Subject: [PATCH] Add vendor script to prefix secp256k1 sources

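Prefixing every exported symbol with a versioned "rustsecp256k1_v0_1_0_"
prefix lets the vendored C sources coexist in one binary with any other
copy of libsecp256k1 (e.g. one pulled in by a system library). A minimal
illustration (not part of the patch; the declarations below merely
restate, side by side, what the upstream and renamed headers export):

    /* Hypothetical consumer: both libraries can be linked at once,
     * because the vendored symbols no longer share names with upstream. */
    typedef struct secp256k1_context_struct secp256k1_context;                       /* upstream   */
    typedef struct rustsecp256k1_v0_1_0_context_struct rustsecp256k1_v0_1_0_context; /* vendored   */
    extern const secp256k1_context *secp256k1_context_no_precomp;
    extern const rustsecp256k1_v0_1_0_context *rustsecp256k1_v0_1_0_context_no_precomp;
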
---
 secp256k1-sys/depend/scratch_impl.h.patch | 26 +
 .../depend/secp256k1-HEAD-revision.txt | 2 +
 secp256k1-sys/depend/secp256k1.c.patch | 43 +
 secp256k1-sys/depend/secp256k1.h.patch | 22 +
 secp256k1-sys/depend/secp256k1/Makefile.am | 20 +-
 secp256k1-sys/depend/secp256k1/configure.ac | 2 +-
 .../secp256k1/contrib/lax_der_parsing.c | 8 +-
 .../secp256k1/contrib/lax_der_parsing.h | 10 +-
 .../contrib/lax_der_privatekey_parsing.c | 14 +-
 .../contrib/lax_der_privatekey_parsing.h | 6 +-
 .../depend/secp256k1/include/secp256k1.h | 242 +-
 .../depend/secp256k1/include/secp256k1_ecdh.h | 20 +-
 .../include/secp256k1_preallocated.h | 54 +-
 .../secp256k1/include/secp256k1_recovery.h | 44 +-
 .../depend/secp256k1/sage/secp256k1.sage | 50 +-
 .../secp256k1/src/asm/field_10x26_arm.s | 16 +-
 .../depend/secp256k1/src/bench_ecdh.c | 10 +-
 .../depend/secp256k1/src/bench_ecmult.c | 96 +-
 .../depend/secp256k1/src/bench_internal.c | 122 +-
 .../depend/secp256k1/src/bench_recover.c | 16 +-
 .../depend/secp256k1/src/bench_sign.c | 12 +-
 .../depend/secp256k1/src/bench_verify.c | 28 +-
 secp256k1-sys/depend/secp256k1/src/ecdsa.h | 8 +-
 .../depend/secp256k1/src/ecdsa_impl.h | 124 +-
 secp256k1-sys/depend/secp256k1/src/eckey.h | 12 +-
 .../depend/secp256k1/src/eckey_impl.h | 74 +-
 secp256k1-sys/depend/secp256k1/src/ecmult.h | 22 +-
 .../depend/secp256k1/src/ecmult_const.h | 2 +-
 .../depend/secp256k1/src/ecmult_const_impl.h | 118 +-
 .../depend/secp256k1/src/ecmult_gen.h | 22 +-
 .../depend/secp256k1/src/ecmult_gen_impl.h | 134 +-
 .../depend/secp256k1/src/ecmult_impl.h | 522 +--
 secp256k1-sys/depend/secp256k1/src/field.h | 60 +-
 .../depend/secp256k1/src/field_10x26.h | 4 +-
 .../depend/secp256k1/src/field_10x26_impl.h | 96 +-
 .../depend/secp256k1/src/field_5x52.h | 4 +-
 .../secp256k1/src/field_5x52_asm_impl.h | 4 +-
 .../depend/secp256k1/src/field_5x52_impl.h | 88 +-
 .../secp256k1/src/field_5x52_int128_impl.h | 4 +-
 .../depend/secp256k1/src/field_impl.h | 212 +-
 .../depend/secp256k1/src/gen_context.c | 12 +-
 secp256k1-sys/depend/secp256k1/src/group.h | 84 +-
 .../depend/secp256k1/src/group_impl.h | 546 +--
 secp256k1-sys/depend/secp256k1/src/hash.h | 26 +-
 .../depend/secp256k1/src/hash_impl.h | 108 +-
 .../src/java/org/bitcoin/NativeSecp256k1.java | 50 +-
 .../java/org/bitcoin/Secp256k1Context.java | 4 +-
 .../src/java/org_bitcoin_NativeSecp256k1.c | 108 +-
 .../src/java/org_bitcoin_NativeSecp256k1.h | 52 +-
 .../src/java/org_bitcoin_Secp256k1Context.c | 4 +-
 .../src/java/org_bitcoin_Secp256k1Context.h | 4 +-
 .../src/modules/ecdh/Makefile.am.include | 2 +-
 .../secp256k1/src/modules/ecdh/main_impl.h | 44 +-
 .../secp256k1/src/modules/ecdh/tests_impl.h | 64 +-
 .../src/modules/recovery/Makefile.am.include | 2 +-
 .../src/modules/recovery/main_impl.h | 138 +-
 .../src/modules/recovery/tests_impl.h | 266 +-
 secp256k1-sys/depend/secp256k1/src/num.h | 32 +-
 secp256k1-sys/depend/secp256k1/src/num_gmp.h | 2 +-
 .../depend/secp256k1/src/num_gmp_impl.h | 84 +-
 secp256k1-sys/depend/secp256k1/src/scalar.h | 52 +-
 .../depend/secp256k1/src/scalar_4x64.h | 2 +-
 .../depend/secp256k1/src/scalar_4x64_impl.h | 78 +-
 .../depend/secp256k1/src/scalar_8x32.h | 2 +-
 .../depend/secp256k1/src/scalar_8x32_impl.h | 78 +-
 .../depend/secp256k1/src/scalar_impl.h | 228 +-
 .../depend/secp256k1/src/scalar_low.h | 2 +-
 .../depend/secp256k1/src/scalar_low_impl.h | 46 +-
 secp256k1-sys/depend/secp256k1/src/scratch.h | 18 +-
 .../depend/secp256k1/src/scratch_impl.h | 18 +-
 .../depend/secp256k1/src/secp256k1.c | 442 +--
 secp256k1-sys/depend/secp256k1/src/testrand.h | 14 +-
 .../depend/secp256k1/src/testrand_impl.h | 56 +-
 secp256k1-sys/depend/secp256k1/src/tests.c | 3070 ++++++++---------
 .../depend/secp256k1/src/tests_exhaustive.c | 310 +-
 secp256k1-sys/depend/secp256k1/src/util.h | 4 +-
 secp256k1-sys/depend/util.h.patch | 17 +
 secp256k1-sys/src/lib.rs | 46 +-
 secp256k1-sys/src/recovery.rs | 5 +
 secp256k1-sys/vendor-libsecp.sh | 68 +
 80 files changed, 4417 insertions(+), 4144 deletions(-)
 create mode 100644 secp256k1-sys/depend/scratch_impl.h.patch
 create mode 100644 secp256k1-sys/depend/secp256k1-HEAD-revision.txt
 create mode 100644 secp256k1-sys/depend/secp256k1.c.patch
 create mode 100644 secp256k1-sys/depend/secp256k1.h.patch
 create mode 100644 secp256k1-sys/depend/util.h.patch
 create mode 100755 secp256k1-sys/vendor-libsecp.sh

diff --git a/secp256k1-sys/depend/scratch_impl.h.patch b/secp256k1-sys/depend/scratch_impl.h.patch
new file mode 100644
index 0000000..6e72191
--- /dev/null
+++ b/secp256k1-sys/depend/scratch_impl.h.patch
@@ -0,0 +1,26 @@
+13,37d12
+< static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* error_callback, size_t size) {
+<     const size_t base_alloc = ((sizeof(secp256k1_scratch) + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT;
+<     void *alloc = checked_malloc(error_callback, base_alloc + size);
+<     secp256k1_scratch* ret = (secp256k1_scratch *)alloc;
+<     if (ret != NULL) {
+<         memset(ret, 0, sizeof(*ret));
+<         memcpy(ret->magic, "scratch", 8);
+<         ret->data = (void *) ((char *) alloc + base_alloc);
+<         ret->max_size = size;
+<     }
+<     return ret;
+< }
+<
+< static void secp256k1_scratch_destroy(const secp256k1_callback* error_callback, secp256k1_scratch* scratch) {
+<     if (scratch != NULL) {
+<         VERIFY_CHECK(scratch->alloc_size == 0); /* all checkpoints should be applied */
+<         if (memcmp(scratch->magic, "scratch", 8) != 0) {
+<             secp256k1_callback_call(error_callback, "invalid scratch space");
+<             return;
+<         }
+<         memset(scratch->magic, 0, sizeof(scratch->magic));
+<         free(scratch);
+<     }
+< }
+<
diff --git a/secp256k1-sys/depend/secp256k1-HEAD-revision.txt b/secp256k1-sys/depend/secp256k1-HEAD-revision.txt
new file mode 100644
index 0000000..94db77f
--- /dev/null
+++ b/secp256k1-sys/depend/secp256k1-HEAD-revision.txt
@@ -0,0 +1,2 @@
+# This file was automatically created by ./vendor-libsecp.sh
+143dc6e9ee31852a60321b23eea407d2006171da
diff --git a/secp256k1-sys/depend/secp256k1.c.patch b/secp256k1-sys/depend/secp256k1.c.patch
new file mode 100644
index 0000000..c39705a
--- /dev/null
+++ b/secp256k1-sys/depend/secp256k1.c.patch
@@ -0,0 +1,43 @@
+139,149d138
+< secp256k1_context* secp256k1_context_create(unsigned int flags) {
+<     size_t const prealloc_size = secp256k1_context_preallocated_size(flags);
+<     secp256k1_context* ctx = (secp256k1_context*)checked_malloc(&default_error_callback, prealloc_size);
+<     if (EXPECT(secp256k1_context_preallocated_create(ctx, flags) == NULL, 0)) {
+<         free(ctx);
+<         return NULL;
+<     }
+<
+<     return ctx;
+< }
+<
+164,174d152
+< secp256k1_context* secp256k1_context_clone(const secp256k1_context* ctx) {
+<     secp256k1_context* ret;
+<     size_t prealloc_size;
+<
+<     VERIFY_CHECK(ctx != NULL);
+<     prealloc_size = secp256k1_context_preallocated_clone_size(ctx);
+<     ret = (secp256k1_context*)checked_malloc(&ctx->error_callback, prealloc_size);
+<     ret = secp256k1_context_preallocated_clone(ctx, ret);
+<     return ret;
+< }
+<
+183,189d160
+< void secp256k1_context_destroy(secp256k1_context* ctx) {
+<     if (ctx != NULL) {
+<         secp256k1_context_preallocated_destroy(ctx);
+<         free(ctx);
+<     }
+< }
+<
+206,215d176
+< }
+<
+< secp256k1_scratch_space* secp256k1_scratch_space_create(const secp256k1_context* ctx, size_t max_size) {
+<     VERIFY_CHECK(ctx != NULL);
+<     return secp256k1_scratch_create(&ctx->error_callback, max_size);
+< }
+<
+< void secp256k1_scratch_space_destroy(const secp256k1_context *ctx, secp256k1_scratch_space* scratch) {
+<     VERIFY_CHECK(ctx != NULL);
+<     secp256k1_scratch_destroy(&ctx->error_callback, scratch);
diff --git a/secp256k1-sys/depend/secp256k1.h.patch b/secp256k1-sys/depend/secp256k1.h.patch
new file mode 100644
index 0000000..bfd10ac
--- /dev/null
+++ b/secp256k1-sys/depend/secp256k1.h.patch
@@ -0,0 +1,22 @@
+202,204d201
+< SECP256K1_API secp256k1_context* secp256k1_context_create(
+<     unsigned int flags
+< ) SECP256K1_WARN_UNUSED_RESULT;
+215,217d211
+< SECP256K1_API secp256k1_context* secp256k1_context_clone(
+<     const secp256k1_context* ctx
+< ) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT;
+232,234d225
+< SECP256K1_API void secp256k1_context_destroy(
+<     secp256k1_context* ctx
+< );
+311,314d301
+< SECP256K1_API SECP256K1_WARN_UNUSED_RESULT secp256k1_scratch_space* secp256k1_scratch_space_create(
+<     const secp256k1_context* ctx,
+<     size_t size
+< ) SECP256K1_ARG_NONNULL(1);
+322,325d308
+< SECP256K1_API void secp256k1_scratch_space_destroy(
+<     const secp256k1_context* ctx,
+<     secp256k1_scratch_space* scratch
+< ) SECP256K1_ARG_NONNULL(1);
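The three .patch files above strip every allocating entry point
(context create/clone/destroy and the scratch-space constructors) out of
the vendored C sources; the Rust crate re-creates them on top of Rust's
allocator, and any remaining C-side caller is left with the preallocated
API. A minimal sketch of that calling pattern (not part of the patch;
it assumes the vendored headers are still installed as secp256k1.h and
secp256k1_preallocated.h):

    #include <stdlib.h>
    #include <secp256k1.h>
    #include <secp256k1_preallocated.h>

    int main(void) {
        unsigned int flags = SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY;
        size_t sz = rustsecp256k1_v0_1_0_context_preallocated_size(flags);
        void *mem = malloc(sz);  /* malloc's alignment is sufficient */
        if (mem == NULL) return 1;
        rustsecp256k1_v0_1_0_context *ctx =
            rustsecp256k1_v0_1_0_context_preallocated_create(mem, flags);
        /* ... use ctx ... */
        rustsecp256k1_v0_1_0_context_preallocated_destroy(ctx);
        free(mem);  /* the caller owns the memory block */
        return 0;
    }
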
diff --git a/secp256k1-sys/depend/secp256k1/Makefile.am b/secp256k1-sys/depend/secp256k1/Makefile.am
index 21df09f..1c04e8b 100644
--- a/secp256k1-sys/depend/secp256k1/Makefile.am
+++ b/secp256k1-sys/depend/secp256k1/Makefile.am
@@ -2,13 +2,13 @@ ACLOCAL_AMFLAGS = -I build-aux/m4
 lib_LTLIBRARIES = libsecp256k1.la
 if USE_JNI
-JNI_LIB = libsecp256k1_jni.la
+JNI_LIB = librustsecp256k1_v0_1_0_jni.la
 noinst_LTLIBRARIES = $(JNI_LIB)
 else
 JNI_LIB =
 endif
 include_HEADERS = include/secp256k1.h
-include_HEADERS += include/secp256k1_preallocated.h
+include_HEADERS += include/rustsecp256k1_v0_1_0_preallocated.h
 noinst_HEADERS =
 noinst_HEADERS += src/scalar.h
 noinst_HEADERS += src/scalar_4x64.h
@@ -58,7 +58,7 @@ noinst_HEADERS += contrib/lax_der_privatekey_parsing.h
 noinst_HEADERS += contrib/lax_der_privatekey_parsing.c
 
 if USE_EXTERNAL_ASM
-COMMON_LIB = libsecp256k1_common.la
+COMMON_LIB = librustsecp256k1_v0_1_0_common.la
 noinst_LTLIBRARIES = $(COMMON_LIB)
 else
 COMMON_LIB =
@@ -69,16 +69,16 @@ pkgconfig_DATA = libsecp256k1.pc
 
 if USE_EXTERNAL_ASM
 if USE_ASM_ARM
-libsecp256k1_common_la_SOURCES = src/asm/field_10x26_arm.s
+librustsecp256k1_v0_1_0_common_la_SOURCES = src/asm/field_10x26_arm.s
 endif
 endif
 
-libsecp256k1_la_SOURCES = src/secp256k1.c
-libsecp256k1_la_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/include -I$(top_srcdir)/src $(SECP_INCLUDES)
-libsecp256k1_la_LIBADD = $(JNI_LIB) $(SECP_LIBS) $(COMMON_LIB)
+librustsecp256k1_v0_1_0_la_SOURCES = src/secp256k1.c
+librustsecp256k1_v0_1_0_la_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/include -I$(top_srcdir)/src $(SECP_INCLUDES)
+librustsecp256k1_v0_1_0_la_LIBADD = $(JNI_LIB) $(SECP_LIBS) $(COMMON_LIB)
 
-libsecp256k1_jni_la_SOURCES = src/java/org_bitcoin_NativeSecp256k1.c src/java/org_bitcoin_Secp256k1Context.c
-libsecp256k1_jni_la_CPPFLAGS = -DSECP256K1_BUILD $(JNI_INCLUDES)
+librustsecp256k1_v0_1_0_jni_la_SOURCES = src/java/org_bitcoin_NativeSecp256k1.c src/java/org_bitcoin_Secp256k1Context.c
+librustsecp256k1_v0_1_0_jni_la_CPPFLAGS = -DSECP256K1_BUILD $(JNI_INCLUDES)
 
 noinst_PROGRAMS =
 if USE_BENCHMARK
@@ -161,7 +161,7 @@ gen_%.o: src/gen_%.c
 $(gen_context_BIN): $(gen_context_OBJECTS)
 	$(CC_FOR_BUILD) $(CFLAGS_FOR_BUILD) $(LDFLAGS_FOR_BUILD) $^ -o $@
 
-$(libsecp256k1_la_OBJECTS): src/ecmult_static_context.h
+$(librustsecp256k1_v0_1_0_la_OBJECTS): src/ecmult_static_context.h
 $(tests_OBJECTS): src/ecmult_static_context.h
 $(bench_internal_OBJECTS): src/ecmult_static_context.h
 $(bench_ecmult_OBJECTS): src/ecmult_static_context.h
diff --git a/secp256k1-sys/depend/secp256k1/configure.ac b/secp256k1-sys/depend/secp256k1/configure.ac
index b8340b7..f243e30 100644
--- a/secp256k1-sys/depend/secp256k1/configure.ac
+++ b/secp256k1-sys/depend/secp256k1/configure.ac
@@ -140,7 +140,7 @@ AC_ARG_ENABLE(external_default_callbacks,
 [use_external_default_callbacks=no])
 
 AC_ARG_ENABLE(jni,
-    AS_HELP_STRING([--enable-jni],[enable libsecp256k1_jni [default=no]]),
+    AS_HELP_STRING([--enable-jni],[enable librustsecp256k1_v0_1_0_jni [default=no]]),
     [use_jni=$enableval],
     [use_jni=no])
diff --git a/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.c b/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.c
index 5b141a9..b595ba6 100644
--- a/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.c
+++ b/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.c
@@ -9,7 +9,7 @@
 #include "lax_der_parsing.h"
 
-int ecdsa_signature_parse_der_lax(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) {
+int rustsecp256k1_v0_1_0_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) {
     size_t rpos, rlen, spos, slen;
     size_t pos = 0;
     size_t lenbyte;
@@ -17,7 +17,7 @@ int ecdsa_signature_parse_der_lax(const secp256k1_context* ctx, secp256k1_ecdsa_
     int overflow = 0;
 
     /* Hack to initialize sig with a correctly-parsed but invalid signature. */
-    secp256k1_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
+    rustsecp256k1_v0_1_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
 
     /* Sequence tag byte */
     if (pos == inputlen || input[pos] != 0x30) {
@@ -139,11 +139,11 @@ int ecdsa_signature_parse_der_lax(const secp256k1_context* ctx, secp256k1_ecdsa_
     }
 
     if (!overflow) {
-        overflow = !secp256k1_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
+        overflow = !rustsecp256k1_v0_1_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
     }
     if (overflow) {
         memset(tmpsig, 0, 64);
-        secp256k1_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
+        rustsecp256k1_v0_1_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
     }
     return 1;
 }
diff --git a/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.h b/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.h
index 7eaf63b..47be01a 100644
--- a/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.h
+++ b/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.h
@@ -26,8 +26,8 @@
  * certain violations are easily supported. You may need to adapt it.
  *
  * Do not use this for new systems. Use well-defined DER or compact signatures
- * instead if you have the choice (see secp256k1_ecdsa_signature_parse_der and
- * secp256k1_ecdsa_signature_parse_compact).
+ * instead if you have the choice (see rustsecp256k1_v0_1_0_ecdsa_signature_parse_der and
+ * rustsecp256k1_v0_1_0_ecdsa_signature_parse_compact).
  *
  * The supported violations are:
 * - All numbers are parsed as nonnegative integers, even though X.690-0207
@@ -77,9 +77,9 @@ extern "C" {
 *  encoded numbers are out of range, signature validation with it is
 *  guaranteed to fail for every message and public key.
 */
-int ecdsa_signature_parse_der_lax(
-    const secp256k1_context* ctx,
-    secp256k1_ecdsa_signature* sig,
+int rustsecp256k1_v0_1_0_ecdsa_signature_parse_der_lax(
+    const rustsecp256k1_v0_1_0_context* ctx,
+    rustsecp256k1_v0_1_0_ecdsa_signature* sig,
     const unsigned char *input,
     size_t inputlen
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
diff --git a/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c b/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c
index c2e63b4..6ccaac6 100644
--- a/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c
+++ b/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c
@@ -9,7 +9,7 @@
 #include "lax_der_privatekey_parsing.h"
 
-int ec_privkey_import_der(const secp256k1_context* ctx, unsigned char *out32, const unsigned char *privkey, size_t privkeylen) {
+int ec_privkey_import_der(const rustsecp256k1_v0_1_0_context* ctx, unsigned char *out32, const unsigned char *privkey, size_t privkeylen) {
     const unsigned char *end = privkey + privkeylen;
     int lenb = 0;
     int len = 0;
@@ -46,17 +46,17 @@ int ec_privkey_import_der(const secp256k1_context* ctx, unsigned char *out32, co
         return 0;
     }
     memcpy(out32 + 32 - privkey[1], privkey + 2, privkey[1]);
-    if (!secp256k1_ec_seckey_verify(ctx, out32)) {
+    if (!rustsecp256k1_v0_1_0_ec_seckey_verify(ctx, out32)) {
         memset(out32, 0, 32);
         return 0;
     }
     return 1;
 }
 
-int ec_privkey_export_der(const secp256k1_context *ctx, unsigned char *privkey, size_t *privkeylen, const unsigned char *key32, int compressed) {
-    secp256k1_pubkey pubkey;
+int ec_privkey_export_der(const rustsecp256k1_v0_1_0_context *ctx, unsigned char *privkey, size_t *privkeylen, const unsigned char *key32, int compressed) {
+    rustsecp256k1_v0_1_0_pubkey pubkey;
     size_t pubkeylen = 0;
-    if (!secp256k1_ec_pubkey_create(ctx, &pubkey, key32)) {
+    if (!rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey, key32)) {
         *privkeylen = 0;
         return 0;
     }
@@ -80,7 +80,7 @@ int ec_privkey_export_der(const secp256k1_context *ctx, unsigned char *privkey,
         memcpy(ptr, key32, 32); ptr += 32;
         memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle);
         pubkeylen = 33;
-        secp256k1_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED);
+        rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED);
         ptr += pubkeylen;
         *privkeylen = ptr - privkey;
     } else {
@@ -105,7 +105,7 @@ int ec_privkey_export_der(const secp256k1_context *ctx, unsigned char *privkey,
         memcpy(ptr, key32, 32); ptr += 32;
         memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle);
         pubkeylen = 65;
-        secp256k1_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_UNCOMPRESSED);
+        rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_UNCOMPRESSED);
         ptr += pubkeylen;
         *privkeylen = ptr - privkey;
     }
diff --git a/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h b/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h
index fece261..f7751cc 100644
--- a/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h
+++ b/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h
@@ -52,10 +52,10 @@ extern "C" {
 *  simple 32-byte private keys are sufficient.
 *
 *  Note that this function does not guarantee correct DER output. It is
- *  guaranteed to be parsable by secp256k1_ec_privkey_import_der
+ *  guaranteed to be parsable by rustsecp256k1_v0_1_0_ec_privkey_import_der
 */
 SECP256K1_WARN_UNUSED_RESULT int ec_privkey_export_der(
-    const secp256k1_context* ctx,
+    const rustsecp256k1_v0_1_0_context* ctx,
     unsigned char *privkey,
     size_t *privkeylen,
     const unsigned char *seckey,
@@ -77,7 +77,7 @@ SECP256K1_WARN_UNUSED_RESULT int ec_privkey_export_der(
 *  key.
 */
 SECP256K1_WARN_UNUSED_RESULT int ec_privkey_import_der(
-    const secp256k1_context* ctx,
+    const rustsecp256k1_v0_1_0_context* ctx,
     unsigned char *seckey,
     const unsigned char *privkey,
     size_t privkeylen
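Note that the contrib helpers keep their unprefixed ec_privkey_* names
while calling the prefixed library internally. A minimal sketch of the
export/import round trip (not part of the patch; the function name and
buffer size are illustrative, and ctx must be initialized for signing
since export derives the public key):

    #include <stddef.h>
    #include <secp256k1.h>
    #include "lax_der_privatekey_parsing.h"

    /* Round-trip a 32-byte secret key through DER; returns 1 on success. */
    int roundtrip_der(const rustsecp256k1_v0_1_0_context *ctx,
                      const unsigned char key32[32]) {
        unsigned char der[300];   /* ample for either encoding */
        size_t derlen = sizeof(der);
        unsigned char back[32];
        if (!ec_privkey_export_der(ctx, der, &derlen, key32, 1 /* compressed */))
            return 0;
        return ec_privkey_import_der(ctx, back, der, derlen);
    }
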
diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1.h b/secp256k1-sys/depend/secp256k1/include/secp256k1.h
index 78ac6bf..808d1d7 100644
--- a/secp256k1-sys/depend/secp256k1/include/secp256k1.h
+++ b/secp256k1-sys/depend/secp256k1/include/secp256k1.h
@@ -35,13 +35,13 @@ extern "C" {
  * A constructed context can safely be used from multiple threads
  * simultaneously, but API calls that take a non-const pointer to a context
  * need exclusive access to it. In particular this is the case for
- * secp256k1_context_destroy, secp256k1_context_preallocated_destroy,
- * and secp256k1_context_randomize.
+ * rustsecp256k1_v0_1_0_context_destroy, rustsecp256k1_v0_1_0_context_preallocated_destroy,
+ * and rustsecp256k1_v0_1_0_context_randomize.
  *
  * Regarding randomization, either do it once at creation time (in which case
  * you do not need any locking for the other calls), or use a read-write lock.
  */
-typedef struct secp256k1_context_struct secp256k1_context;
+typedef struct rustsecp256k1_v0_1_0_context_struct rustsecp256k1_v0_1_0_context;
 
 /** Opaque data structure that holds rewriteable "scratch space"
  *
@@ -54,7 +54,7 @@ typedef struct secp256k1_context_struct secp256k1_context;
  * Unlike the context object, this cannot safely be shared between threads
  * without additional synchronization logic.
  */
-typedef struct secp256k1_scratch_space_struct secp256k1_scratch_space;
+typedef struct rustsecp256k1_v0_1_0_scratch_space_struct rustsecp256k1_v0_1_0_scratch_space;
 
 /** Opaque data structure that holds a parsed and valid public key.
  *
@@ -62,11 +62,11 @@ typedef struct secp256k1_scratch_space_struct secp256k1_scratch_space;
  * guaranteed to be portable between different platforms or versions. It is
  * however guaranteed to be 64 bytes in size, and can be safely copied/moved.
  * If you need to convert to a format suitable for storage, transmission, or
- * comparison, use secp256k1_ec_pubkey_serialize and secp256k1_ec_pubkey_parse.
+ * comparison, use rustsecp256k1_v0_1_0_ec_pubkey_serialize and rustsecp256k1_v0_1_0_ec_pubkey_parse.
  */
 typedef struct {
     unsigned char data[64];
-} secp256k1_pubkey;
+} rustsecp256k1_v0_1_0_pubkey;
 
 /** Opaque data structure that holds a parsed ECDSA signature.
  *
@@ -74,12 +74,12 @@
  * guaranteed to be portable between different platforms or versions. It is
  * however guaranteed to be 64 bytes in size, and can be safely copied/moved.
  * If you need to convert to a format suitable for storage, transmission, or
- * comparison, use the secp256k1_ecdsa_signature_serialize_* and
- * secp256k1_ecdsa_signature_parse_* functions.
+ * comparison, use the rustsecp256k1_v0_1_0_ecdsa_signature_serialize_* and
+ * rustsecp256k1_v0_1_0_ecdsa_signature_parse_* functions.
  */
 typedef struct {
     unsigned char data[64];
-} secp256k1_ecdsa_signature;
+} rustsecp256k1_v0_1_0_ecdsa_signature;
 
 /** A pointer to a function to deterministically generate a nonce.
  *
@@ -97,7 +97,7 @@ typedef struct {
  * Except for test cases, this function should compute some cryptographic hash of
  * the message, the algorithm, the key and the attempt.
  */
-typedef int (*secp256k1_nonce_function)(
+typedef int (*rustsecp256k1_v0_1_0_nonce_function)(
     unsigned char *nonce32,
     const unsigned char *msg32,
     const unsigned char *key32,
@@ -164,13 +164,13 @@ typedef int (*secp256k1_nonce_function)(
 #define SECP256K1_FLAGS_BIT_CONTEXT_SIGN (1 << 9)
 #define SECP256K1_FLAGS_BIT_COMPRESSION (1 << 8)
 
-/** Flags to pass to secp256k1_context_create, secp256k1_context_preallocated_size, and
- * secp256k1_context_preallocated_create. */
+/** Flags to pass to rustsecp256k1_v0_1_0_context_create, rustsecp256k1_v0_1_0_context_preallocated_size, and
+ * rustsecp256k1_v0_1_0_context_preallocated_create. */
 #define SECP256K1_CONTEXT_VERIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_VERIFY)
 #define SECP256K1_CONTEXT_SIGN (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_SIGN)
 #define SECP256K1_CONTEXT_NONE (SECP256K1_FLAGS_TYPE_CONTEXT)
 
-/** Flag to pass to secp256k1_ec_pubkey_serialize and secp256k1_ec_privkey_export. */
+/** Flag to pass to rustsecp256k1_v0_1_0_ec_pubkey_serialize and rustsecp256k1_v0_1_0_ec_privkey_export. */
 #define SECP256K1_EC_COMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION | SECP256K1_FLAGS_BIT_COMPRESSION)
 #define SECP256K1_EC_UNCOMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION)
 
@@ -186,7 +186,43 @@ typedef int (*secp256k1_nonce_function)(
  * API consistency, but currently do not require expensive precomputations or dynamic
  * allocations.
  */
-SECP256K1_API extern const secp256k1_context *secp256k1_context_no_precomp;
+SECP256K1_API extern const rustsecp256k1_v0_1_0_context *rustsecp256k1_v0_1_0_context_no_precomp;
+
+/** Create a secp256k1 context object (in dynamically allocated memory).
+ *
+ * This function uses malloc to allocate memory. It is guaranteed that malloc is
+ * called at most once for every call of this function. If you need to avoid dynamic
+ * memory allocation entirely, see the functions in rustsecp256k1_v0_1_0_preallocated.h.
+ *
+ * Returns: a newly created context object.
+ * In:      flags: which parts of the context to initialize.
+ *
+ * See also rustsecp256k1_v0_1_0_context_randomize.
+ */
+
+/** Copy a secp256k1 context object (into dynamically allocated memory).
+ *
+ * This function uses malloc to allocate memory. It is guaranteed that malloc is
+ * called at most once for every call of this function. If you need to avoid dynamic
+ * memory allocation entirely, see the functions in rustsecp256k1_v0_1_0_preallocated.h.
+ *
+ * Returns: a newly created context object.
+ * Args:    ctx: an existing context to copy (cannot be NULL)
+ */
+
+/** Destroy a secp256k1 context object (created in dynamically allocated memory).
+ *
+ * The context pointer may not be used afterwards.
+ *
+ * The context to destroy must have been created using rustsecp256k1_v0_1_0_context_create
+ * or rustsecp256k1_v0_1_0_context_clone. If the context has instead been created using
+ * rustsecp256k1_v0_1_0_context_preallocated_create or rustsecp256k1_v0_1_0_context_preallocated_clone, the
+ * behaviour is undefined. In that case, rustsecp256k1_v0_1_0_context_preallocated_destroy must
+ * be used instead.
+ *
+ * Args: ctx: an existing context to destroy, constructed using
+ *            rustsecp256k1_v0_1_0_context_create or rustsecp256k1_v0_1_0_context_clone
+ */
 
 /** Set a callback function to be called when an illegal argument is passed to
  * an API call. It will only trigger for violations that are mentioned
@@ -209,11 +245,11 @@ SECP256K1_API extern const secp256k1_context *secp256k1_context_no_precomp;
  * USE_EXTERNAL_DEFAULT_CALLBACKS is defined, which is the case if the build
  * has been configured with --enable-external-default-callbacks. Then the
  * following two symbols must be provided to link against:
- *  - void secp256k1_default_illegal_callback_fn(const char* message, void* data);
- *  - void secp256k1_default_error_callback_fn(const char* message, void* data);
+ *  - void rustsecp256k1_v0_1_0_default_illegal_callback_fn(const char* message, void* data);
+ *  - void rustsecp256k1_v0_1_0_default_error_callback_fn(const char* message, void* data);
  * The library can call these default handlers even before a proper callback data
- * pointer could have been set using secp256k1_context_set_illegal_callback or
- * secp256k1_context_set_illegal_callback, e.g., when the creation of a context
+ * pointer could have been set using rustsecp256k1_v0_1_0_context_set_illegal_callback or
+ * rustsecp256k1_v0_1_0_context_set_illegal_callback, e.g., when the creation of a context
  * fails. In this case, the corresponding default handler will be called with
  * the data pointer argument set to NULL.
  *
@@ -223,10 +259,10 @@ SECP256K1_API extern const secp256k1_context *secp256k1_context_no_precomp;
  *            (NULL restores the default handler.)
  *      data: the opaque pointer to pass to fun above.
  *
- * See also secp256k1_context_set_error_callback.
+ * See also rustsecp256k1_v0_1_0_context_set_error_callback.
  */
-SECP256K1_API void secp256k1_context_set_illegal_callback(
-    secp256k1_context* ctx,
+SECP256K1_API void rustsecp256k1_v0_1_0_context_set_illegal_callback(
+    rustsecp256k1_v0_1_0_context* ctx,
     void (*fun)(const char* message, void* data),
     const void* data
 ) SECP256K1_ARG_NONNULL(1);
@@ -237,25 +273,39 @@ SECP256K1_API void secp256k1_context_set_illegal_callback(
  * This can only trigger in case of a hardware failure, miscompilation,
 * memory corruption, serious bug in the library, or other error that can
 * otherwise result in undefined behaviour. It will not trigger due to mere
- * incorrect usage of the API (see secp256k1_context_set_illegal_callback
+ * incorrect usage of the API (see rustsecp256k1_v0_1_0_context_set_illegal_callback
 * for that). After this callback returns, anything may happen, including
 * crashing.
 *
 * Args: ctx:  an existing context object (cannot be NULL)
 * In:   fun:  a pointer to a function to call when an internal error occurs,
 *             taking a message and an opaque pointer (NULL restores the
- *             default handler, see secp256k1_context_set_illegal_callback
+ *             default handler, see rustsecp256k1_v0_1_0_context_set_illegal_callback
 *             for details).
 *       data: the opaque pointer to pass to fun above.
 *
- * See also secp256k1_context_set_illegal_callback.
+ * See also rustsecp256k1_v0_1_0_context_set_illegal_callback.
 */
-SECP256K1_API void secp256k1_context_set_error_callback(
-    secp256k1_context* ctx,
+SECP256K1_API void rustsecp256k1_v0_1_0_context_set_error_callback(
+    rustsecp256k1_v0_1_0_context* ctx,
     void (*fun)(const char* message, void* data),
     const void* data
 ) SECP256K1_ARG_NONNULL(1);
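The two callback setters keep their semantics under the renamed symbols.
A minimal sketch of installing a logging handler (not part of the patch;
the handler and function names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <secp256k1.h>

    /* Illustrative handler: after an internal error, continuing is unsafe. */
    static void fail_loudly(const char *message, void *data) {
        (void)data;  /* no per-callback state needed here */
        fprintf(stderr, "secp256k1: %s\n", message);
        abort();
    }

    void install_callbacks(rustsecp256k1_v0_1_0_context *ctx) {
        rustsecp256k1_v0_1_0_context_set_illegal_callback(ctx, fail_loudly, NULL);
        rustsecp256k1_v0_1_0_context_set_error_callback(ctx, fail_loudly, NULL);
    }
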
 
+/** Create a secp256k1 scratch space object.
+ *
+ * Returns: a newly created scratch space.
+ * Args: ctx:  an existing context object (cannot be NULL)
+ * In:   size: amount of memory to be available as scratch space. Some extra
+ *             (<100 bytes) will be allocated for extra accounting.
+ */
+
+/** Destroy a secp256k1 scratch space.
+ *
+ * The pointer may not be used afterwards.
+ * Args: ctx:     a secp256k1 context object.
+ *       scratch: space to destroy
+ */
 
 /** Parse a variable-length public key into the pubkey object.
  *
  * 0x03), uncompressed (65 bytes, header byte 0x04), or hybrid (65 bytes, header
  * byte 0x06 or 0x07) format public keys.
  */
-SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_parse(
-    const secp256k1_context* ctx,
-    secp256k1_pubkey* pubkey,
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_pubkey_parse(
+    const rustsecp256k1_v0_1_0_context* ctx,
+    rustsecp256k1_v0_1_0_pubkey* pubkey,
     const unsigned char *input,
     size_t inputlen
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@@ -288,16 +338,16 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_parse(
  * In/Out: outputlen: a pointer to an integer which is initially set to the
  *                    size of output, and is overwritten with the written
  *                    size.
- * In:     pubkey:    a pointer to a secp256k1_pubkey containing an
+ * In:     pubkey:    a pointer to a rustsecp256k1_v0_1_0_pubkey containing an
  *                    initialized public key.
  *         flags:     SECP256K1_EC_COMPRESSED if serialization should be in
  *                    compressed format, otherwise SECP256K1_EC_UNCOMPRESSED.
  */
-SECP256K1_API int secp256k1_ec_pubkey_serialize(
-    const secp256k1_context* ctx,
+SECP256K1_API int rustsecp256k1_v0_1_0_ec_pubkey_serialize(
+    const rustsecp256k1_v0_1_0_context* ctx,
     unsigned char *output,
     size_t *outputlen,
-    const secp256k1_pubkey* pubkey,
+    const rustsecp256k1_v0_1_0_pubkey* pubkey,
     unsigned int flags
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
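A minimal sketch of the parse/serialize pair under the renamed symbols
(not part of the patch; the function name is illustrative):

    #include <secp256k1.h>

    /* Parse a 33-byte compressed key, re-serialize it uncompressed. */
    int reserialize(const rustsecp256k1_v0_1_0_context *ctx,
                    const unsigned char in33[33], unsigned char out65[65]) {
        rustsecp256k1_v0_1_0_pubkey pk;
        size_t outlen = 65;  /* in: buffer size, out: bytes written */
        if (!rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pk, in33, 33))
            return 0;
        return rustsecp256k1_v0_1_0_ec_pubkey_serialize(
            ctx, out65, &outlen, &pk, SECP256K1_EC_UNCOMPRESSED);
    }
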
@@ -316,9 +366,9 @@ SECP256K1_API int secp256k1_ec_pubkey_serialize(
  * S are zero, the resulting sig value is guaranteed to fail validation for any
  * message and public key.
  */
-SECP256K1_API int secp256k1_ecdsa_signature_parse_compact(
-    const secp256k1_context* ctx,
-    secp256k1_ecdsa_signature* sig,
+SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_signature_parse_compact(
+    const rustsecp256k1_v0_1_0_context* ctx,
+    rustsecp256k1_v0_1_0_ecdsa_signature* sig,
     const unsigned char *input64
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@@ -337,9 +387,9 @@ SECP256K1_API int secp256k1_ecdsa_signature_parse_compact(
  * encoded numbers are out of range, signature validation with it is
  * guaranteed to fail for every message and public key.
  */
-SECP256K1_API int secp256k1_ecdsa_signature_parse_der(
-    const secp256k1_context* ctx,
-    secp256k1_ecdsa_signature* sig,
+SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(
+    const rustsecp256k1_v0_1_0_context* ctx,
+    rustsecp256k1_v0_1_0_ecdsa_signature* sig,
     const unsigned char *input,
     size_t inputlen
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@@ -355,11 +405,11 @@ SECP256K1_API int secp256k1_ecdsa_signature_parse_der(
  *             if 0 was returned).
  * In:     sig: a pointer to an initialized signature object
  */
-SECP256K1_API int secp256k1_ecdsa_signature_serialize_der(
-    const secp256k1_context* ctx,
+SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_signature_serialize_der(
+    const rustsecp256k1_v0_1_0_context* ctx,
     unsigned char *output,
     size_t *outputlen,
-    const secp256k1_ecdsa_signature* sig
+    const rustsecp256k1_v0_1_0_ecdsa_signature* sig
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
 
 /** Serialize an ECDSA signature in compact (64 byte) format.
@@ -369,12 +419,12 @@ SECP256K1_API int secp256k1_ecdsa_signature_serialize_der(
  * Out:    output64: a pointer to a 64-byte array to store the compact serialization
  * In:     sig:      a pointer to an initialized signature object
  *
- * See secp256k1_ecdsa_signature_parse_compact for details about the encoding.
+ * See rustsecp256k1_v0_1_0_ecdsa_signature_parse_compact for details about the encoding.
  */
-SECP256K1_API int secp256k1_ecdsa_signature_serialize_compact(
-    const secp256k1_context* ctx,
+SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_signature_serialize_compact(
+    const rustsecp256k1_v0_1_0_context* ctx,
     unsigned char *output64,
-    const secp256k1_ecdsa_signature* sig
+    const rustsecp256k1_v0_1_0_ecdsa_signature* sig
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
 
 /** Verify an ECDSA signature.
@@ -390,16 +440,16 @@ SECP256K1_API int secp256k1_ecdsa_signature_serialize_compact(
  * form are accepted.
  *
  * If you need to accept ECDSA signatures from sources that do not obey this
- * rule, apply secp256k1_ecdsa_signature_normalize to the signature prior to
+ * rule, apply rustsecp256k1_v0_1_0_ecdsa_signature_normalize to the signature prior to
  * validation, but be aware that doing so results in malleable signatures.
  *
  * For details, see the comments for that function.
  */
-SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ecdsa_verify(
-    const secp256k1_context* ctx,
-    const secp256k1_ecdsa_signature *sig,
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ecdsa_verify(
+    const rustsecp256k1_v0_1_0_context* ctx,
+    const rustsecp256k1_v0_1_0_ecdsa_signature *sig,
     const unsigned char *msg32,
-    const secp256k1_pubkey *pubkey
+    const rustsecp256k1_v0_1_0_pubkey *pubkey
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
 
 /** Convert a signature to a normalized lower-S form.
@@ -439,25 +489,25 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ecdsa_verify(
  * accept various non-unique encodings, so care should be taken when this
  * property is required for an application.
  *
- * The secp256k1_ecdsa_sign function will by default create signatures in the
- * lower-S form, and secp256k1_ecdsa_verify will not accept others. In case
+ * The rustsecp256k1_v0_1_0_ecdsa_sign function will by default create signatures in the
+ * lower-S form, and rustsecp256k1_v0_1_0_ecdsa_verify will not accept others. In case
  * signatures come from a system that cannot enforce this property,
- * secp256k1_ecdsa_signature_normalize must be called before verification.
+ * rustsecp256k1_v0_1_0_ecdsa_signature_normalize must be called before verification.
  */
-SECP256K1_API int secp256k1_ecdsa_signature_normalize(
-    const secp256k1_context* ctx,
-    secp256k1_ecdsa_signature *sigout,
-    const secp256k1_ecdsa_signature *sigin
+SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_signature_normalize(
+    const rustsecp256k1_v0_1_0_context* ctx,
+    rustsecp256k1_v0_1_0_ecdsa_signature *sigout,
+    const rustsecp256k1_v0_1_0_ecdsa_signature *sigin
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(3);
 
 /** An implementation of RFC6979 (using HMAC-SHA256) as nonce generation function.
  * If a data pointer is passed, it is assumed to be a pointer to 32 bytes of
  * extra entropy.
  */
-SECP256K1_API extern const secp256k1_nonce_function secp256k1_nonce_function_rfc6979;
+SECP256K1_API extern const rustsecp256k1_v0_1_0_nonce_function rustsecp256k1_v0_1_0_nonce_function_rfc6979;
 
-/** A default safe nonce generation function (currently equal to secp256k1_nonce_function_rfc6979). */
-SECP256K1_API extern const secp256k1_nonce_function secp256k1_nonce_function_default;
+/** A default safe nonce generation function (currently equal to rustsecp256k1_v0_1_0_nonce_function_rfc6979). */
+SECP256K1_API extern const rustsecp256k1_v0_1_0_nonce_function rustsecp256k1_v0_1_0_nonce_function_default;
 
 /** Create an ECDSA signature.
  *
@@ -467,18 +517,18 @@ SECP256K1_API extern const secp256k1_nonce_function secp256k1_nonce_function_def
  * Out: sig:     pointer to an array where the signature will be placed (cannot be NULL)
  * In:  msg32:   the 32-byte message hash being signed (cannot be NULL)
  *      seckey:  pointer to a 32-byte secret key (cannot be NULL)
- *      noncefp: pointer to a nonce generation function. If NULL, secp256k1_nonce_function_default is used
+ *      noncefp: pointer to a nonce generation function. If NULL, rustsecp256k1_v0_1_0_nonce_function_default is used
  *      ndata:   pointer to arbitrary data used by the nonce generation function (can be NULL)
  *
  * The created signature is always in lower-S form. See
- * secp256k1_ecdsa_signature_normalize for more details.
+ * rustsecp256k1_v0_1_0_ecdsa_signature_normalize for more details.
  */
-SECP256K1_API int secp256k1_ecdsa_sign(
-    const secp256k1_context* ctx,
-    secp256k1_ecdsa_signature *sig,
+SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_sign(
+    const rustsecp256k1_v0_1_0_context* ctx,
+    rustsecp256k1_v0_1_0_ecdsa_signature *sig,
     const unsigned char *msg32,
     const unsigned char *seckey,
-    secp256k1_nonce_function noncefp,
+    rustsecp256k1_v0_1_0_nonce_function noncefp,
     const void *ndata
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
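A minimal sign-and-verify sketch under the renamed symbols (not part of
the patch; names illustrative, and ctx must be initialized with both
SECP256K1_CONTEXT_SIGN and SECP256K1_CONTEXT_VERIFY):

    #include <secp256k1.h>

    int sign_and_check(const rustsecp256k1_v0_1_0_context *ctx,
                       const unsigned char msg32[32],
                       const unsigned char seckey[32]) {
        rustsecp256k1_v0_1_0_ecdsa_signature sig;
        rustsecp256k1_v0_1_0_pubkey pk;
        if (!rustsecp256k1_v0_1_0_ec_seckey_verify(ctx, seckey)) return 0;
        if (!rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pk, seckey)) return 0;
        /* NULL noncefp selects the default RFC6979 nonce function. */
        if (!rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &sig, msg32, seckey, NULL, NULL))
            return 0;
        return rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pk);
    }
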
@@ -489,8 +539,8 @@ SECP256K1_API int secp256k1_ecdsa_sign(
  * Args: ctx:    pointer to a context object (cannot be NULL)
  * In:   seckey: pointer to a 32-byte secret key (cannot be NULL)
  */
-SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_seckey_verify(
-    const secp256k1_context* ctx,
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_seckey_verify(
+    const rustsecp256k1_v0_1_0_context* ctx,
     const unsigned char *seckey
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
@@ -502,9 +552,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_seckey_verify(
  * Out: pubkey: pointer to the created public key (cannot be NULL)
  * In:  seckey: pointer to a 32-byte private key (cannot be NULL)
  */
-SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_create(
-    const secp256k1_context* ctx,
-    secp256k1_pubkey *pubkey,
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_pubkey_create(
+    const rustsecp256k1_v0_1_0_context* ctx,
+    rustsecp256k1_v0_1_0_pubkey *pubkey,
     const unsigned char *seckey
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@@ -514,8 +564,8 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_create(
  * Args: ctx: pointer to a context object
  * In/Out: seckey: pointer to the 32-byte private key to be negated (cannot be NULL)
  */
-SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_negate(
-    const secp256k1_context* ctx,
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_privkey_negate(
+    const rustsecp256k1_v0_1_0_context* ctx,
     unsigned char *seckey
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
@@ -525,9 +575,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_negate(
  * Args: ctx: pointer to a context object
  * In/Out: pubkey: pointer to the public key to be negated (cannot be NULL)
  */
-SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_negate(
-    const secp256k1_context* ctx,
-    secp256k1_pubkey *pubkey
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_pubkey_negate(
+    const rustsecp256k1_v0_1_0_context* ctx,
+    rustsecp256k1_v0_1_0_pubkey *pubkey
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
 
 /** Tweak a private key by adding tweak to it.
@@ -539,8 +589,8 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_negate(
  * In/Out: seckey: pointer to a 32-byte private key.
  * In:     tweak:  pointer to a 32-byte tweak.
  */
-SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_tweak_add(
-    const secp256k1_context* ctx,
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_privkey_tweak_add(
+    const rustsecp256k1_v0_1_0_context* ctx,
     unsigned char *seckey,
     const unsigned char *tweak
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@@ -555,9 +605,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_tweak_add(
  * In/Out: pubkey: pointer to a public key object.
  * In:     tweak:  pointer to a 32-byte tweak.
  */
-SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_tweak_add(
-    const secp256k1_context* ctx,
-    secp256k1_pubkey *pubkey,
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_pubkey_tweak_add(
+    const rustsecp256k1_v0_1_0_context* ctx,
+    rustsecp256k1_v0_1_0_pubkey *pubkey,
     const unsigned char *tweak
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@@ -568,8 +618,8 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_tweak_add(
  * In/Out: seckey: pointer to a 32-byte private key.
  * In:     tweak:  pointer to a 32-byte tweak.
  */
-SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_tweak_mul(
-    const secp256k1_context* ctx,
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_privkey_tweak_mul(
+    const rustsecp256k1_v0_1_0_context* ctx,
     unsigned char *seckey,
     const unsigned char *tweak
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@@ -582,9 +632,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_tweak_mul(
  * In/Out: pubkey: pointer to a public key object.
  * In:     tweak:  pointer to a 32-byte tweak.
  */
-SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_tweak_mul(
-    const secp256k1_context* ctx,
-    secp256k1_pubkey *pubkey,
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_pubkey_tweak_mul(
+    const rustsecp256k1_v0_1_0_context* ctx,
+    rustsecp256k1_v0_1_0_pubkey *pubkey,
     const unsigned char *tweak
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@@ -609,12 +659,12 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_tweak_mul(
  * guaranteed and may change in the future. It is safe to call this function on
  * contexts not initialized for signing; then it will have no effect and return 1.
  *
- * You should call this after secp256k1_context_create or
- * secp256k1_context_clone (and secp256k1_context_preallocated_create or
- * secp256k1_context_clone, resp.), and you may call this repeatedly afterwards.
+ * You should call this after rustsecp256k1_v0_1_0_context_create or
+ * rustsecp256k1_v0_1_0_context_clone (and rustsecp256k1_v0_1_0_context_preallocated_create or
+ * rustsecp256k1_v0_1_0_context_clone, resp.), and you may call this repeatedly afterwards.
  */
-SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_context_randomize(
-    secp256k1_context* ctx,
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_context_randomize(
+    rustsecp256k1_v0_1_0_context* ctx,
     const unsigned char *seed32
 ) SECP256K1_ARG_NONNULL(1);
 
@@ -627,10 +677,10 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_context_randomize(
  * In:   ins:  pointer to array of pointers to public keys (cannot be NULL)
  *       n:    the number of public keys to add together (must be at least 1)
  */
-SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_combine(
-    const secp256k1_context* ctx,
-    secp256k1_pubkey *out,
-    const secp256k1_pubkey * const * ins,
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_pubkey_combine(
+    const rustsecp256k1_v0_1_0_context* ctx,
+    rustsecp256k1_v0_1_0_pubkey *out,
+    const rustsecp256k1_v0_1_0_pubkey * const * ins,
     size_t n
 ) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
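The tweak pair above is what BIP32-style derivation builds on: adding
the same tweak to a secret key and to its public key must land on the
same point. A minimal consistency sketch (not part of the patch; name
illustrative, ctx initialized for signing and verification):

    #include <string.h>
    #include <secp256k1.h>

    int tweak_consistent(const rustsecp256k1_v0_1_0_context *ctx,
                         unsigned char seckey[32], const unsigned char tweak[32]) {
        rustsecp256k1_v0_1_0_pubkey pk, pk2;
        unsigned char a[33], b[33];
        size_t alen = 33, blen = 33;
        if (!rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pk, seckey)) return 0;
        if (!rustsecp256k1_v0_1_0_ec_privkey_tweak_add(ctx, seckey, tweak)) return 0;
        if (!rustsecp256k1_v0_1_0_ec_pubkey_tweak_add(ctx, &pk, tweak)) return 0;
        if (!rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pk2, seckey)) return 0;
        /* Compare via serialized form; the serialization is canonical. */
        rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, a, &alen, &pk, SECP256K1_EC_COMPRESSED);
        rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, b, &blen, &pk2, SECP256K1_EC_COMPRESSED);
        return memcmp(a, b, 33) == 0;
    }
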
diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1_ecdh.h b/secp256k1-sys/depend/secp256k1/include/secp256k1_ecdh.h
index df5fde2..72128c1 100644
--- a/secp256k1-sys/depend/secp256k1/include/secp256k1_ecdh.h
+++ b/secp256k1-sys/depend/secp256k1/include/secp256k1_ecdh.h
@@ -15,7 +15,7 @@ extern "C" {
  *          y:    pointer to a 32-byte y coordinate
  *          data: Arbitrary data pointer that is passed through
  */
-typedef int (*secp256k1_ecdh_hash_function)(
+typedef int (*rustsecp256k1_v0_1_0_ecdh_hash_function)(
   unsigned char *output,
   const unsigned char *x,
   const unsigned char *y,
@@ -23,28 +23,28 @@ typedef int (*secp256k1_ecdh_hash_function)(
 );
 
 /** An implementation of SHA256 hash function that applies to compressed public key. */
-SECP256K1_API extern const secp256k1_ecdh_hash_function secp256k1_ecdh_hash_function_sha256;
+SECP256K1_API extern const rustsecp256k1_v0_1_0_ecdh_hash_function rustsecp256k1_v0_1_0_ecdh_hash_function_sha256;
 
-/** A default ecdh hash function (currently equal to secp256k1_ecdh_hash_function_sha256). */
-SECP256K1_API extern const secp256k1_ecdh_hash_function secp256k1_ecdh_hash_function_default;
+/** A default ecdh hash function (currently equal to rustsecp256k1_v0_1_0_ecdh_hash_function_sha256). */
+SECP256K1_API extern const rustsecp256k1_v0_1_0_ecdh_hash_function rustsecp256k1_v0_1_0_ecdh_hash_function_default;
 
 /** Compute an EC Diffie-Hellman secret in constant time
  *  Returns: 1: exponentiation was successful
  *           0: scalar was invalid (zero or overflow)
  *  Args:    ctx:     pointer to a context object (cannot be NULL)
  *  Out:     output:  pointer to an array to be filled by the function
- *  In:      pubkey:  a pointer to a secp256k1_pubkey containing an
+ *  In:      pubkey:  a pointer to a rustsecp256k1_v0_1_0_pubkey containing an
  *                    initialized public key
  *           privkey: a 32-byte scalar with which to multiply the point
- *           hashfp:  pointer to a hash function. If NULL, secp256k1_ecdh_hash_function_sha256 is used
+ *           hashfp:  pointer to a hash function. If NULL, rustsecp256k1_v0_1_0_ecdh_hash_function_sha256 is used
  *           data:    Arbitrary data pointer that is passed through
  */
-SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ecdh(
-    const secp256k1_context* ctx,
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ecdh(
+    const rustsecp256k1_v0_1_0_context* ctx,
     unsigned char *output,
-    const secp256k1_pubkey *pubkey,
+    const rustsecp256k1_v0_1_0_pubkey *pubkey,
     const unsigned char *privkey,
-    secp256k1_ecdh_hash_function hashfp,
+    rustsecp256k1_v0_1_0_ecdh_hash_function hashfp,
     void *data
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
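A minimal ECDH sketch (not part of the patch; name illustrative). Per
the header above, passing NULL for hashfp selects the SHA256-of-
compressed-point default, so the output is a 32-byte secret:

    #include <secp256k1.h>
    #include <secp256k1_ecdh.h>

    int shared_secret(const rustsecp256k1_v0_1_0_context *ctx,
                      unsigned char out32[32],
                      const rustsecp256k1_v0_1_0_pubkey *their_pk,
                      const unsigned char my_seckey[32]) {
        /* NULL hashfp/data: default SHA256 hash of the compressed point. */
        return rustsecp256k1_v0_1_0_ecdh(ctx, out32, their_pk, my_seckey, NULL, NULL);
    }
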
* In: prealloc: a pointer to a rewritable contiguous block of memory of - * size at least secp256k1_context_preallocated_size(flags) + * size at least rustsecp256k1_v0_1_0_context_preallocated_size(flags) * bytes, as detailed above (cannot be NULL) * flags: which parts of the context to initialize. * - * See also secp256k1_context_randomize (in secp256k1.h) - * and secp256k1_context_preallocated_destroy. + * See also rustsecp256k1_v0_1_0_context_randomize (in secp256k1.h) + * and rustsecp256k1_v0_1_0_context_preallocated_destroy. */ -SECP256K1_API secp256k1_context* secp256k1_context_preallocated_create( +SECP256K1_API rustsecp256k1_v0_1_0_context* rustsecp256k1_v0_1_0_context_preallocated_create( void* prealloc, unsigned int flags ) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT; @@ -72,28 +72,28 @@ SECP256K1_API secp256k1_context* secp256k1_context_preallocated_create( * Returns: the required size of the caller-provided memory block. * In: ctx: an existing context to copy (cannot be NULL) */ -SECP256K1_API size_t secp256k1_context_preallocated_clone_size( - const secp256k1_context* ctx +SECP256K1_API size_t rustsecp256k1_v0_1_0_context_preallocated_clone_size( + const rustsecp256k1_v0_1_0_context* ctx ) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT; /** Copy a secp256k1 context object into caller-provided memory. * * The caller must provide a pointer to a rewritable contiguous block of memory - * of size at least secp256k1_context_preallocated_size(flags) bytes, suitably + * of size at least rustsecp256k1_v0_1_0_context_preallocated_size(flags) bytes, suitably * aligned to hold an object of any type. * * The block of memory is exclusively owned by the created context object during * the lifetime of this context object, see the description of - * secp256k1_context_preallocated_create for details. + * rustsecp256k1_v0_1_0_context_preallocated_create for details. * * Returns: a newly created context object. * Args: ctx: an existing context to copy (cannot be NULL) * In: prealloc: a pointer to a rewritable contiguous block of memory of - * size at least secp256k1_context_preallocated_size(flags) + * size at least rustsecp256k1_v0_1_0_context_preallocated_size(flags) * bytes, as detailed above (cannot be NULL) */ -SECP256K1_API secp256k1_context* secp256k1_context_preallocated_clone( - const secp256k1_context* ctx, +SECP256K1_API rustsecp256k1_v0_1_0_context* rustsecp256k1_v0_1_0_context_preallocated_clone( + const rustsecp256k1_v0_1_0_context* ctx, void* prealloc ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_WARN_UNUSED_RESULT; @@ -103,22 +103,22 @@ SECP256K1_API secp256k1_context* secp256k1_context_preallocated_clone( * The context pointer may not be used afterwards. * * The context to destroy must have been created using - * secp256k1_context_preallocated_create or secp256k1_context_preallocated_clone. - * If the context has instead been created using secp256k1_context_create or - * secp256k1_context_clone, the behaviour is undefined. In that case, - * secp256k1_context_destroy must be used instead. + * rustsecp256k1_v0_1_0_context_preallocated_create or rustsecp256k1_v0_1_0_context_preallocated_clone. + * If the context has instead been created using rustsecp256k1_v0_1_0_context_create or + * rustsecp256k1_v0_1_0_context_clone, the behaviour is undefined. In that case, + * rustsecp256k1_v0_1_0_context_destroy must be used instead. 
* * If required, it is the responsibility of the caller to deallocate the block * of memory properly after this function returns, e.g., by calling free on the - * preallocated pointer given to secp256k1_context_preallocated_create or - * secp256k1_context_preallocated_clone. + * preallocated pointer given to rustsecp256k1_v0_1_0_context_preallocated_create or + * rustsecp256k1_v0_1_0_context_preallocated_clone. * * Args: ctx: an existing context to destroy, constructed using - * secp256k1_context_preallocated_create or - * secp256k1_context_preallocated_clone (cannot be NULL) + * rustsecp256k1_v0_1_0_context_preallocated_create or + * rustsecp256k1_v0_1_0_context_preallocated_clone (cannot be NULL) */ -SECP256K1_API void secp256k1_context_preallocated_destroy( - secp256k1_context* ctx +SECP256K1_API void rustsecp256k1_v0_1_0_context_preallocated_destroy( + rustsecp256k1_v0_1_0_context* ctx ); #ifdef __cplusplus diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h b/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h index cf6c5ed..5e07402 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h @@ -14,8 +14,8 @@ extern "C" { * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 65 bytes in size, and can be safely copied/moved. * If you need to convert to a format suitable for storage or transmission, use - * the secp256k1_ecdsa_signature_serialize_* and - * secp256k1_ecdsa_signature_parse_* functions. + * the rustsecp256k1_v0_1_0_ecdsa_signature_serialize_* and + * rustsecp256k1_v0_1_0_ecdsa_signature_parse_* functions. * * Furthermore, it is guaranteed that identical signatures (including their * recoverability) will have identical representation, so they can be @@ -23,7 +23,7 @@ extern "C" { */ typedef struct { unsigned char data[65]; -} secp256k1_ecdsa_recoverable_signature; +} rustsecp256k1_v0_1_0_ecdsa_recoverable_signature; /** Parse a compact ECDSA signature (64 bytes + recovery id). * @@ -33,9 +33,9 @@ typedef struct { * In: input64: a pointer to a 64-byte compact signature * recid: the recovery id (0, 1, 2 or 3) */ -SECP256K1_API int secp256k1_ecdsa_recoverable_signature_parse_compact( - const secp256k1_context* ctx, - secp256k1_ecdsa_recoverable_signature* sig, +SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact( + const rustsecp256k1_v0_1_0_context* ctx, + rustsecp256k1_v0_1_0_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -46,10 +46,10 @@ SECP256K1_API int secp256k1_ecdsa_recoverable_signature_parse_compact( * Out: sig: a pointer to a normal signature (cannot be NULL). * In: sigin: a pointer to a recoverable signature (cannot be NULL). */ -SECP256K1_API int secp256k1_ecdsa_recoverable_signature_convert( - const secp256k1_context* ctx, - secp256k1_ecdsa_signature* sig, - const secp256k1_ecdsa_recoverable_signature* sigin +SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert( + const rustsecp256k1_v0_1_0_context* ctx, + rustsecp256k1_v0_1_0_ecdsa_signature* sig, + const rustsecp256k1_v0_1_0_ecdsa_recoverable_signature* sigin ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Serialize an ECDSA signature in compact format (64 bytes + recovery id). 
diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h b/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h
index cf6c5ed..5e07402 100644
--- a/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h
+++ b/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h
@@ -14,8 +14,8 @@ extern "C" {
 *  guaranteed to be portable between different platforms or versions. It is
 *  however guaranteed to be 65 bytes in size, and can be safely copied/moved.
 *  If you need to convert to a format suitable for storage or transmission, use
- *  the secp256k1_ecdsa_signature_serialize_* and
- *  secp256k1_ecdsa_signature_parse_* functions.
+ *  the rustsecp256k1_v0_1_0_ecdsa_signature_serialize_* and
+ *  rustsecp256k1_v0_1_0_ecdsa_signature_parse_* functions.
 *
 *  Furthermore, it is guaranteed that identical signatures (including their
 *  recoverability) will have identical representation, so they can be
@@ -23,7 +23,7 @@ extern "C" {
 */
 typedef struct {
     unsigned char data[65];
-} secp256k1_ecdsa_recoverable_signature;
+} rustsecp256k1_v0_1_0_ecdsa_recoverable_signature;
 
 /** Parse a compact ECDSA signature (64 bytes + recovery id).
 *
@@ -33,9 +33,9 @@ typedef struct {
 *  In:   input64: a pointer to a 64-byte compact signature
 *        recid:   the recovery id (0, 1, 2 or 3)
 */
-SECP256K1_API int secp256k1_ecdsa_recoverable_signature_parse_compact(
-    const secp256k1_context* ctx,
-    secp256k1_ecdsa_recoverable_signature* sig,
+SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(
+    const rustsecp256k1_v0_1_0_context* ctx,
+    rustsecp256k1_v0_1_0_ecdsa_recoverable_signature* sig,
     const unsigned char *input64,
     int recid
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@@ -46,10 +46,10 @@ SECP256K1_API int secp256k1_ecdsa_recoverable_signature_parse_compact(
 *  Out: sig:   a pointer to a normal signature (cannot be NULL).
 *  In:  sigin: a pointer to a recoverable signature (cannot be NULL).
 */
-SECP256K1_API int secp256k1_ecdsa_recoverable_signature_convert(
-    const secp256k1_context* ctx,
-    secp256k1_ecdsa_signature* sig,
-    const secp256k1_ecdsa_recoverable_signature* sigin
+SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert(
+    const rustsecp256k1_v0_1_0_context* ctx,
+    rustsecp256k1_v0_1_0_ecdsa_signature* sig,
+    const rustsecp256k1_v0_1_0_ecdsa_recoverable_signature* sigin
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
 
 /** Serialize an ECDSA signature in compact format (64 bytes + recovery id).
@@ -60,11 +60,11 @@ SECP256K1_API int secp256k1_ecdsa_recoverable_signature_convert(
 *        recid:    a pointer to an integer to hold the recovery id (can be NULL).
 *  In:   sig:      a pointer to an initialized signature object (cannot be NULL)
 */
-SECP256K1_API int secp256k1_ecdsa_recoverable_signature_serialize_compact(
-    const secp256k1_context* ctx,
+SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_serialize_compact(
+    const rustsecp256k1_v0_1_0_context* ctx,
     unsigned char *output64,
     int *recid,
-    const secp256k1_ecdsa_recoverable_signature* sig
+    const rustsecp256k1_v0_1_0_ecdsa_recoverable_signature* sig
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
 
 /** Create a recoverable ECDSA signature.
@@ -75,15 +75,15 @@ SECP256K1_API int secp256k1_ecdsa_recoverable_signature_serialize_compact(
 *  Out: sig:     pointer to an array where the signature will be placed (cannot be NULL)
 *  In:  msg32:   the 32-byte message hash being signed (cannot be NULL)
 *       seckey:  pointer to a 32-byte secret key (cannot be NULL)
- *       noncefp: pointer to a nonce generation function. If NULL, secp256k1_nonce_function_default is used
+ *       noncefp: pointer to a nonce generation function. If NULL, rustsecp256k1_v0_1_0_nonce_function_default is used
 *       ndata:   pointer to arbitrary data used by the nonce generation function (can be NULL)
 */
-SECP256K1_API int secp256k1_ecdsa_sign_recoverable(
-    const secp256k1_context* ctx,
-    secp256k1_ecdsa_recoverable_signature *sig,
+SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(
+    const rustsecp256k1_v0_1_0_context* ctx,
+    rustsecp256k1_v0_1_0_ecdsa_recoverable_signature *sig,
     const unsigned char *msg32,
     const unsigned char *seckey,
-    secp256k1_nonce_function noncefp,
+    rustsecp256k1_v0_1_0_nonce_function noncefp,
     const void *ndata
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
@@ -96,10 +96,10 @@ SECP256K1_API int secp256k1_ecdsa_sign_recoverable(
 *  In:  sig:   pointer to initialized signature that supports pubkey recovery (cannot be NULL)
 *       msg32: the 32-byte message hash assumed to be signed (cannot be NULL)
 */
-SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ecdsa_recover(
-    const secp256k1_context* ctx,
-    secp256k1_pubkey *pubkey,
-    const secp256k1_ecdsa_recoverable_signature *sig,
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ecdsa_recover(
+    const rustsecp256k1_v0_1_0_context* ctx,
+    rustsecp256k1_v0_1_0_pubkey *pubkey,
+    const rustsecp256k1_v0_1_0_ecdsa_recoverable_signature *sig,
     const unsigned char *msg32
 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
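A minimal sign-then-recover sketch for the recovery module (not part of
the patch; name illustrative, ctx initialized for both signing and
verification):

    #include <secp256k1.h>
    #include <secp256k1_recovery.h>

    int sign_then_recover(const rustsecp256k1_v0_1_0_context *ctx,
                          const unsigned char msg32[32],
                          const unsigned char seckey[32]) {
        rustsecp256k1_v0_1_0_ecdsa_recoverable_signature rsig;
        rustsecp256k1_v0_1_0_pubkey recovered;
        unsigned char compact[64];
        int recid;
        if (!rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(ctx, &rsig, msg32,
                                                         seckey, NULL, NULL))
            return 0;
        /* (compact, recid) is the usual 65-byte wire form of the signature. */
        rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_serialize_compact(
            ctx, compact, &recid, &rsig);
        return rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &recovered, &rsig, msg32);
    }
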
secp256k1_gej_add_var""" +def formula_rustsecp256k1_v0_1_0_gej_add_var(branch, a, b): + """libsecp256k1's rustsecp256k1_v0_1_0_gej_add_var""" if branch == 0: return (constraints(), constraints(nonzero={a.Infinity : 'a_infinite'}), b) if branch == 1: @@ -48,7 +48,7 @@ def formula_secp256k1_gej_add_var(branch, a, b): i = -s1 i = i + s2 if branch == 2: - r = formula_secp256k1_gej_double_var(a) + r = formula_rustsecp256k1_v0_1_0_gej_double_var(a) return (constraints(), constraints(zero={h : 'h=0', i : 'i=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}), r) if branch == 3: return (constraints(), constraints(zero={h : 'h=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={i : 'i!=0'}), point_at_infinity()) @@ -71,8 +71,8 @@ def formula_secp256k1_gej_add_var(branch, a, b): ry = ry + h3 return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz)) -def formula_secp256k1_gej_add_ge_var(branch, a, b): - """libsecp256k1's secp256k1_gej_add_ge_var, which assume bz==1""" +def formula_rustsecp256k1_v0_1_0_gej_add_ge_var(branch, a, b): + """libsecp256k1's rustsecp256k1_v0_1_0_gej_add_ge_var, which assumes bz==1""" if branch == 0: return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(nonzero={a.Infinity : 'a_infinite'}), b) if branch == 1: @@ -88,7 +88,7 @@ def formula_secp256k1_gej_add_ge_var(branch, a, b): i = -s1 i = i + s2 if (branch == 2): - r = formula_secp256k1_gej_double_var(a) + r = formula_rustsecp256k1_v0_1_0_gej_double_var(a) return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0', i : 'i=0'}), r) if (branch == 3): return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity()) @@ -110,8 +110,8 @@ def formula_secp256k1_gej_add_ge_var(branch, a, b): ry = ry + h3 return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz)) -def formula_secp256k1_gej_add_zinv_var(branch, a, b): - """libsecp256k1's secp256k1_gej_add_zinv_var""" +def formula_rustsecp256k1_v0_1_0_gej_add_zinv_var(branch, a, b): + """libsecp256k1's rustsecp256k1_v0_1_0_gej_add_zinv_var""" bzinv = b.Z^(-1) if branch == 0: return (constraints(), constraints(nonzero={b.Infinity : 'b_infinite'}), a) @@ -134,7 +134,7 @@ def formula_secp256k1_gej_add_zinv_var(branch, a, b): i = -s1 i = i + s2 if branch == 2: - r = formula_secp256k1_gej_double_var(a) + r = formula_rustsecp256k1_v0_1_0_gej_double_var(a) return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0', i : 'i=0'}), r) if branch == 3: return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity()) @@ -157,8 +157,8 @@ def formula_secp256k1_gej_add_zinv_var(branch, a, b): ry = ry + h3 return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz)) -def formula_secp256k1_gej_add_ge(branch, a, b): - """libsecp256k1's secp256k1_gej_add_ge""" +def formula_rustsecp256k1_v0_1_0_gej_add_ge(branch, a, b): + """libsecp256k1's rustsecp256k1_v0_1_0_gej_add_ge""" zeroes = {} nonzeroes = {} a_infinity = False @@ -229,8 +229,8 @@ def formula_secp256k1_gej_add_ge(branch, a, b): return (constraints(zero={b.Z - 1 : 'b.z=1',
b.Infinity : 'b_finite'}), constraints(zero=zeroes, nonzero=nonzeroes), point_at_infinity()) return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zeroes, nonzero=nonzeroes), jacobianpoint(rx, ry, rz)) -def formula_secp256k1_gej_add_ge_old(branch, a, b): - """libsecp256k1's old secp256k1_gej_add_ge, which fails when ay+by=0 but ax!=bx""" +def formula_rustsecp256k1_v0_1_0_gej_add_ge_old(branch, a, b): + """libsecp256k1's old rustsecp256k1_v0_1_0_gej_add_ge, which fails when ay+by=0 but ax!=bx""" a_infinity = (branch & 1) != 0 zero = {} nonzero = {} @@ -292,15 +292,15 @@ def formula_secp256k1_gej_add_ge_old(branch, a, b): return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zero, nonzero=nonzero), jacobianpoint(rx, ry, rz)) if __name__ == "__main__": - check_symbolic_jacobian_weierstrass("secp256k1_gej_add_var", 0, 7, 5, formula_secp256k1_gej_add_var) - check_symbolic_jacobian_weierstrass("secp256k1_gej_add_ge_var", 0, 7, 5, formula_secp256k1_gej_add_ge_var) - check_symbolic_jacobian_weierstrass("secp256k1_gej_add_zinv_var", 0, 7, 5, formula_secp256k1_gej_add_zinv_var) - check_symbolic_jacobian_weierstrass("secp256k1_gej_add_ge", 0, 7, 16, formula_secp256k1_gej_add_ge) - check_symbolic_jacobian_weierstrass("secp256k1_gej_add_ge_old [should fail]", 0, 7, 4, formula_secp256k1_gej_add_ge_old) + check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_1_0_gej_add_var) + check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_1_0_gej_add_ge_var) + check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_1_0_gej_add_zinv_var) + check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_ge", 0, 7, 16, formula_rustsecp256k1_v0_1_0_gej_add_ge) + check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_1_0_gej_add_ge_old) if len(sys.argv) >= 2 and sys.argv[1] == "--exhaustive": - check_exhaustive_jacobian_weierstrass("secp256k1_gej_add_var", 0, 7, 5, formula_secp256k1_gej_add_var, 43) - check_exhaustive_jacobian_weierstrass("secp256k1_gej_add_ge_var", 0, 7, 5, formula_secp256k1_gej_add_ge_var, 43) - check_exhaustive_jacobian_weierstrass("secp256k1_gej_add_zinv_var", 0, 7, 5, formula_secp256k1_gej_add_zinv_var, 43) - check_exhaustive_jacobian_weierstrass("secp256k1_gej_add_ge", 0, 7, 16, formula_secp256k1_gej_add_ge, 43) - check_exhaustive_jacobian_weierstrass("secp256k1_gej_add_ge_old [should fail]", 0, 7, 4, formula_secp256k1_gej_add_ge_old, 43) + check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_1_0_gej_add_var, 43) + check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_1_0_gej_add_ge_var, 43) + check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_1_0_gej_add_zinv_var, 43) + check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_ge", 0, 7, 16, formula_rustsecp256k1_v0_1_0_gej_add_ge, 43) + check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_1_0_gej_add_ge_old, 43) diff --git a/secp256k1-sys/depend/secp256k1/src/asm/field_10x26_arm.s b/secp256k1-sys/depend/secp256k1/src/asm/field_10x26_arm.s index 9a5bd06..17a6d79 100644 --- 
a/secp256k1-sys/depend/secp256k1/src/asm/field_10x26_arm.s +++ b/secp256k1-sys/depend/secp256k1/src/asm/field_10x26_arm.s @@ -27,8 +27,8 @@ Note: .set field_not_M, 0xfc000000 @ ~M = ~0x3ffffff .align 2 - .global secp256k1_fe_mul_inner - .type secp256k1_fe_mul_inner, %function + .global rustsecp256k1_v0_1_0_fe_mul_inner + .type rustsecp256k1_v0_1_0_fe_mul_inner, %function @ Arguments: @ r0 r Restrict: can overlap with a, not with b @ r1 a @@ -36,7 +36,7 @@ Note: @ Stack (total 4+10*4 = 44) @ sp + #0 saved 'r' pointer @ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9 -secp256k1_fe_mul_inner: +rustsecp256k1_v0_1_0_fe_mul_inner: stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14} sub sp, sp, #48 @ frame=44 + alignment str r0, [sp, #0] @ save result address, we need it only at the end @@ -511,18 +511,18 @@ secp256k1_fe_mul_inner: add sp, sp, #48 ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc} - .size secp256k1_fe_mul_inner, .-secp256k1_fe_mul_inner + .size rustsecp256k1_v0_1_0_fe_mul_inner, .-rustsecp256k1_v0_1_0_fe_mul_inner .align 2 - .global secp256k1_fe_sqr_inner - .type secp256k1_fe_sqr_inner, %function + .global rustsecp256k1_v0_1_0_fe_sqr_inner + .type rustsecp256k1_v0_1_0_fe_sqr_inner, %function @ Arguments: @ r0 r Can overlap with a @ r1 a @ Stack (total 4+10*4 = 44) @ sp + #0 saved 'r' pointer @ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9 -secp256k1_fe_sqr_inner: +rustsecp256k1_v0_1_0_fe_sqr_inner: stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14} sub sp, sp, #48 @ frame=44 + alignment str r0, [sp, #0] @ save result address, we need it only at the end @@ -909,5 +909,5 @@ secp256k1_fe_sqr_inner: add sp, sp, #48 ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc} - .size secp256k1_fe_sqr_inner, .-secp256k1_fe_sqr_inner + .size rustsecp256k1_v0_1_0_fe_sqr_inner, .-rustsecp256k1_v0_1_0_fe_sqr_inner diff --git a/secp256k1-sys/depend/secp256k1/src/bench_ecdh.c b/secp256k1-sys/depend/secp256k1/src/bench_ecdh.c index c1dd5a6..02144a7 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench_ecdh.c +++ b/secp256k1-sys/depend/secp256k1/src/bench_ecdh.c @@ -12,8 +12,8 @@ #include "bench.h" typedef struct { - secp256k1_context *ctx; - secp256k1_pubkey point; + rustsecp256k1_v0_1_0_context *ctx; + rustsecp256k1_v0_1_0_pubkey point; unsigned char scalar[32]; } bench_ecdh_data; @@ -29,11 +29,11 @@ static void bench_ecdh_setup(void* arg) { }; /* create a context with no capabilities */ - data->ctx = secp256k1_context_create(SECP256K1_FLAGS_TYPE_CONTEXT); + data->ctx = rustsecp256k1_v0_1_0_context_create(SECP256K1_FLAGS_TYPE_CONTEXT); for (i = 0; i < 32; i++) { data->scalar[i] = i + 1; } - CHECK(secp256k1_ec_pubkey_parse(data->ctx, &data->point, point, sizeof(point)) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_parse(data->ctx, &data->point, point, sizeof(point)) == 1); } static void bench_ecdh(void* arg) { @@ -42,7 +42,7 @@ static void bench_ecdh(void* arg) { bench_ecdh_data *data = (bench_ecdh_data*)arg; for (i = 0; i < 20000; i++) { - CHECK(secp256k1_ecdh(data->ctx, res, &data->point, data->scalar, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdh(data->ctx, res, &data->point, data->scalar, NULL, NULL) == 1); } } diff --git a/secp256k1-sys/depend/secp256k1/src/bench_ecmult.c b/secp256k1-sys/depend/secp256k1/src/bench_ecmult.c index 7b5d185..55b89a2 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench_ecmult.c +++ b/secp256k1-sys/depend/secp256k1/src/bench_ecmult.c @@ -22,13 +22,13 @@ typedef struct { /* Setup once in advance */ - secp256k1_context* ctx; - secp256k1_scratch_space* scratch; - 
secp256k1_scalar* scalars; - secp256k1_ge* pubkeys; - secp256k1_scalar* seckeys; - secp256k1_gej* expected_output; - secp256k1_ecmult_multi_func ecmult_multi; + rustsecp256k1_v0_1_0_context* ctx; + rustsecp256k1_v0_1_0_scratch_space* scratch; + rustsecp256k1_v0_1_0_scalar* scalars; + rustsecp256k1_v0_1_0_ge* pubkeys; + rustsecp256k1_v0_1_0_scalar* seckeys; + rustsecp256k1_v0_1_0_gej* expected_output; + rustsecp256k1_v0_1_0_ecmult_multi_func ecmult_multi; /* Changes per test */ size_t count; @@ -39,15 +39,15 @@ typedef struct { size_t offset2; /* Test output. */ - secp256k1_gej* output; + rustsecp256k1_v0_1_0_gej* output; } bench_data; -static int bench_callback(secp256k1_scalar* sc, secp256k1_ge* ge, size_t idx, void* arg) { +static int bench_callback(rustsecp256k1_v0_1_0_scalar* sc, rustsecp256k1_v0_1_0_ge* ge, size_t idx, void* arg) { bench_data* data = (bench_data*)arg; if (data->includes_g) ++idx; if (idx == 0) { *sc = data->scalars[data->offset1]; - *ge = secp256k1_ge_const_g; + *ge = rustsecp256k1_v0_1_0_ge_const_g; } else { *sc = data->scalars[(data->offset1 + idx) % POINTS]; *ge = data->pubkeys[(data->offset2 + idx - 1) % POINTS]; @@ -82,14 +82,14 @@ static void bench_ecmult_teardown(void* arg) { size_t iter; /* Verify the results in teardown, to avoid doing comparisons while benchmarking. */ for (iter = 0; iter < iters; ++iter) { - secp256k1_gej tmp; - secp256k1_gej_add_var(&tmp, &data->output[iter], &data->expected_output[iter], NULL); - CHECK(secp256k1_gej_is_infinity(&tmp)); + rustsecp256k1_v0_1_0_gej tmp; + rustsecp256k1_v0_1_0_gej_add_var(&tmp, &data->output[iter], &data->expected_output[iter], NULL); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&tmp)); } } -static void generate_scalar(uint32_t num, secp256k1_scalar* scalar) { - secp256k1_sha256 sha256; +static void generate_scalar(uint32_t num, rustsecp256k1_v0_1_0_scalar* scalar) { + rustsecp256k1_v0_1_0_sha256 sha256; unsigned char c[11] = {'e', 'c', 'm', 'u', 'l', 't', 0, 0, 0, 0}; unsigned char buf[32]; int overflow = 0; @@ -97,16 +97,16 @@ static void generate_scalar(uint32_t num, secp256k1_scalar* scalar) { c[7] = num >> 8; c[8] = num >> 16; c[9] = num >> 24; - secp256k1_sha256_initialize(&sha256); - secp256k1_sha256_write(&sha256, c, sizeof(c)); - secp256k1_sha256_finalize(&sha256, buf); - secp256k1_scalar_set_b32(scalar, buf, &overflow); + rustsecp256k1_v0_1_0_sha256_initialize(&sha256); + rustsecp256k1_v0_1_0_sha256_write(&sha256, c, sizeof(c)); + rustsecp256k1_v0_1_0_sha256_finalize(&sha256, buf); + rustsecp256k1_v0_1_0_scalar_set_b32(scalar, buf, &overflow); CHECK(!overflow); } static void run_test(bench_data* data, size_t count, int includes_g) { char str[32]; - static const secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + static const rustsecp256k1_v0_1_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); size_t iters = 1 + ITERS / count; size_t iter; @@ -117,15 +117,15 @@ static void run_test(bench_data* data, size_t count, int includes_g) { data->offset1 = (data->count * 0x537b7f6f + 0x8f66a481) % POINTS; data->offset2 = (data->count * 0x7f6f537b + 0x6a1a8f49) % POINTS; for (iter = 0; iter < iters; ++iter) { - secp256k1_scalar tmp; - secp256k1_scalar total = data->scalars[(data->offset1++) % POINTS]; + rustsecp256k1_v0_1_0_scalar tmp; + rustsecp256k1_v0_1_0_scalar total = data->scalars[(data->offset1++) % POINTS]; size_t i = 0; for (i = 0; i + 1 < count; ++i) { - secp256k1_scalar_mul(&tmp, &data->seckeys[(data->offset2++) % POINTS], &data->scalars[(data->offset1++) % 
POINTS]); - secp256k1_scalar_add(&total, &total, &tmp); + rustsecp256k1_v0_1_0_scalar_mul(&tmp, &data->seckeys[(data->offset2++) % POINTS], &data->scalars[(data->offset1++) % POINTS]); + rustsecp256k1_v0_1_0_scalar_add(&total, &total, &tmp); } - secp256k1_scalar_negate(&total, &total); - secp256k1_ecmult(&data->ctx->ecmult_ctx, &data->expected_output[iter], NULL, &zero, &total); + rustsecp256k1_v0_1_0_scalar_negate(&total, &total); + rustsecp256k1_v0_1_0_ecmult(&data->ctx->ecmult_ctx, &data->expected_output[iter], NULL, &zero, &total); } /* Run the benchmark. */ @@ -136,25 +136,25 @@ static void run_test(bench_data* data, size_t count, int includes_g) { int main(int argc, char **argv) { bench_data data; int i, p; - secp256k1_gej* pubkeys_gej; + rustsecp256k1_v0_1_0_gej* pubkeys_gej; size_t scratch_size; - data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - scratch_size = secp256k1_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16; - data.scratch = secp256k1_scratch_space_create(data.ctx, scratch_size); - data.ecmult_multi = secp256k1_ecmult_multi_var; + data.ctx = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + scratch_size = rustsecp256k1_v0_1_0_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16; + data.scratch = rustsecp256k1_v0_1_0_scratch_space_create(data.ctx, scratch_size); + data.ecmult_multi = rustsecp256k1_v0_1_0_ecmult_multi_var; if (argc > 1) { if(have_flag(argc, argv, "pippenger_wnaf")) { printf("Using pippenger_wnaf:\n"); - data.ecmult_multi = secp256k1_ecmult_pippenger_batch_single; + data.ecmult_multi = rustsecp256k1_v0_1_0_ecmult_pippenger_batch_single; } else if(have_flag(argc, argv, "strauss_wnaf")) { printf("Using strauss_wnaf:\n"); - data.ecmult_multi = secp256k1_ecmult_strauss_batch_single; + data.ecmult_multi = rustsecp256k1_v0_1_0_ecmult_strauss_batch_single; } else if(have_flag(argc, argv, "simple")) { printf("Using simple algorithm:\n"); - data.ecmult_multi = secp256k1_ecmult_multi_var; - secp256k1_scratch_space_destroy(data.ctx, data.scratch); + data.ecmult_multi = rustsecp256k1_v0_1_0_ecmult_multi_var; + rustsecp256k1_v0_1_0_scratch_space_destroy(data.ctx, data.scratch); data.scratch = NULL; } else { fprintf(stderr, "%s: unrecognized argument '%s'.\n", argv[0], argv[1]); @@ -164,24 +164,24 @@ int main(int argc, char **argv) { } /* Allocate stuff */ - data.scalars = malloc(sizeof(secp256k1_scalar) * POINTS); - data.seckeys = malloc(sizeof(secp256k1_scalar) * POINTS); - data.pubkeys = malloc(sizeof(secp256k1_ge) * POINTS); - data.expected_output = malloc(sizeof(secp256k1_gej) * (ITERS + 1)); - data.output = malloc(sizeof(secp256k1_gej) * (ITERS + 1)); + data.scalars = malloc(sizeof(rustsecp256k1_v0_1_0_scalar) * POINTS); + data.seckeys = malloc(sizeof(rustsecp256k1_v0_1_0_scalar) * POINTS); + data.pubkeys = malloc(sizeof(rustsecp256k1_v0_1_0_ge) * POINTS); + data.expected_output = malloc(sizeof(rustsecp256k1_v0_1_0_gej) * (ITERS + 1)); + data.output = malloc(sizeof(rustsecp256k1_v0_1_0_gej) * (ITERS + 1)); /* Generate a set of scalars, and private/public keypairs. 
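 * The loop below builds these by repeated doubling: seckeys[0] = 1 and
 * pubkeys_gej[0] = G, and each step doubles both, so seckeys[i] = 2^i and
 * pubkeys[i] = (2^i)*G. This keeps the setup cheap while still producing
 * POINTS distinct points.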
*/ - pubkeys_gej = malloc(sizeof(secp256k1_gej) * POINTS); - secp256k1_gej_set_ge(&pubkeys_gej[0], &secp256k1_ge_const_g); - secp256k1_scalar_set_int(&data.seckeys[0], 1); + pubkeys_gej = malloc(sizeof(rustsecp256k1_v0_1_0_gej) * POINTS); + rustsecp256k1_v0_1_0_gej_set_ge(&pubkeys_gej[0], &rustsecp256k1_v0_1_0_ge_const_g); + rustsecp256k1_v0_1_0_scalar_set_int(&data.seckeys[0], 1); for (i = 0; i < POINTS; ++i) { generate_scalar(i, &data.scalars[i]); if (i) { - secp256k1_gej_double_var(&pubkeys_gej[i], &pubkeys_gej[i - 1], NULL); - secp256k1_scalar_add(&data.seckeys[i], &data.seckeys[i - 1], &data.seckeys[i - 1]); + rustsecp256k1_v0_1_0_gej_double_var(&pubkeys_gej[i], &pubkeys_gej[i - 1], NULL); + rustsecp256k1_v0_1_0_scalar_add(&data.seckeys[i], &data.seckeys[i - 1], &data.seckeys[i - 1]); } } - secp256k1_ge_set_all_gej_var(data.pubkeys, pubkeys_gej, POINTS); + rustsecp256k1_v0_1_0_ge_set_all_gej_var(data.pubkeys, pubkeys_gej, POINTS); free(pubkeys_gej); for (i = 1; i <= 8; ++i) { @@ -194,9 +194,9 @@ int main(int argc, char **argv) { } } if (data.scratch != NULL) { - secp256k1_scratch_space_destroy(data.ctx, data.scratch); + rustsecp256k1_v0_1_0_scratch_space_destroy(data.ctx, data.scratch); } - secp256k1_context_destroy(data.ctx); + rustsecp256k1_v0_1_0_context_destroy(data.ctx); free(data.scalars); free(data.pubkeys); free(data.seckeys); diff --git a/secp256k1-sys/depend/secp256k1/src/bench_internal.c b/secp256k1-sys/depend/secp256k1/src/bench_internal.c index a8f4e9e..d1a70b2 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench_internal.c +++ b/secp256k1-sys/depend/secp256k1/src/bench_internal.c @@ -19,10 +19,10 @@ #include "secp256k1.c" typedef struct { - secp256k1_scalar scalar_x, scalar_y; - secp256k1_fe fe_x, fe_y; - secp256k1_ge ge_x, ge_y; - secp256k1_gej gej_x, gej_y; + rustsecp256k1_v0_1_0_scalar scalar_x, scalar_y; + rustsecp256k1_v0_1_0_fe fe_x, fe_y; + rustsecp256k1_v0_1_0_ge ge_x, ge_y; + rustsecp256k1_v0_1_0_gej gej_x, gej_y; unsigned char data[64]; int wnaf[256]; } bench_inv; @@ -44,14 +44,14 @@ void bench_setup(void* arg) { 0x11, 0x15, 0x17, 0x1b, 0x1d, 0xb1, 0xbf, 0xd3 }; - secp256k1_scalar_set_b32(&data->scalar_x, init_x, NULL); - secp256k1_scalar_set_b32(&data->scalar_y, init_y, NULL); - secp256k1_fe_set_b32(&data->fe_x, init_x); - secp256k1_fe_set_b32(&data->fe_y, init_y); - CHECK(secp256k1_ge_set_xo_var(&data->ge_x, &data->fe_x, 0)); - CHECK(secp256k1_ge_set_xo_var(&data->ge_y, &data->fe_y, 1)); - secp256k1_gej_set_ge(&data->gej_x, &data->ge_x); - secp256k1_gej_set_ge(&data->gej_y, &data->ge_y); + rustsecp256k1_v0_1_0_scalar_set_b32(&data->scalar_x, init_x, NULL); + rustsecp256k1_v0_1_0_scalar_set_b32(&data->scalar_y, init_y, NULL); + rustsecp256k1_v0_1_0_fe_set_b32(&data->fe_x, init_x); + rustsecp256k1_v0_1_0_fe_set_b32(&data->fe_y, init_y); + CHECK(rustsecp256k1_v0_1_0_ge_set_xo_var(&data->ge_x, &data->fe_x, 0)); + CHECK(rustsecp256k1_v0_1_0_ge_set_xo_var(&data->ge_y, &data->fe_y, 1)); + rustsecp256k1_v0_1_0_gej_set_ge(&data->gej_x, &data->ge_x); + rustsecp256k1_v0_1_0_gej_set_ge(&data->gej_y, &data->ge_y); memcpy(data->data, init_x, 32); memcpy(data->data + 32, init_y, 32); } @@ -61,7 +61,7 @@ void bench_scalar_add(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 2000000; i++) { - secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); + rustsecp256k1_v0_1_0_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); } } @@ -70,7 +70,7 @@ void bench_scalar_negate(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 
2000000; i++) { - secp256k1_scalar_negate(&data->scalar_x, &data->scalar_x); + rustsecp256k1_v0_1_0_scalar_negate(&data->scalar_x, &data->scalar_x); } } @@ -79,7 +79,7 @@ void bench_scalar_sqr(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 200000; i++) { - secp256k1_scalar_sqr(&data->scalar_x, &data->scalar_x); + rustsecp256k1_v0_1_0_scalar_sqr(&data->scalar_x, &data->scalar_x); } } @@ -88,7 +88,7 @@ void bench_scalar_mul(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 200000; i++) { - secp256k1_scalar_mul(&data->scalar_x, &data->scalar_x, &data->scalar_y); + rustsecp256k1_v0_1_0_scalar_mul(&data->scalar_x, &data->scalar_x, &data->scalar_y); } } @@ -98,9 +98,9 @@ void bench_scalar_split(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 20000; i++) { - secp256k1_scalar l, r; - secp256k1_scalar_split_lambda(&l, &r, &data->scalar_x); - secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); + rustsecp256k1_v0_1_0_scalar l, r; + rustsecp256k1_v0_1_0_scalar_split_lambda(&l, &r, &data->scalar_x); + rustsecp256k1_v0_1_0_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); } } #endif @@ -110,8 +110,8 @@ void bench_scalar_inverse(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 2000; i++) { - secp256k1_scalar_inverse(&data->scalar_x, &data->scalar_x); - secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); + rustsecp256k1_v0_1_0_scalar_inverse(&data->scalar_x, &data->scalar_x); + rustsecp256k1_v0_1_0_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); } } @@ -120,8 +120,8 @@ void bench_scalar_inverse_var(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 2000; i++) { - secp256k1_scalar_inverse_var(&data->scalar_x, &data->scalar_x); - secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); + rustsecp256k1_v0_1_0_scalar_inverse_var(&data->scalar_x, &data->scalar_x); + rustsecp256k1_v0_1_0_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); } } @@ -130,7 +130,7 @@ void bench_field_normalize(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 2000000; i++) { - secp256k1_fe_normalize(&data->fe_x); + rustsecp256k1_v0_1_0_fe_normalize(&data->fe_x); } } @@ -139,7 +139,7 @@ void bench_field_normalize_weak(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 2000000; i++) { - secp256k1_fe_normalize_weak(&data->fe_x); + rustsecp256k1_v0_1_0_fe_normalize_weak(&data->fe_x); } } @@ -148,7 +148,7 @@ void bench_field_mul(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 200000; i++) { - secp256k1_fe_mul(&data->fe_x, &data->fe_x, &data->fe_y); + rustsecp256k1_v0_1_0_fe_mul(&data->fe_x, &data->fe_x, &data->fe_y); } } @@ -157,7 +157,7 @@ void bench_field_sqr(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 200000; i++) { - secp256k1_fe_sqr(&data->fe_x, &data->fe_x); + rustsecp256k1_v0_1_0_fe_sqr(&data->fe_x, &data->fe_x); } } @@ -166,8 +166,8 @@ void bench_field_inverse(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 20000; i++) { - secp256k1_fe_inv(&data->fe_x, &data->fe_x); - secp256k1_fe_add(&data->fe_x, &data->fe_y); + rustsecp256k1_v0_1_0_fe_inv(&data->fe_x, &data->fe_x); + rustsecp256k1_v0_1_0_fe_add(&data->fe_x, &data->fe_y); } } @@ -176,20 +176,20 @@ void bench_field_inverse_var(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 20000; i++) { - secp256k1_fe_inv_var(&data->fe_x, &data->fe_x); - secp256k1_fe_add(&data->fe_x, &data->fe_y); + 
rustsecp256k1_v0_1_0_fe_inv_var(&data->fe_x, &data->fe_x); + rustsecp256k1_v0_1_0_fe_add(&data->fe_x, &data->fe_y); } } void bench_field_sqrt(void* arg) { int i; bench_inv *data = (bench_inv*)arg; - secp256k1_fe t; + rustsecp256k1_v0_1_0_fe t; for (i = 0; i < 20000; i++) { t = data->fe_x; - secp256k1_fe_sqrt(&data->fe_x, &t); - secp256k1_fe_add(&data->fe_x, &data->fe_y); + rustsecp256k1_v0_1_0_fe_sqrt(&data->fe_x, &t); + rustsecp256k1_v0_1_0_fe_add(&data->fe_x, &data->fe_y); } } @@ -198,7 +198,7 @@ void bench_group_double_var(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 200000; i++) { - secp256k1_gej_double_var(&data->gej_x, &data->gej_x, NULL); + rustsecp256k1_v0_1_0_gej_double_var(&data->gej_x, &data->gej_x, NULL); } } @@ -207,7 +207,7 @@ void bench_group_add_var(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 200000; i++) { - secp256k1_gej_add_var(&data->gej_x, &data->gej_x, &data->gej_y, NULL); + rustsecp256k1_v0_1_0_gej_add_var(&data->gej_x, &data->gej_x, &data->gej_y, NULL); } } @@ -216,7 +216,7 @@ void bench_group_add_affine(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 200000; i++) { - secp256k1_gej_add_ge(&data->gej_x, &data->gej_x, &data->ge_y); + rustsecp256k1_v0_1_0_gej_add_ge(&data->gej_x, &data->gej_x, &data->ge_y); } } @@ -225,7 +225,7 @@ void bench_group_add_affine_var(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 200000; i++) { - secp256k1_gej_add_ge_var(&data->gej_x, &data->gej_x, &data->ge_y, NULL); + rustsecp256k1_v0_1_0_gej_add_ge_var(&data->gej_x, &data->gej_x, &data->ge_y, NULL); } } @@ -234,7 +234,7 @@ void bench_group_jacobi_var(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 20000; i++) { - secp256k1_gej_has_quad_y_var(&data->gej_x); + rustsecp256k1_v0_1_0_gej_has_quad_y_var(&data->gej_x); } } @@ -243,8 +243,8 @@ void bench_ecmult_wnaf(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 20000; i++) { - secp256k1_ecmult_wnaf(data->wnaf, 256, &data->scalar_x, WINDOW_A); - secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); + rustsecp256k1_v0_1_0_ecmult_wnaf(data->wnaf, 256, &data->scalar_x, WINDOW_A); + rustsecp256k1_v0_1_0_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); } } @@ -253,8 +253,8 @@ void bench_wnaf_const(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 20000; i++) { - secp256k1_wnaf_const(data->wnaf, &data->scalar_x, WINDOW_A, 256); - secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); + rustsecp256k1_v0_1_0_wnaf_const(data->wnaf, &data->scalar_x, WINDOW_A, 256); + rustsecp256k1_v0_1_0_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); } } @@ -262,35 +262,35 @@ void bench_wnaf_const(void* arg) { void bench_sha256(void* arg) { int i; bench_inv *data = (bench_inv*)arg; - secp256k1_sha256 sha; + rustsecp256k1_v0_1_0_sha256 sha; for (i = 0; i < 20000; i++) { - secp256k1_sha256_initialize(&sha); - secp256k1_sha256_write(&sha, data->data, 32); - secp256k1_sha256_finalize(&sha, data->data); + rustsecp256k1_v0_1_0_sha256_initialize(&sha); + rustsecp256k1_v0_1_0_sha256_write(&sha, data->data, 32); + rustsecp256k1_v0_1_0_sha256_finalize(&sha, data->data); } } void bench_hmac_sha256(void* arg) { int i; bench_inv *data = (bench_inv*)arg; - secp256k1_hmac_sha256 hmac; + rustsecp256k1_v0_1_0_hmac_sha256 hmac; for (i = 0; i < 20000; i++) { - secp256k1_hmac_sha256_initialize(&hmac, data->data, 32); - secp256k1_hmac_sha256_write(&hmac, data->data, 32); - secp256k1_hmac_sha256_finalize(&hmac, 
data->data); + rustsecp256k1_v0_1_0_hmac_sha256_initialize(&hmac, data->data, 32); + rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, data->data, 32); + rustsecp256k1_v0_1_0_hmac_sha256_finalize(&hmac, data->data); } } void bench_rfc6979_hmac_sha256(void* arg) { int i; bench_inv *data = (bench_inv*)arg; - secp256k1_rfc6979_hmac_sha256 rng; + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 rng; for (i = 0; i < 20000; i++) { - secp256k1_rfc6979_hmac_sha256_initialize(&rng, data->data, 64); - secp256k1_rfc6979_hmac_sha256_generate(&rng, data->data, 32); + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_initialize(&rng, data->data, 64); + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_generate(&rng, data->data, 32); } } @@ -298,7 +298,7 @@ void bench_context_verify(void* arg) { int i; (void)arg; for (i = 0; i < 20; i++) { - secp256k1_context_destroy(secp256k1_context_create(SECP256K1_CONTEXT_VERIFY)); + rustsecp256k1_v0_1_0_context_destroy(rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_VERIFY)); } } @@ -306,7 +306,7 @@ void bench_context_sign(void* arg) { int i; (void)arg; for (i = 0; i < 200; i++) { - secp256k1_context_destroy(secp256k1_context_create(SECP256K1_CONTEXT_SIGN)); + rustsecp256k1_v0_1_0_context_destroy(rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN)); } } @@ -314,14 +314,14 @@ void bench_context_sign(void* arg) { void bench_num_jacobi(void* arg) { int i; bench_inv *data = (bench_inv*)arg; - secp256k1_num nx, norder; + rustsecp256k1_v0_1_0_num nx, norder; - secp256k1_scalar_get_num(&nx, &data->scalar_x); - secp256k1_scalar_order_get_num(&norder); - secp256k1_scalar_get_num(&norder, &data->scalar_y); + rustsecp256k1_v0_1_0_scalar_get_num(&nx, &data->scalar_x); + rustsecp256k1_v0_1_0_scalar_order_get_num(&norder); + rustsecp256k1_v0_1_0_scalar_get_num(&norder, &data->scalar_y); for (i = 0; i < 200000; i++) { - secp256k1_num_jacobi(&nx, &norder); + rustsecp256k1_v0_1_0_num_jacobi(&nx, &norder); } } #endif diff --git a/secp256k1-sys/depend/secp256k1/src/bench_recover.c b/secp256k1-sys/depend/secp256k1/src/bench_recover.c index b806eed..b4b1d82 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench_recover.c +++ b/secp256k1-sys/depend/secp256k1/src/bench_recover.c @@ -10,7 +10,7 @@ #include "bench.h" typedef struct { - secp256k1_context *ctx; + rustsecp256k1_v0_1_0_context *ctx; unsigned char msg[32]; unsigned char sig[64]; } bench_recover_data; @@ -18,16 +18,16 @@ typedef struct { void bench_recover(void* arg) { int i; bench_recover_data *data = (bench_recover_data*)arg; - secp256k1_pubkey pubkey; + rustsecp256k1_v0_1_0_pubkey pubkey; unsigned char pubkeyc[33]; for (i = 0; i < 20000; i++) { int j; size_t pubkeylen = 33; - secp256k1_ecdsa_recoverable_signature sig; - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(data->ctx, &sig, data->sig, i % 2)); - CHECK(secp256k1_ecdsa_recover(data->ctx, &pubkey, &sig, data->msg)); - CHECK(secp256k1_ec_pubkey_serialize(data->ctx, pubkeyc, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED)); + rustsecp256k1_v0_1_0_ecdsa_recoverable_signature sig; + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(data->ctx, &sig, data->sig, i % 2)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(data->ctx, &pubkey, &sig, data->msg)); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(data->ctx, pubkeyc, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED)); for (j = 0; j < 32; j++) { data->sig[j + 32] = data->msg[j]; /* Move former message to S. */ data->msg[j] = data->sig[j]; /* Move former R to message. 
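 * Rotating the old message into S and the old R into the message gives each
 * iteration a fresh (signature, message) input, rather than timing the same
 * recovery over and over.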
*/ @@ -51,10 +51,10 @@ void bench_recover_setup(void* arg) { int main(void) { bench_recover_data data; - data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY); + data.ctx = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_VERIFY); run_benchmark("ecdsa_recover", bench_recover, bench_recover_setup, NULL, &data, 10, 20000); - secp256k1_context_destroy(data.ctx); + rustsecp256k1_v0_1_0_context_destroy(data.ctx); return 0; } diff --git a/secp256k1-sys/depend/secp256k1/src/bench_sign.c b/secp256k1-sys/depend/secp256k1/src/bench_sign.c index 544b439..ad6fd91 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench_sign.c +++ b/secp256k1-sys/depend/secp256k1/src/bench_sign.c @@ -9,7 +9,7 @@ #include "bench.h" typedef struct { - secp256k1_context* ctx; + rustsecp256k1_v0_1_0_context* ctx; unsigned char msg[32]; unsigned char key[32]; } bench_sign; @@ -34,9 +34,9 @@ static void bench_sign_run(void* arg) { for (i = 0; i < 20000; i++) { size_t siglen = 74; int j; - secp256k1_ecdsa_signature signature; - CHECK(secp256k1_ecdsa_sign(data->ctx, &signature, data->msg, data->key, NULL, NULL)); - CHECK(secp256k1_ecdsa_signature_serialize_der(data->ctx, sig, &siglen, &signature)); + rustsecp256k1_v0_1_0_ecdsa_signature signature; + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(data->ctx, &signature, data->msg, data->key, NULL, NULL)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_serialize_der(data->ctx, sig, &siglen, &signature)); for (j = 0; j < 32; j++) { data->msg[j] = sig[j]; data->key[j] = sig[j + 32]; @@ -47,10 +47,10 @@ static void bench_sign_run(void* arg) { int main(void) { bench_sign data; - data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN); + data.ctx = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN); run_benchmark("ecdsa_sign", bench_sign_run, bench_sign_setup, NULL, &data, 10, 20000); - secp256k1_context_destroy(data.ctx); + rustsecp256k1_v0_1_0_context_destroy(data.ctx); return 0; } diff --git a/secp256k1-sys/depend/secp256k1/src/bench_verify.c b/secp256k1-sys/depend/secp256k1/src/bench_verify.c index 418defa..b017d29 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench_verify.c +++ b/secp256k1-sys/depend/secp256k1/src/bench_verify.c @@ -18,7 +18,7 @@ #endif typedef struct { - secp256k1_context *ctx; + rustsecp256k1_v0_1_0_context *ctx; unsigned char msg[32]; unsigned char key[32]; unsigned char sig[72]; @@ -35,14 +35,14 @@ static void benchmark_verify(void* arg) { benchmark_verify_t* data = (benchmark_verify_t*)arg; for (i = 0; i < 20000; i++) { - secp256k1_pubkey pubkey; - secp256k1_ecdsa_signature sig; + rustsecp256k1_v0_1_0_pubkey pubkey; + rustsecp256k1_v0_1_0_ecdsa_signature sig; data->sig[data->siglen - 1] ^= (i & 0xFF); data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF); data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF); - CHECK(secp256k1_ec_pubkey_parse(data->ctx, &pubkey, data->pubkey, data->pubkeylen) == 1); - CHECK(secp256k1_ecdsa_signature_parse_der(data->ctx, &sig, data->sig, data->siglen) == 1); - CHECK(secp256k1_ecdsa_verify(data->ctx, &sig, data->msg, &pubkey) == (i == 0)); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_parse(data->ctx, &pubkey, data->pubkey, data->pubkeylen) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(data->ctx, &sig, data->sig, data->siglen) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(data->ctx, &sig, data->msg, &pubkey) == (i == 0)); data->sig[data->siglen - 1] ^= (i & 0xFF); data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF); data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF); @@ -81,11 +81,11 @@ static void 
benchmark_verify_openssl(void* arg) { int main(void) { int i; - secp256k1_pubkey pubkey; - secp256k1_ecdsa_signature sig; + rustsecp256k1_v0_1_0_pubkey pubkey; + rustsecp256k1_v0_1_0_ecdsa_signature sig; benchmark_verify_t data; - data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + data.ctx = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); for (i = 0; i < 32; i++) { data.msg[i] = 1 + i; @@ -94,11 +94,11 @@ int main(void) { data.key[i] = 33 + i; } data.siglen = 72; - CHECK(secp256k1_ecdsa_sign(data.ctx, &sig, data.msg, data.key, NULL, NULL)); - CHECK(secp256k1_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig)); - CHECK(secp256k1_ec_pubkey_create(data.ctx, &pubkey, data.key)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(data.ctx, &sig, data.msg, data.key, NULL, NULL)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig)); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(data.ctx, &pubkey, data.key)); data.pubkeylen = 33; - CHECK(secp256k1_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED) == 1); run_benchmark("ecdsa_verify", benchmark_verify, NULL, NULL, &data, 10, 20000); #ifdef ENABLE_OPENSSL_TESTS @@ -107,6 +107,6 @@ int main(void) { EC_GROUP_free(data.ec_group); #endif - secp256k1_context_destroy(data.ctx); + rustsecp256k1_v0_1_0_context_destroy(data.ctx); return 0; } diff --git a/secp256k1-sys/depend/secp256k1/src/ecdsa.h b/secp256k1-sys/depend/secp256k1/src/ecdsa.h index 80590c7..d9f9f47 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecdsa.h +++ b/secp256k1-sys/depend/secp256k1/src/ecdsa.h @@ -13,9 +13,9 @@ #include "group.h" #include "ecmult.h" -static int secp256k1_ecdsa_sig_parse(secp256k1_scalar *r, secp256k1_scalar *s, const unsigned char *sig, size_t size); -static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const secp256k1_scalar *r, const secp256k1_scalar *s); -static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const secp256k1_scalar* r, const secp256k1_scalar* s, const secp256k1_ge *pubkey, const secp256k1_scalar *message); -static int secp256k1_ecdsa_sig_sign(const secp256k1_ecmult_gen_context *ctx, secp256k1_scalar* r, secp256k1_scalar* s, const secp256k1_scalar *seckey, const secp256k1_scalar *message, const secp256k1_scalar *nonce, int *recid); +static int rustsecp256k1_v0_1_0_ecdsa_sig_parse(rustsecp256k1_v0_1_0_scalar *r, rustsecp256k1_v0_1_0_scalar *s, const unsigned char *sig, size_t size); +static int rustsecp256k1_v0_1_0_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *s); +static int rustsecp256k1_v0_1_0_ecdsa_sig_verify(const rustsecp256k1_v0_1_0_ecmult_context *ctx, const rustsecp256k1_v0_1_0_scalar* r, const rustsecp256k1_v0_1_0_scalar* s, const rustsecp256k1_v0_1_0_ge *pubkey, const rustsecp256k1_v0_1_0_scalar *message); +static int rustsecp256k1_v0_1_0_ecdsa_sig_sign(const rustsecp256k1_v0_1_0_ecmult_gen_context *ctx, rustsecp256k1_v0_1_0_scalar* r, rustsecp256k1_v0_1_0_scalar* s, const rustsecp256k1_v0_1_0_scalar *seckey, const rustsecp256k1_v0_1_0_scalar *message, const rustsecp256k1_v0_1_0_scalar *nonce, int *recid); #endif /* SECP256K1_ECDSA_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecdsa_impl.h 
b/secp256k1-sys/depend/secp256k1/src/ecdsa_impl.h index c340004..d6afb70 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecdsa_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/ecdsa_impl.h @@ -28,7 +28,7 @@ * sage: '%x' % (EllipticCurve ([F (a), F (b)]).order()) * 'fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141' */ -static const secp256k1_fe secp256k1_ecdsa_const_order_as_fe = SECP256K1_FE_CONST( +static const rustsecp256k1_v0_1_0_fe rustsecp256k1_v0_1_0_ecdsa_const_order_as_fe = SECP256K1_FE_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xBAAEDCE6UL, 0xAF48A03BUL, 0xBFD25E8CUL, 0xD0364141UL ); @@ -42,11 +42,11 @@ static const secp256k1_fe secp256k1_ecdsa_const_order_as_fe = SECP256K1_FE_CONST * sage: '%x' % (p - EllipticCurve ([F (a), F (b)]).order()) * '14551231950b75fc4402da1722fc9baee' */ -static const secp256k1_fe secp256k1_ecdsa_const_p_minus_order = SECP256K1_FE_CONST( +static const rustsecp256k1_v0_1_0_fe rustsecp256k1_v0_1_0_ecdsa_const_p_minus_order = SECP256K1_FE_CONST( 0, 0, 0, 1, 0x45512319UL, 0x50B75FC4UL, 0x402DA172UL, 0x2FC9BAEEUL ); -static int secp256k1_der_read_len(const unsigned char **sigp, const unsigned char *sigend) { +static int rustsecp256k1_v0_1_0_der_read_len(const unsigned char **sigp, const unsigned char *sigend) { int lenleft, b1; size_t ret = 0; if (*sigp >= sigend) { @@ -96,7 +96,7 @@ static int secp256k1_der_read_len(const unsigned char **sigp, const unsigned cha return ret; } -static int secp256k1_der_parse_integer(secp256k1_scalar *r, const unsigned char **sig, const unsigned char *sigend) { +static int rustsecp256k1_v0_1_0_der_parse_integer(rustsecp256k1_v0_1_0_scalar *r, const unsigned char **sig, const unsigned char *sigend) { int overflow = 0; unsigned char ra[32] = {0}; int rlen; @@ -106,7 +106,7 @@ static int secp256k1_der_parse_integer(secp256k1_scalar *r, const unsigned char return 0; } (*sig)++; - rlen = secp256k1_der_read_len(sig, sigend); + rlen = rustsecp256k1_v0_1_0_der_read_len(sig, sigend); if (rlen <= 0 || (*sig) + rlen > sigend) { /* Exceeds bounds or not at least length 1 (X.690-0207 8.3.1). */ return 0; @@ -133,23 +133,23 @@ static int secp256k1_der_parse_integer(secp256k1_scalar *r, const unsigned char } if (!overflow) { memcpy(ra + 32 - rlen, *sig, rlen); - secp256k1_scalar_set_b32(r, ra, &overflow); + rustsecp256k1_v0_1_0_scalar_set_b32(r, ra, &overflow); } if (overflow) { - secp256k1_scalar_set_int(r, 0); + rustsecp256k1_v0_1_0_scalar_set_int(r, 0); } (*sig) += rlen; return 1; } -static int secp256k1_ecdsa_sig_parse(secp256k1_scalar *rr, secp256k1_scalar *rs, const unsigned char *sig, size_t size) { +static int rustsecp256k1_v0_1_0_ecdsa_sig_parse(rustsecp256k1_v0_1_0_scalar *rr, rustsecp256k1_v0_1_0_scalar *rs, const unsigned char *sig, size_t size) { const unsigned char *sigend = sig + size; int rlen; if (sig == sigend || *(sig++) != 0x30) { /* The encoding doesn't start with a constructed sequence (X.690-0207 8.9.1). 
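 * (0x30 is the identifier octet of a constructed SEQUENCE; a DER ECDSA
 * signature is encoded as SEQUENCE { r INTEGER, s INTEGER }.)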
*/ return 0; } - rlen = secp256k1_der_read_len(&sig, sigend); + rlen = rustsecp256k1_v0_1_0_der_read_len(&sig, sigend); if (rlen < 0 || sig + rlen > sigend) { /* Tuple exceeds bounds */ return 0; @@ -159,10 +159,10 @@ static int secp256k1_ecdsa_sig_parse(secp256k1_scalar *rr, secp256k1_scalar *rs, return 0; } - if (!secp256k1_der_parse_integer(rr, &sig, sigend)) { + if (!rustsecp256k1_v0_1_0_der_parse_integer(rr, &sig, sigend)) { return 0; } - if (!secp256k1_der_parse_integer(rs, &sig, sigend)) { + if (!rustsecp256k1_v0_1_0_der_parse_integer(rs, &sig, sigend)) { return 0; } @@ -174,12 +174,12 @@ static int secp256k1_ecdsa_sig_parse(secp256k1_scalar *rr, secp256k1_scalar *rs, return 1; } -static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const secp256k1_scalar* ar, const secp256k1_scalar* as) { +static int rustsecp256k1_v0_1_0_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_1_0_scalar* ar, const rustsecp256k1_v0_1_0_scalar* as) { unsigned char r[33] = {0}, s[33] = {0}; unsigned char *rp = r, *sp = s; size_t lenR = 33, lenS = 33; - secp256k1_scalar_get_b32(&r[1], ar); - secp256k1_scalar_get_b32(&s[1], as); + rustsecp256k1_v0_1_0_scalar_get_b32(&r[1], ar); + rustsecp256k1_v0_1_0_scalar_get_b32(&s[1], as); while (lenR > 1 && rp[0] == 0 && rp[1] < 0x80) { lenR--; rp++; } while (lenS > 1 && sp[0] == 0 && sp[1] < 0x80) { lenS--; sp++; } if (*size < 6+lenS+lenR) { @@ -198,42 +198,42 @@ static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const return 1; } -static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const secp256k1_scalar *sigr, const secp256k1_scalar *sigs, const secp256k1_ge *pubkey, const secp256k1_scalar *message) { +static int rustsecp256k1_v0_1_0_ecdsa_sig_verify(const rustsecp256k1_v0_1_0_ecmult_context *ctx, const rustsecp256k1_v0_1_0_scalar *sigr, const rustsecp256k1_v0_1_0_scalar *sigs, const rustsecp256k1_v0_1_0_ge *pubkey, const rustsecp256k1_v0_1_0_scalar *message) { unsigned char c[32]; - secp256k1_scalar sn, u1, u2; + rustsecp256k1_v0_1_0_scalar sn, u1, u2; #if !defined(EXHAUSTIVE_TEST_ORDER) - secp256k1_fe xr; + rustsecp256k1_v0_1_0_fe xr; #endif - secp256k1_gej pubkeyj; - secp256k1_gej pr; + rustsecp256k1_v0_1_0_gej pubkeyj; + rustsecp256k1_v0_1_0_gej pr; - if (secp256k1_scalar_is_zero(sigr) || secp256k1_scalar_is_zero(sigs)) { + if (rustsecp256k1_v0_1_0_scalar_is_zero(sigr) || rustsecp256k1_v0_1_0_scalar_is_zero(sigs)) { return 0; } - secp256k1_scalar_inverse_var(&sn, sigs); - secp256k1_scalar_mul(&u1, &sn, message); - secp256k1_scalar_mul(&u2, &sn, sigr); - secp256k1_gej_set_ge(&pubkeyj, pubkey); - secp256k1_ecmult(ctx, &pr, &pubkeyj, &u2, &u1); - if (secp256k1_gej_is_infinity(&pr)) { + rustsecp256k1_v0_1_0_scalar_inverse_var(&sn, sigs); + rustsecp256k1_v0_1_0_scalar_mul(&u1, &sn, message); + rustsecp256k1_v0_1_0_scalar_mul(&u2, &sn, sigr); + rustsecp256k1_v0_1_0_gej_set_ge(&pubkeyj, pubkey); + rustsecp256k1_v0_1_0_ecmult(ctx, &pr, &pubkeyj, &u2, &u1); + if (rustsecp256k1_v0_1_0_gej_is_infinity(&pr)) { return 0; } #if defined(EXHAUSTIVE_TEST_ORDER) { - secp256k1_scalar computed_r; - secp256k1_ge pr_ge; - secp256k1_ge_set_gej(&pr_ge, &pr); - secp256k1_fe_normalize(&pr_ge.x); + rustsecp256k1_v0_1_0_scalar computed_r; + rustsecp256k1_v0_1_0_ge pr_ge; + rustsecp256k1_v0_1_0_ge_set_gej(&pr_ge, &pr); + rustsecp256k1_v0_1_0_fe_normalize(&pr_ge.x); - secp256k1_fe_get_b32(c, &pr_ge.x); - secp256k1_scalar_set_b32(&computed_r, c, NULL); - return secp256k1_scalar_eq(sigr, &computed_r); 
+ rustsecp256k1_v0_1_0_fe_get_b32(c, &pr_ge.x); + rustsecp256k1_v0_1_0_scalar_set_b32(&computed_r, c, NULL); + return rustsecp256k1_v0_1_0_scalar_eq(sigr, &computed_r); } #else - secp256k1_scalar_get_b32(c, sigr); - secp256k1_fe_set_b32(&xr, c); + rustsecp256k1_v0_1_0_scalar_get_b32(c, sigr); + rustsecp256k1_v0_1_0_fe_set_b32(&xr, c); /** We now have the recomputed R point in pr, and its claimed x coordinate (modulo n) * in xr. Naively, we would extract the x coordinate from pr (requiring a inversion modulo p), @@ -249,18 +249,18 @@ static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const * <=> (xr * pr.z^2 mod p == pr.x) || (xr + n < p && (xr + n) * pr.z^2 mod p == pr.x) * * Thus, we can avoid the inversion, but we have to check both cases separately. - * secp256k1_gej_eq_x implements the (xr * pr.z^2 mod p == pr.x) test. + * rustsecp256k1_v0_1_0_gej_eq_x implements the (xr * pr.z^2 mod p == pr.x) test. */ - if (secp256k1_gej_eq_x_var(&xr, &pr)) { + if (rustsecp256k1_v0_1_0_gej_eq_x_var(&xr, &pr)) { /* xr * pr.z^2 mod p == pr.x, so the signature is valid. */ return 1; } - if (secp256k1_fe_cmp_var(&xr, &secp256k1_ecdsa_const_p_minus_order) >= 0) { + if (rustsecp256k1_v0_1_0_fe_cmp_var(&xr, &rustsecp256k1_v0_1_0_ecdsa_const_p_minus_order) >= 0) { /* xr + n >= p, so we can skip testing the second case. */ return 0; } - secp256k1_fe_add(&xr, &secp256k1_ecdsa_const_order_as_fe); - if (secp256k1_gej_eq_x_var(&xr, &pr)) { + rustsecp256k1_v0_1_0_fe_add(&xr, &rustsecp256k1_v0_1_0_ecdsa_const_order_as_fe); + if (rustsecp256k1_v0_1_0_gej_eq_x_var(&xr, &pr)) { /* (xr + n) * pr.z^2 mod p == pr.x, so the signature is valid. */ return 1; } @@ -268,41 +268,41 @@ static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const #endif } -static int secp256k1_ecdsa_sig_sign(const secp256k1_ecmult_gen_context *ctx, secp256k1_scalar *sigr, secp256k1_scalar *sigs, const secp256k1_scalar *seckey, const secp256k1_scalar *message, const secp256k1_scalar *nonce, int *recid) { +static int rustsecp256k1_v0_1_0_ecdsa_sig_sign(const rustsecp256k1_v0_1_0_ecmult_gen_context *ctx, rustsecp256k1_v0_1_0_scalar *sigr, rustsecp256k1_v0_1_0_scalar *sigs, const rustsecp256k1_v0_1_0_scalar *seckey, const rustsecp256k1_v0_1_0_scalar *message, const rustsecp256k1_v0_1_0_scalar *nonce, int *recid) { unsigned char b[32]; - secp256k1_gej rp; - secp256k1_ge r; - secp256k1_scalar n; + rustsecp256k1_v0_1_0_gej rp; + rustsecp256k1_v0_1_0_ge r; + rustsecp256k1_v0_1_0_scalar n; int overflow = 0; - secp256k1_ecmult_gen(ctx, &rp, nonce); - secp256k1_ge_set_gej(&r, &rp); - secp256k1_fe_normalize(&r.x); - secp256k1_fe_normalize(&r.y); - secp256k1_fe_get_b32(b, &r.x); - secp256k1_scalar_set_b32(sigr, b, &overflow); + rustsecp256k1_v0_1_0_ecmult_gen(ctx, &rp, nonce); + rustsecp256k1_v0_1_0_ge_set_gej(&r, &rp); + rustsecp256k1_v0_1_0_fe_normalize(&r.x); + rustsecp256k1_v0_1_0_fe_normalize(&r.y); + rustsecp256k1_v0_1_0_fe_get_b32(b, &r.x); + rustsecp256k1_v0_1_0_scalar_set_b32(sigr, b, &overflow); /* These two conditions should be checked before calling */ - VERIFY_CHECK(!secp256k1_scalar_is_zero(sigr)); + VERIFY_CHECK(!rustsecp256k1_v0_1_0_scalar_is_zero(sigr)); VERIFY_CHECK(overflow == 0); if (recid) { /* The overflow condition is cryptographically unreachable as hitting it requires finding the discrete log * of some P where P.x >= order, and only 1 in about 2^127 points meet this criteria. */ - *recid = (overflow ? 2 : 0) | (secp256k1_fe_is_odd(&r.y) ? 1 : 0); + *recid = (overflow ? 
2 : 0) | (rustsecp256k1_v0_1_0_fe_is_odd(&r.y) ? 1 : 0); } - secp256k1_scalar_mul(&n, sigr, seckey); - secp256k1_scalar_add(&n, &n, message); - secp256k1_scalar_inverse(sigs, nonce); - secp256k1_scalar_mul(sigs, sigs, &n); - secp256k1_scalar_clear(&n); - secp256k1_gej_clear(&rp); - secp256k1_ge_clear(&r); - if (secp256k1_scalar_is_zero(sigs)) { + rustsecp256k1_v0_1_0_scalar_mul(&n, sigr, seckey); + rustsecp256k1_v0_1_0_scalar_add(&n, &n, message); + rustsecp256k1_v0_1_0_scalar_inverse(sigs, nonce); + rustsecp256k1_v0_1_0_scalar_mul(sigs, sigs, &n); + rustsecp256k1_v0_1_0_scalar_clear(&n); + rustsecp256k1_v0_1_0_gej_clear(&rp); + rustsecp256k1_v0_1_0_ge_clear(&r); + if (rustsecp256k1_v0_1_0_scalar_is_zero(sigs)) { return 0; } - if (secp256k1_scalar_is_high(sigs)) { - secp256k1_scalar_negate(sigs, sigs); + if (rustsecp256k1_v0_1_0_scalar_is_high(sigs)) { + rustsecp256k1_v0_1_0_scalar_negate(sigs, sigs); if (recid) { *recid ^= 1; } diff --git a/secp256k1-sys/depend/secp256k1/src/eckey.h b/secp256k1-sys/depend/secp256k1/src/eckey.h index b621f1e..55de674 100644 --- a/secp256k1-sys/depend/secp256k1/src/eckey.h +++ b/secp256k1-sys/depend/secp256k1/src/eckey.h @@ -14,12 +14,12 @@ #include "ecmult.h" #include "ecmult_gen.h" -static int secp256k1_eckey_pubkey_parse(secp256k1_ge *elem, const unsigned char *pub, size_t size); -static int secp256k1_eckey_pubkey_serialize(secp256k1_ge *elem, unsigned char *pub, size_t *size, int compressed); +static int rustsecp256k1_v0_1_0_eckey_pubkey_parse(rustsecp256k1_v0_1_0_ge *elem, const unsigned char *pub, size_t size); +static int rustsecp256k1_v0_1_0_eckey_pubkey_serialize(rustsecp256k1_v0_1_0_ge *elem, unsigned char *pub, size_t *size, int compressed); -static int secp256k1_eckey_privkey_tweak_add(secp256k1_scalar *key, const secp256k1_scalar *tweak); -static int secp256k1_eckey_pubkey_tweak_add(const secp256k1_ecmult_context *ctx, secp256k1_ge *key, const secp256k1_scalar *tweak); -static int secp256k1_eckey_privkey_tweak_mul(secp256k1_scalar *key, const secp256k1_scalar *tweak); -static int secp256k1_eckey_pubkey_tweak_mul(const secp256k1_ecmult_context *ctx, secp256k1_ge *key, const secp256k1_scalar *tweak); +static int rustsecp256k1_v0_1_0_eckey_privkey_tweak_add(rustsecp256k1_v0_1_0_scalar *key, const rustsecp256k1_v0_1_0_scalar *tweak); +static int rustsecp256k1_v0_1_0_eckey_pubkey_tweak_add(const rustsecp256k1_v0_1_0_ecmult_context *ctx, rustsecp256k1_v0_1_0_ge *key, const rustsecp256k1_v0_1_0_scalar *tweak); +static int rustsecp256k1_v0_1_0_eckey_privkey_tweak_mul(rustsecp256k1_v0_1_0_scalar *key, const rustsecp256k1_v0_1_0_scalar *tweak); +static int rustsecp256k1_v0_1_0_eckey_pubkey_tweak_mul(const rustsecp256k1_v0_1_0_ecmult_context *ctx, rustsecp256k1_v0_1_0_ge *key, const rustsecp256k1_v0_1_0_scalar *tweak); #endif /* SECP256K1_ECKEY_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/eckey_impl.h b/secp256k1-sys/depend/secp256k1/src/eckey_impl.h index 7c5b789..c0ba4ce 100644 --- a/secp256k1-sys/depend/secp256k1/src/eckey_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/eckey_impl.h @@ -14,86 +14,86 @@ #include "group.h" #include "ecmult_gen.h" -static int secp256k1_eckey_pubkey_parse(secp256k1_ge *elem, const unsigned char *pub, size_t size) { +static int rustsecp256k1_v0_1_0_eckey_pubkey_parse(rustsecp256k1_v0_1_0_ge *elem, const unsigned char *pub, size_t size) { if (size == 33 && (pub[0] == SECP256K1_TAG_PUBKEY_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_ODD)) { - secp256k1_fe x; - return secp256k1_fe_set_b32(&x, pub+1) && 
secp256k1_ge_set_xo_var(elem, &x, pub[0] == SECP256K1_TAG_PUBKEY_ODD); + rustsecp256k1_v0_1_0_fe x; + return rustsecp256k1_v0_1_0_fe_set_b32(&x, pub+1) && rustsecp256k1_v0_1_0_ge_set_xo_var(elem, &x, pub[0] == SECP256K1_TAG_PUBKEY_ODD); } else if (size == 65 && (pub[0] == SECP256K1_TAG_PUBKEY_UNCOMPRESSED || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) { - secp256k1_fe x, y; - if (!secp256k1_fe_set_b32(&x, pub+1) || !secp256k1_fe_set_b32(&y, pub+33)) { + rustsecp256k1_v0_1_0_fe x, y; + if (!rustsecp256k1_v0_1_0_fe_set_b32(&x, pub+1) || !rustsecp256k1_v0_1_0_fe_set_b32(&y, pub+33)) { return 0; } - secp256k1_ge_set_xy(elem, &x, &y); + rustsecp256k1_v0_1_0_ge_set_xy(elem, &x, &y); if ((pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD) && - secp256k1_fe_is_odd(&y) != (pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) { + rustsecp256k1_v0_1_0_fe_is_odd(&y) != (pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) { return 0; } - return secp256k1_ge_is_valid_var(elem); + return rustsecp256k1_v0_1_0_ge_is_valid_var(elem); } else { return 0; } } -static int secp256k1_eckey_pubkey_serialize(secp256k1_ge *elem, unsigned char *pub, size_t *size, int compressed) { - if (secp256k1_ge_is_infinity(elem)) { +static int rustsecp256k1_v0_1_0_eckey_pubkey_serialize(rustsecp256k1_v0_1_0_ge *elem, unsigned char *pub, size_t *size, int compressed) { + if (rustsecp256k1_v0_1_0_ge_is_infinity(elem)) { return 0; } - secp256k1_fe_normalize_var(&elem->x); - secp256k1_fe_normalize_var(&elem->y); - secp256k1_fe_get_b32(&pub[1], &elem->x); + rustsecp256k1_v0_1_0_fe_normalize_var(&elem->x); + rustsecp256k1_v0_1_0_fe_normalize_var(&elem->y); + rustsecp256k1_v0_1_0_fe_get_b32(&pub[1], &elem->x); if (compressed) { *size = 33; - pub[0] = secp256k1_fe_is_odd(&elem->y) ? SECP256K1_TAG_PUBKEY_ODD : SECP256K1_TAG_PUBKEY_EVEN; + pub[0] = rustsecp256k1_v0_1_0_fe_is_odd(&elem->y) ? 
SECP256K1_TAG_PUBKEY_ODD : SECP256K1_TAG_PUBKEY_EVEN; } else { *size = 65; pub[0] = SECP256K1_TAG_PUBKEY_UNCOMPRESSED; - secp256k1_fe_get_b32(&pub[33], &elem->y); + rustsecp256k1_v0_1_0_fe_get_b32(&pub[33], &elem->y); } return 1; } -static int secp256k1_eckey_privkey_tweak_add(secp256k1_scalar *key, const secp256k1_scalar *tweak) { - secp256k1_scalar_add(key, key, tweak); - if (secp256k1_scalar_is_zero(key)) { +static int rustsecp256k1_v0_1_0_eckey_privkey_tweak_add(rustsecp256k1_v0_1_0_scalar *key, const rustsecp256k1_v0_1_0_scalar *tweak) { + rustsecp256k1_v0_1_0_scalar_add(key, key, tweak); + if (rustsecp256k1_v0_1_0_scalar_is_zero(key)) { return 0; } return 1; } -static int secp256k1_eckey_pubkey_tweak_add(const secp256k1_ecmult_context *ctx, secp256k1_ge *key, const secp256k1_scalar *tweak) { - secp256k1_gej pt; - secp256k1_scalar one; - secp256k1_gej_set_ge(&pt, key); - secp256k1_scalar_set_int(&one, 1); - secp256k1_ecmult(ctx, &pt, &pt, &one, tweak); +static int rustsecp256k1_v0_1_0_eckey_pubkey_tweak_add(const rustsecp256k1_v0_1_0_ecmult_context *ctx, rustsecp256k1_v0_1_0_ge *key, const rustsecp256k1_v0_1_0_scalar *tweak) { + rustsecp256k1_v0_1_0_gej pt; + rustsecp256k1_v0_1_0_scalar one; + rustsecp256k1_v0_1_0_gej_set_ge(&pt, key); + rustsecp256k1_v0_1_0_scalar_set_int(&one, 1); + rustsecp256k1_v0_1_0_ecmult(ctx, &pt, &pt, &one, tweak); - if (secp256k1_gej_is_infinity(&pt)) { + if (rustsecp256k1_v0_1_0_gej_is_infinity(&pt)) { return 0; } - secp256k1_ge_set_gej(key, &pt); + rustsecp256k1_v0_1_0_ge_set_gej(key, &pt); return 1; } -static int secp256k1_eckey_privkey_tweak_mul(secp256k1_scalar *key, const secp256k1_scalar *tweak) { - if (secp256k1_scalar_is_zero(tweak)) { +static int rustsecp256k1_v0_1_0_eckey_privkey_tweak_mul(rustsecp256k1_v0_1_0_scalar *key, const rustsecp256k1_v0_1_0_scalar *tweak) { + if (rustsecp256k1_v0_1_0_scalar_is_zero(tweak)) { return 0; } - secp256k1_scalar_mul(key, key, tweak); + rustsecp256k1_v0_1_0_scalar_mul(key, key, tweak); return 1; } -static int secp256k1_eckey_pubkey_tweak_mul(const secp256k1_ecmult_context *ctx, secp256k1_ge *key, const secp256k1_scalar *tweak) { - secp256k1_scalar zero; - secp256k1_gej pt; - if (secp256k1_scalar_is_zero(tweak)) { +static int rustsecp256k1_v0_1_0_eckey_pubkey_tweak_mul(const rustsecp256k1_v0_1_0_ecmult_context *ctx, rustsecp256k1_v0_1_0_ge *key, const rustsecp256k1_v0_1_0_scalar *tweak) { + rustsecp256k1_v0_1_0_scalar zero; + rustsecp256k1_v0_1_0_gej pt; + if (rustsecp256k1_v0_1_0_scalar_is_zero(tweak)) { return 0; } - secp256k1_scalar_set_int(&zero, 0); - secp256k1_gej_set_ge(&pt, key); - secp256k1_ecmult(ctx, &pt, &pt, tweak, &zero); - secp256k1_ge_set_gej(key, &pt); + rustsecp256k1_v0_1_0_scalar_set_int(&zero, 0); + rustsecp256k1_v0_1_0_gej_set_ge(&pt, key); + rustsecp256k1_v0_1_0_ecmult(ctx, &pt, &pt, tweak, &zero); + rustsecp256k1_v0_1_0_ge_set_gej(key, &pt); return 1; } diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult.h b/secp256k1-sys/depend/secp256k1/src/ecmult.h index c9b1982..3481060 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult.h @@ -14,23 +14,23 @@ typedef struct { /* For accelerating the computation of a*P + b*G: */ - secp256k1_ge_storage (*pre_g)[]; /* odd multiples of the generator */ + rustsecp256k1_v0_1_0_ge_storage (*pre_g)[]; /* odd multiples of the generator */ #ifdef USE_ENDOMORPHISM - secp256k1_ge_storage (*pre_g_128)[]; /* odd multiples of 2^128*generator */ + rustsecp256k1_v0_1_0_ge_storage (*pre_g_128)[]; /* odd multiples of 
2^128*generator */ #endif -} secp256k1_ecmult_context; +} rustsecp256k1_v0_1_0_ecmult_context; static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE; -static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx); -static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc); -static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src); -static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx); -static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx); +static void rustsecp256k1_v0_1_0_ecmult_context_init(rustsecp256k1_v0_1_0_ecmult_context *ctx); +static void rustsecp256k1_v0_1_0_ecmult_context_build(rustsecp256k1_v0_1_0_ecmult_context *ctx, void **prealloc); +static void rustsecp256k1_v0_1_0_ecmult_context_finalize_memcpy(rustsecp256k1_v0_1_0_ecmult_context *dst, const rustsecp256k1_v0_1_0_ecmult_context *src); +static void rustsecp256k1_v0_1_0_ecmult_context_clear(rustsecp256k1_v0_1_0_ecmult_context *ctx); +static int rustsecp256k1_v0_1_0_ecmult_context_is_built(const rustsecp256k1_v0_1_0_ecmult_context *ctx); /** Double multiply: R = na*A + ng*G */ -static void secp256k1_ecmult(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng); +static void rustsecp256k1_v0_1_0_ecmult(const rustsecp256k1_v0_1_0_ecmult_context *ctx, rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_scalar *na, const rustsecp256k1_v0_1_0_scalar *ng); -typedef int (secp256k1_ecmult_multi_callback)(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data); +typedef int (rustsecp256k1_v0_1_0_ecmult_multi_callback)(rustsecp256k1_v0_1_0_scalar *sc, rustsecp256k1_v0_1_0_ge *pt, size_t idx, void *data); /** * Multi-multiply: R = inp_g_sc * G + sum_i ni * Ai. @@ -43,6 +43,6 @@ typedef int (secp256k1_ecmult_multi_callback)(secp256k1_scalar *sc, secp256k1_ge * 0 if there is not enough scratch space for a single point or * callback returns 0 */ -static int secp256k1_ecmult_multi_var(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n); +static int rustsecp256k1_v0_1_0_ecmult_multi_var(const rustsecp256k1_v0_1_0_callback* error_callback, const rustsecp256k1_v0_1_0_ecmult_context *ctx, rustsecp256k1_v0_1_0_scratch *scratch, rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_scalar *inp_g_sc, rustsecp256k1_v0_1_0_ecmult_multi_callback cb, void *cbdata, size_t n); #endif /* SECP256K1_ECMULT_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_const.h b/secp256k1-sys/depend/secp256k1/src/ecmult_const.h index d4804b8..bc81cc1 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_const.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_const.h @@ -12,6 +12,6 @@ /* Here `bits` should be set to the maximum bitlength of the _absolute value_ of `q`, plus * one because we internally sometimes add 2 to the number during the WNAF conversion. 
*/ -static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *q, int bits); +static void rustsecp256k1_v0_1_0_ecmult_const(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_ge *a, const rustsecp256k1_v0_1_0_scalar *q, int bits); #endif /* SECP256K1_ECMULT_CONST_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_const_impl.h b/secp256k1-sys/depend/secp256k1/src/ecmult_const_impl.h index aaa576a..fa6e220 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_const_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_const_impl.h @@ -17,21 +17,21 @@ int m; \ int abs_n = (n) * (((n) > 0) * 2 - 1); \ int idx_n = abs_n / 2; \ - secp256k1_fe neg_y; \ + rustsecp256k1_v0_1_0_fe neg_y; \ VERIFY_CHECK(((n) & 1) == 1); \ VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \ VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \ - VERIFY_SETUP(secp256k1_fe_clear(&(r)->x)); \ - VERIFY_SETUP(secp256k1_fe_clear(&(r)->y)); \ + VERIFY_SETUP(rustsecp256k1_v0_1_0_fe_clear(&(r)->x)); \ + VERIFY_SETUP(rustsecp256k1_v0_1_0_fe_clear(&(r)->y)); \ for (m = 0; m < ECMULT_TABLE_SIZE(w); m++) { \ /* This loop is used to avoid secret data in array indices. See * the comment in ecmult_gen_impl.h for rationale. */ \ - secp256k1_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \ - secp256k1_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \ + rustsecp256k1_v0_1_0_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \ + rustsecp256k1_v0_1_0_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \ } \ (r)->infinity = 0; \ - secp256k1_fe_negate(&neg_y, &(r)->y, 1); \ - secp256k1_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \ + rustsecp256k1_v0_1_0_fe_negate(&neg_y, &(r)->y, 1); \ + rustsecp256k1_v0_1_0_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \ } while(0) @@ -48,7 +48,7 @@ * * Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on pp. 335 */ -static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w, int size) { +static int rustsecp256k1_v0_1_0_wnaf_const(int *wnaf, const rustsecp256k1_v0_1_0_scalar *scalar, int w, int size) { int global_sign; int skew = 0; int word = 0; @@ -59,7 +59,7 @@ static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w int flip; int bit; - secp256k1_scalar s; + rustsecp256k1_v0_1_0_scalar s; int not_neg_one; VERIFY_CHECK(w > 0); @@ -77,33 +77,33 @@ static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w * particular, to ensure that the outputs from the endomorphism-split fit into * 128 bits). If we negate, the parity of our number flips, inverting which of * {1, 2} we want to add to the scalar when ensuring that it's odd. Further - * complicating things, -1 interacts badly with `secp256k1_scalar_cadd_bit` and + * complicating things, -1 interacts badly with `rustsecp256k1_v0_1_0_scalar_cadd_bit` and * we need to special-case it in this logic. 
*/ - flip = secp256k1_scalar_is_high(scalar); + flip = rustsecp256k1_v0_1_0_scalar_is_high(scalar); /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */ - bit = flip ^ !secp256k1_scalar_is_even(scalar); + bit = flip ^ !rustsecp256k1_v0_1_0_scalar_is_even(scalar); /* We check for negative one, since adding 2 to it will cause an overflow */ - secp256k1_scalar_negate(&s, scalar); - not_neg_one = !secp256k1_scalar_is_one(&s); + rustsecp256k1_v0_1_0_scalar_negate(&s, scalar); + not_neg_one = !rustsecp256k1_v0_1_0_scalar_is_one(&s); s = *scalar; - secp256k1_scalar_cadd_bit(&s, bit, not_neg_one); + rustsecp256k1_v0_1_0_scalar_cadd_bit(&s, bit, not_neg_one); /* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects * that we added two to it and flipped it. In fact for -1 these operations are * identical. We only flipped, but since skewing is required (in the sense that * the skew must be 1 or 2, never zero) and flipping is not, we need to change * our flags to claim that we only skewed. */ - global_sign = secp256k1_scalar_cond_negate(&s, flip); + global_sign = rustsecp256k1_v0_1_0_scalar_cond_negate(&s, flip); global_sign *= not_neg_one * 2 - 1; skew = 1 << bit; /* 4 */ - u_last = secp256k1_scalar_shr_int(&s, w); + u_last = rustsecp256k1_v0_1_0_scalar_shr_int(&s, w); do { int sign; int even; /* 4.1 4.4 */ - u = secp256k1_scalar_shr_int(&s, w); + u = rustsecp256k1_v0_1_0_scalar_shr_int(&s, w); /* 4.2 */ even = ((u & 1) == 0); sign = 2 * (u_last > 0) - 1; @@ -117,22 +117,22 @@ static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w } while (word * w < size); wnaf[word] = u * global_sign; - VERIFY_CHECK(secp256k1_scalar_is_zero(&s)); + VERIFY_CHECK(rustsecp256k1_v0_1_0_scalar_is_zero(&s)); VERIFY_CHECK(word == WNAF_SIZE_BITS(size, w)); return skew; } -static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *scalar, int size) { - secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; - secp256k1_ge tmpa; - secp256k1_fe Z; +static void rustsecp256k1_v0_1_0_ecmult_const(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_ge *a, const rustsecp256k1_v0_1_0_scalar *scalar, int size) { + rustsecp256k1_v0_1_0_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; + rustsecp256k1_v0_1_0_ge tmpa; + rustsecp256k1_v0_1_0_fe Z; int skew_1; #ifdef USE_ENDOMORPHISM - secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; + rustsecp256k1_v0_1_0_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)]; int skew_lam; - secp256k1_scalar q_1, q_lam; + rustsecp256k1_v0_1_0_scalar q_1, q_lam; #endif int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)]; @@ -144,13 +144,13 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons if (size > 128) { rsize = 128; /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */ - secp256k1_scalar_split_lambda(&q_1, &q_lam, scalar); - skew_1 = secp256k1_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128); - skew_lam = secp256k1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128); + rustsecp256k1_v0_1_0_scalar_split_lambda(&q_1, &q_lam, scalar); + skew_1 = rustsecp256k1_v0_1_0_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128); + skew_lam = rustsecp256k1_v0_1_0_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128); } else #endif { - skew_1 = secp256k1_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size); + skew_1 = rustsecp256k1_v0_1_0_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size); #ifdef USE_ENDOMORPHISM skew_lam = 0; #endif @@ -162,15 
+162,15 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons * that the Z coordinate was 1, use affine addition formulae, and correct * the Z coordinate of the result once at the end. */ - secp256k1_gej_set_ge(r, a); - secp256k1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r); + rustsecp256k1_v0_1_0_gej_set_ge(r, a); + rustsecp256k1_v0_1_0_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r); for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { - secp256k1_fe_normalize_weak(&pre_a[i].y); + rustsecp256k1_v0_1_0_fe_normalize_weak(&pre_a[i].y); } #ifdef USE_ENDOMORPHISM if (size > 128) { for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { - secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]); + rustsecp256k1_v0_1_0_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]); } } #endif @@ -181,13 +181,13 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons i = wnaf_1[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)]; VERIFY_CHECK(i != 0); ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A); - secp256k1_gej_set_ge(r, &tmpa); + rustsecp256k1_v0_1_0_gej_set_ge(r, &tmpa); #ifdef USE_ENDOMORPHISM if (size > 128) { i = wnaf_lam[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)]; VERIFY_CHECK(i != 0); ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A); - secp256k1_gej_add_ge(r, r, &tmpa); + rustsecp256k1_v0_1_0_gej_add_ge(r, r, &tmpa); } #endif /* remaining loop iterations */ @@ -195,64 +195,64 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons int n; int j; for (j = 0; j < WINDOW_A - 1; ++j) { - secp256k1_gej_double_nonzero(r, r, NULL); + rustsecp256k1_v0_1_0_gej_double_nonzero(r, r, NULL); } n = wnaf_1[i]; ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A); VERIFY_CHECK(n != 0); - secp256k1_gej_add_ge(r, r, &tmpa); + rustsecp256k1_v0_1_0_gej_add_ge(r, r, &tmpa); #ifdef USE_ENDOMORPHISM if (size > 128) { n = wnaf_lam[i]; ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A); VERIFY_CHECK(n != 0); - secp256k1_gej_add_ge(r, r, &tmpa); + rustsecp256k1_v0_1_0_gej_add_ge(r, r, &tmpa); } #endif } - secp256k1_fe_mul(&r->z, &r->z, &Z); + rustsecp256k1_v0_1_0_fe_mul(&r->z, &r->z, &Z); { /* Correct for wNAF skew */ - secp256k1_ge correction = *a; - secp256k1_ge_storage correction_1_stor; + rustsecp256k1_v0_1_0_ge correction = *a; + rustsecp256k1_v0_1_0_ge_storage correction_1_stor; #ifdef USE_ENDOMORPHISM - secp256k1_ge_storage correction_lam_stor; + rustsecp256k1_v0_1_0_ge_storage correction_lam_stor; #endif - secp256k1_ge_storage a2_stor; - secp256k1_gej tmpj; - secp256k1_gej_set_ge(&tmpj, &correction); - secp256k1_gej_double_var(&tmpj, &tmpj, NULL); - secp256k1_ge_set_gej(&correction, &tmpj); - secp256k1_ge_to_storage(&correction_1_stor, a); + rustsecp256k1_v0_1_0_ge_storage a2_stor; + rustsecp256k1_v0_1_0_gej tmpj; + rustsecp256k1_v0_1_0_gej_set_ge(&tmpj, &correction); + rustsecp256k1_v0_1_0_gej_double_var(&tmpj, &tmpj, NULL); + rustsecp256k1_v0_1_0_ge_set_gej(&correction, &tmpj); + rustsecp256k1_v0_1_0_ge_to_storage(&correction_1_stor, a); #ifdef USE_ENDOMORPHISM if (size > 128) { - secp256k1_ge_to_storage(&correction_lam_stor, a); + rustsecp256k1_v0_1_0_ge_to_storage(&correction_lam_stor, a); } #endif - secp256k1_ge_to_storage(&a2_stor, &correction); + rustsecp256k1_v0_1_0_ge_to_storage(&a2_stor, &correction); /* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */ - secp256k1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2); + rustsecp256k1_v0_1_0_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 
2); #ifdef USE_ENDOMORPHISM if (size > 128) { - secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2); + rustsecp256k1_v0_1_0_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2); } #endif /* Apply the correction */ - secp256k1_ge_from_storage(&correction, &correction_1_stor); - secp256k1_ge_neg(&correction, &correction); - secp256k1_gej_add_ge(r, r, &correction); + rustsecp256k1_v0_1_0_ge_from_storage(&correction, &correction_1_stor); + rustsecp256k1_v0_1_0_ge_neg(&correction, &correction); + rustsecp256k1_v0_1_0_gej_add_ge(r, r, &correction); #ifdef USE_ENDOMORPHISM if (size > 128) { - secp256k1_ge_from_storage(&correction, &correction_lam_stor); - secp256k1_ge_neg(&correction, &correction); - secp256k1_ge_mul_lambda(&correction, &correction); - secp256k1_gej_add_ge(r, r, &correction); + rustsecp256k1_v0_1_0_ge_from_storage(&correction, &correction_lam_stor); + rustsecp256k1_v0_1_0_ge_neg(&correction, &correction); + rustsecp256k1_v0_1_0_ge_mul_lambda(&correction, &correction); + rustsecp256k1_v0_1_0_gej_add_ge(r, r, &correction); } #endif } diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_gen.h b/secp256k1-sys/depend/secp256k1/src/ecmult_gen.h index b136e94..42854c5 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_gen.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_gen.h @@ -23,21 +23,21 @@ typedef struct { * None of the resulting prec group elements have a known scalar, and neither do any of * the intermediate sums while computing a*G. */ - secp256k1_ge_storage (*prec)[64][16]; /* prec[j][i] = 16^j * i * G + U_i */ - secp256k1_scalar blind; - secp256k1_gej initial; -} secp256k1_ecmult_gen_context; + rustsecp256k1_v0_1_0_ge_storage (*prec)[64][16]; /* prec[j][i] = 16^j * i * G + U_i */ + rustsecp256k1_v0_1_0_scalar blind; + rustsecp256k1_v0_1_0_gej initial; +} rustsecp256k1_v0_1_0_ecmult_gen_context; static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE; -static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context* ctx); -static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context* ctx, void **prealloc); -static void secp256k1_ecmult_gen_context_finalize_memcpy(secp256k1_ecmult_gen_context *dst, const secp256k1_ecmult_gen_context* src); -static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context* ctx); -static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_context* ctx); +static void rustsecp256k1_v0_1_0_ecmult_gen_context_init(rustsecp256k1_v0_1_0_ecmult_gen_context* ctx); +static void rustsecp256k1_v0_1_0_ecmult_gen_context_build(rustsecp256k1_v0_1_0_ecmult_gen_context* ctx, void **prealloc); +static void rustsecp256k1_v0_1_0_ecmult_gen_context_finalize_memcpy(rustsecp256k1_v0_1_0_ecmult_gen_context *dst, const rustsecp256k1_v0_1_0_ecmult_gen_context* src); +static void rustsecp256k1_v0_1_0_ecmult_gen_context_clear(rustsecp256k1_v0_1_0_ecmult_gen_context* ctx); +static int rustsecp256k1_v0_1_0_ecmult_gen_context_is_built(const rustsecp256k1_v0_1_0_ecmult_gen_context* ctx); /** Multiply with the generator: R = a*G */ -static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context* ctx, secp256k1_gej *r, const secp256k1_scalar *a); +static void rustsecp256k1_v0_1_0_ecmult_gen(const rustsecp256k1_v0_1_0_ecmult_gen_context* ctx, rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_scalar *a); -static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context *ctx, const unsigned char *seed32); +static void 
rustsecp256k1_v0_1_0_ecmult_gen_blind(rustsecp256k1_v0_1_0_ecmult_gen_context *ctx, const unsigned char *seed32); #endif /* SECP256K1_ECMULT_GEN_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_gen_impl.h b/secp256k1-sys/depend/secp256k1/src/ecmult_gen_impl.h index f818d45..7c204c3 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_gen_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_gen_impl.h @@ -17,20 +17,20 @@ #endif #ifndef USE_ECMULT_STATIC_PRECOMPUTATION - static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = ROUND_TO_ALIGN(sizeof(*((secp256k1_ecmult_gen_context*) NULL)->prec)); + static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = ROUND_TO_ALIGN(sizeof(*((rustsecp256k1_v0_1_0_ecmult_gen_context*) NULL)->prec)); #else static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = 0; #endif -static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context *ctx) { +static void rustsecp256k1_v0_1_0_ecmult_gen_context_init(rustsecp256k1_v0_1_0_ecmult_gen_context *ctx) { ctx->prec = NULL; } -static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx, void **prealloc) { +static void rustsecp256k1_v0_1_0_ecmult_gen_context_build(rustsecp256k1_v0_1_0_ecmult_gen_context *ctx, void **prealloc) { #ifndef USE_ECMULT_STATIC_PRECOMPUTATION - secp256k1_ge prec[1024]; - secp256k1_gej gj; - secp256k1_gej nums_gej; + rustsecp256k1_v0_1_0_ge prec[1024]; + rustsecp256k1_v0_1_0_gej gj; + rustsecp256k1_v0_1_0_gej nums_gej; int i, j; size_t const prealloc_size = SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE; void* const base = *prealloc; @@ -40,101 +40,101 @@ static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx return; } #ifndef USE_ECMULT_STATIC_PRECOMPUTATION - ctx->prec = (secp256k1_ge_storage (*)[64][16])manual_alloc(prealloc, prealloc_size, base, prealloc_size); + ctx->prec = (rustsecp256k1_v0_1_0_ge_storage (*)[64][16])manual_alloc(prealloc, prealloc_size, base, prealloc_size); /* get the generator */ - secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g); + rustsecp256k1_v0_1_0_gej_set_ge(&gj, &rustsecp256k1_v0_1_0_ge_const_g); /* Construct a group element with no known corresponding scalar (nothing up my sleeve). */ { static const unsigned char nums_b32[33] = "The scalar for this x is unknown"; - secp256k1_fe nums_x; - secp256k1_ge nums_ge; + rustsecp256k1_v0_1_0_fe nums_x; + rustsecp256k1_v0_1_0_ge nums_ge; int r; - r = secp256k1_fe_set_b32(&nums_x, nums_b32); + r = rustsecp256k1_v0_1_0_fe_set_b32(&nums_x, nums_b32); (void)r; VERIFY_CHECK(r); - r = secp256k1_ge_set_xo_var(&nums_ge, &nums_x, 0); + r = rustsecp256k1_v0_1_0_ge_set_xo_var(&nums_ge, &nums_x, 0); (void)r; VERIFY_CHECK(r); - secp256k1_gej_set_ge(&nums_gej, &nums_ge); + rustsecp256k1_v0_1_0_gej_set_ge(&nums_gej, &nums_ge); /* Add G to make the bits in x uniformly distributed. */ - secp256k1_gej_add_ge_var(&nums_gej, &nums_gej, &secp256k1_ge_const_g, NULL); + rustsecp256k1_v0_1_0_gej_add_ge_var(&nums_gej, &nums_gej, &rustsecp256k1_v0_1_0_ge_const_g, NULL); } /* compute prec. */ { - secp256k1_gej precj[1024]; /* Jacobian versions of prec. */ - secp256k1_gej gbase; - secp256k1_gej numsbase; + rustsecp256k1_v0_1_0_gej precj[1024]; /* Jacobian versions of prec. */ + rustsecp256k1_v0_1_0_gej gbase; + rustsecp256k1_v0_1_0_gej numsbase; gbase = gj; /* 16^j * G */ numsbase = nums_gej; /* 2^j * nums. */ for (j = 0; j < 64; j++) { /* Set precj[j*16 .. j*16+15] to (numsbase, numsbase + gbase, ..., numsbase + 15*gbase). 
*/ precj[j*16] = numsbase; for (i = 1; i < 16; i++) { - secp256k1_gej_add_var(&precj[j*16 + i], &precj[j*16 + i - 1], &gbase, NULL); + rustsecp256k1_v0_1_0_gej_add_var(&precj[j*16 + i], &precj[j*16 + i - 1], &gbase, NULL); } /* Multiply gbase by 16. */ for (i = 0; i < 4; i++) { - secp256k1_gej_double_var(&gbase, &gbase, NULL); + rustsecp256k1_v0_1_0_gej_double_var(&gbase, &gbase, NULL); } /* Multiply numbase by 2. */ - secp256k1_gej_double_var(&numsbase, &numsbase, NULL); + rustsecp256k1_v0_1_0_gej_double_var(&numsbase, &numsbase, NULL); if (j == 62) { /* In the last iteration, numsbase is (1 - 2^j) * nums instead. */ - secp256k1_gej_neg(&numsbase, &numsbase); - secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL); + rustsecp256k1_v0_1_0_gej_neg(&numsbase, &numsbase); + rustsecp256k1_v0_1_0_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL); } } - secp256k1_ge_set_all_gej_var(prec, precj, 1024); + rustsecp256k1_v0_1_0_ge_set_all_gej_var(prec, precj, 1024); } for (j = 0; j < 64; j++) { for (i = 0; i < 16; i++) { - secp256k1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*16 + i]); + rustsecp256k1_v0_1_0_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*16 + i]); } } #else (void)prealloc; - ctx->prec = (secp256k1_ge_storage (*)[64][16])secp256k1_ecmult_static_context; + ctx->prec = (rustsecp256k1_v0_1_0_ge_storage (*)[64][16])rustsecp256k1_v0_1_0_ecmult_static_context; #endif - secp256k1_ecmult_gen_blind(ctx, NULL); + rustsecp256k1_v0_1_0_ecmult_gen_blind(ctx, NULL); } -static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_context* ctx) { +static int rustsecp256k1_v0_1_0_ecmult_gen_context_is_built(const rustsecp256k1_v0_1_0_ecmult_gen_context* ctx) { return ctx->prec != NULL; } -static void secp256k1_ecmult_gen_context_finalize_memcpy(secp256k1_ecmult_gen_context *dst, const secp256k1_ecmult_gen_context *src) { +static void rustsecp256k1_v0_1_0_ecmult_gen_context_finalize_memcpy(rustsecp256k1_v0_1_0_ecmult_gen_context *dst, const rustsecp256k1_v0_1_0_ecmult_gen_context *src) { #ifndef USE_ECMULT_STATIC_PRECOMPUTATION if (src->prec != NULL) { /* We cast to void* first to suppress a -Wcast-align warning. */ - dst->prec = (secp256k1_ge_storage (*)[64][16])(void*)((unsigned char*)dst + ((unsigned char*)src->prec - (unsigned char*)src)); + dst->prec = (rustsecp256k1_v0_1_0_ge_storage (*)[64][16])(void*)((unsigned char*)dst + ((unsigned char*)src->prec - (unsigned char*)src)); } #else (void)dst, (void)src; #endif } -static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context *ctx) { - secp256k1_scalar_clear(&ctx->blind); - secp256k1_gej_clear(&ctx->initial); +static void rustsecp256k1_v0_1_0_ecmult_gen_context_clear(rustsecp256k1_v0_1_0_ecmult_gen_context *ctx) { + rustsecp256k1_v0_1_0_scalar_clear(&ctx->blind); + rustsecp256k1_v0_1_0_gej_clear(&ctx->initial); ctx->prec = NULL; } -static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp256k1_gej *r, const secp256k1_scalar *gn) { - secp256k1_ge add; - secp256k1_ge_storage adds; - secp256k1_scalar gnb; +static void rustsecp256k1_v0_1_0_ecmult_gen(const rustsecp256k1_v0_1_0_ecmult_gen_context *ctx, rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_scalar *gn) { + rustsecp256k1_v0_1_0_ge add; + rustsecp256k1_v0_1_0_ge_storage adds; + rustsecp256k1_v0_1_0_scalar gnb; int bits; int i, j; memset(&adds, 0, sizeof(adds)); *r = ctx->initial; /* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. 
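The table just built realizes prec[j][i] = 16^j * i * G (the U_i offsets blind the intermediate sums and cancel in the total), so ecmult_gen can process the scalar four bits per window with one table addition each. The same table shape on a toy group, with invented names and a small modulus:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define N 1009u                 /* toy group order */
    #define G 5u                    /* toy generator */
    enum { WINDOWS = 8, TEETH = 16 };   /* 8 windows of 4 bits: 32-bit scalars */

    static unsigned prec[WINDOWS][TEETH];   /* prec[j][i] = 16^j * i * G (mod N) */

    int main(void) {
        uint32_t n = 0xDEADBEEFu;   /* toy secret scalar */
        uint64_t base = G, acc = 0;
        int i, j;

        /* build the table: prec[j][i] = i * 16^j * G (mod N) */
        for (j = 0; j < WINDOWS; j++) {
            for (i = 0; i < TEETH; i++) {
                prec[j][i] = (unsigned)((uint64_t)i * base % N);
            }
            base = base * 16 % N;   /* advance to 16^(j+1) * G */
        }

        /* n*G = sum over windows of prec[j][ (n >> 4j) & 0xf ] */
        for (j = 0; j < WINDOWS; j++) {
            acc = (acc + prec[j][(n >> (4 * j)) & 0xFu]) % N;
        }
        assert(acc == (uint64_t)n % N * G % N);
        printf("n*G = %u (mod %u)\n", (unsigned)acc, N);
        return 0;
    }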
*/ - secp256k1_scalar_add(&gnb, gn, &ctx->blind); + rustsecp256k1_v0_1_0_scalar_add(&gnb, gn, &ctx->blind); add.infinity = 0; for (j = 0; j < 64; j++) { - bits = secp256k1_scalar_get_bits(&gnb, j * 4, 4); + bits = rustsecp256k1_v0_1_0_scalar_get_bits(&gnb, j * 4, 4); for (i = 0; i < 16; i++) { /** This uses a conditional move to avoid any secret data in array indexes. * _Any_ use of secret indexes has been demonstrated to result in timing @@ -146,33 +146,33 @@ static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp25 * by Dag Arne Osvik, Adi Shamir, and Eran Tromer * (http://www.tau.ac.il/~tromer/papers/cache.pdf) */ - secp256k1_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits); + rustsecp256k1_v0_1_0_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits); } - secp256k1_ge_from_storage(&add, &adds); - secp256k1_gej_add_ge(r, r, &add); + rustsecp256k1_v0_1_0_ge_from_storage(&add, &adds); + rustsecp256k1_v0_1_0_gej_add_ge(r, r, &add); } bits = 0; - secp256k1_ge_clear(&add); - secp256k1_scalar_clear(&gnb); + rustsecp256k1_v0_1_0_ge_clear(&add); + rustsecp256k1_v0_1_0_scalar_clear(&gnb); } -/* Setup blinding values for secp256k1_ecmult_gen. */ -static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context *ctx, const unsigned char *seed32) { - secp256k1_scalar b; - secp256k1_gej gb; - secp256k1_fe s; +/* Setup blinding values for rustsecp256k1_v0_1_0_ecmult_gen. */ +static void rustsecp256k1_v0_1_0_ecmult_gen_blind(rustsecp256k1_v0_1_0_ecmult_gen_context *ctx, const unsigned char *seed32) { + rustsecp256k1_v0_1_0_scalar b; + rustsecp256k1_v0_1_0_gej gb; + rustsecp256k1_v0_1_0_fe s; unsigned char nonce32[32]; - secp256k1_rfc6979_hmac_sha256 rng; + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 rng; int retry; unsigned char keydata[64] = {0}; if (seed32 == NULL) { /* When seed is NULL, reset the initial point and blinding value. */ - secp256k1_gej_set_ge(&ctx->initial, &secp256k1_ge_const_g); - secp256k1_gej_neg(&ctx->initial, &ctx->initial); - secp256k1_scalar_set_int(&ctx->blind, 1); + rustsecp256k1_v0_1_0_gej_set_ge(&ctx->initial, &rustsecp256k1_v0_1_0_ge_const_g); + rustsecp256k1_v0_1_0_gej_neg(&ctx->initial, &ctx->initial); + rustsecp256k1_v0_1_0_scalar_set_int(&ctx->blind, 1); } /* The prior blinding value (if not reset) is chained forward by including it in the hash. */ - secp256k1_scalar_get_b32(nonce32, &ctx->blind); + rustsecp256k1_v0_1_0_scalar_get_b32(nonce32, &ctx->blind); /** Using a CSPRNG allows a failure free interface, avoids needing large amounts of random data, * and guards against weak or adversarial seeds. This is a simpler and safer interface than * asking the caller for blinding values directly and expecting them to retry on failure. @@ -181,31 +181,31 @@ static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context *ctx, const if (seed32 != NULL) { memcpy(keydata + 32, seed32, 32); } - secp256k1_rfc6979_hmac_sha256_initialize(&rng, keydata, seed32 ? 64 : 32); + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_initialize(&rng, keydata, seed32 ? 64 : 32); memset(keydata, 0, sizeof(keydata)); /* Retry for out of range results to achieve uniformity. */ do { - secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); - retry = !secp256k1_fe_set_b32(&s, nonce32); - retry |= secp256k1_fe_is_zero(&s); + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); + retry = !rustsecp256k1_v0_1_0_fe_set_b32(&s, nonce32); + retry |= rustsecp256k1_v0_1_0_fe_is_zero(&s); } while (retry); /* This branch true is cryptographically unreachable. 
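The loop above replaces a direct (*ctx->prec)[j][bits] load with a scan over all sixteen entries plus conditional moves, so no memory address ever depends on the secret index; the cited cache-timing papers are the reason. A freestanding sketch of that access pattern (cmov_u32 and the table contents are invented for illustration):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Copy src into dst iff flag is 1, without branching on flag. */
    static void cmov_u32(uint32_t *dst, uint32_t src, int flag) {
        uint32_t mask = (uint32_t)-(int32_t)flag;   /* 0x00000000 or 0xffffffff */
        *dst = (*dst & ~mask) | (src & mask);
    }

    int main(void) {
        uint32_t table[16], out = 0;
        int i, secret_idx = 11;
        for (i = 0; i < 16; i++) table[i] = 0x1000u + (uint32_t)i;

        /* touch every entry; only the matching one is kept */
        for (i = 0; i < 16; i++) {
            cmov_u32(&out, table[i], i == secret_idx);
        }
        assert(out == table[secret_idx]);
        printf("selected 0x%04x without secret-dependent addressing\n", (unsigned)out);
        return 0;
    }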
Requires sha256_hmac output > Fp. */ /* Randomize the projection to defend against multiplier sidechannels. */ - secp256k1_gej_rescale(&ctx->initial, &s); - secp256k1_fe_clear(&s); + rustsecp256k1_v0_1_0_gej_rescale(&ctx->initial, &s); + rustsecp256k1_v0_1_0_fe_clear(&s); do { - secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); - secp256k1_scalar_set_b32(&b, nonce32, &retry); + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); + rustsecp256k1_v0_1_0_scalar_set_b32(&b, nonce32, &retry); /* A blinding value of 0 works, but would undermine the projection hardening. */ - retry |= secp256k1_scalar_is_zero(&b); + retry |= rustsecp256k1_v0_1_0_scalar_is_zero(&b); } while (retry); /* This branch true is cryptographically unreachable. Requires sha256_hmac output > order. */ - secp256k1_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_finalize(&rng); memset(nonce32, 0, 32); - secp256k1_ecmult_gen(ctx, &gb, &b); - secp256k1_scalar_negate(&b, &b); + rustsecp256k1_v0_1_0_ecmult_gen(ctx, &gb, &b); + rustsecp256k1_v0_1_0_scalar_negate(&b, &b); ctx->blind = b; ctx->initial = gb; - secp256k1_scalar_clear(&b); - secp256k1_gej_clear(&gb); + rustsecp256k1_v0_1_0_scalar_clear(&b); + rustsecp256k1_v0_1_0_gej_clear(&gb); } #endif /* SECP256K1_ECMULT_GEN_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_impl.h b/secp256k1-sys/depend/secp256k1/src/ecmult_impl.h index bb7aded..2cf2c1e 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_impl.h @@ -35,8 +35,8 @@ /** Larger values for ECMULT_WINDOW_SIZE result in possibly better * performance at the cost of an exponentially larger precomputed * table. The exact table size is - * (1 << (WINDOW_G - 2)) * sizeof(secp256k1_ge_storage) bytes, - * where sizeof(secp256k1_ge_storage) is typically 64 bytes but can + * (1 << (WINDOW_G - 2)) * sizeof(rustsecp256k1_v0_1_0_ge_storage) bytes, + * where sizeof(rustsecp256k1_v0_1_0_ge_storage) is typically 64 bytes but can * be larger due to platform-specific padding and alignment. * If the endomorphism optimization is enabled (USE_ENDOMORMPHSIM) * two tables of this size are used instead of only one. @@ -94,14 +94,14 @@ * contain prej[0].z / a.z. The other zr[i] values = prej[i].z / prej[i-1].z. * Prej's Z values are undefined, except for the last value. 
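The blinding set up by ecmult_gen_blind stores blind = -b and initial = b*G, so each later call computes b*G + (n - b)*G and the secret n never multiplies G directly. The algebra on the same toy group as the earlier sketches (all names and constants illustrative):

    #include <assert.h>
    #include <stdio.h>

    #define N 1009u
    #define G 5u

    int main(void) {
        unsigned n = 321u;               /* secret scalar */
        unsigned b = 777u;               /* random blinding value */
        unsigned initial = b * G % N;    /* b*G, recomputed only on rerandomization */
        unsigned blind = (N - b) % N;    /* -b mod N */

        /* per-call work: (n + blind)*G added to the stored initial point */
        unsigned r = (initial + (n + blind) % N * G) % N;
        assert(r == n * G % N);
        printf("blinded multiply gives %u == n*G\n", r);
        return 0;
    }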
*/ -static void secp256k1_ecmult_odd_multiples_table(int n, secp256k1_gej *prej, secp256k1_fe *zr, const secp256k1_gej *a) { - secp256k1_gej d; - secp256k1_ge a_ge, d_ge; +static void rustsecp256k1_v0_1_0_ecmult_odd_multiples_table(int n, rustsecp256k1_v0_1_0_gej *prej, rustsecp256k1_v0_1_0_fe *zr, const rustsecp256k1_v0_1_0_gej *a) { + rustsecp256k1_v0_1_0_gej d; + rustsecp256k1_v0_1_0_ge a_ge, d_ge; int i; VERIFY_CHECK(!a->infinity); - secp256k1_gej_double_var(&d, a, NULL); + rustsecp256k1_v0_1_0_gej_double_var(&d, a, NULL); /* * Perform the additions on an isomorphism where 'd' is affine: drop the z coordinate @@ -111,7 +111,7 @@ static void secp256k1_ecmult_odd_multiples_table(int n, secp256k1_gej *prej, sec d_ge.y = d.y; d_ge.infinity = 0; - secp256k1_ge_set_gej_zinv(&a_ge, a, &d.z); + rustsecp256k1_v0_1_0_ge_set_gej_zinv(&a_ge, a, &d.z); prej[0].x = a_ge.x; prej[0].y = a_ge.y; prej[0].z = a->z; @@ -119,24 +119,24 @@ static void secp256k1_ecmult_odd_multiples_table(int n, secp256k1_gej *prej, sec zr[0] = d.z; for (i = 1; i < n; i++) { - secp256k1_gej_add_ge_var(&prej[i], &prej[i-1], &d_ge, &zr[i]); + rustsecp256k1_v0_1_0_gej_add_ge_var(&prej[i], &prej[i-1], &d_ge, &zr[i]); } /* * Each point in 'prej' has a z coordinate too small by a factor of 'd.z'. Only * the final point's z coordinate is actually used though, so just update that. */ - secp256k1_fe_mul(&prej[n-1].z, &prej[n-1].z, &d.z); + rustsecp256k1_v0_1_0_fe_mul(&prej[n-1].z, &prej[n-1].z, &d.z); } /** Fill a table 'pre' with precomputed odd multiples of a. * * There are two versions of this function: - * - secp256k1_ecmult_odd_multiples_table_globalz_windowa which brings its + * - rustsecp256k1_v0_1_0_ecmult_odd_multiples_table_globalz_windowa which brings its * resulting point set to a single constant Z denominator, stores the X and Y * coordinates as ge_storage points in pre, and stores the global Z in rz. * It only operates on tables sized for WINDOW_A wnaf multiples. - * - secp256k1_ecmult_odd_multiples_table_storage_var, which converts its + * - rustsecp256k1_v0_1_0_ecmult_odd_multiples_table_storage_var, which converts its * resulting point set to actually affine points, and stores those in pre. * It operates on tables of any size, but uses heap-allocated temporaries. * @@ -144,32 +144,32 @@ static void secp256k1_ecmult_odd_multiples_table(int n, secp256k1_gej *prej, sec * and for G using the second (which requires an inverse, but it only needs to * happen once). */ -static void secp256k1_ecmult_odd_multiples_table_globalz_windowa(secp256k1_ge *pre, secp256k1_fe *globalz, const secp256k1_gej *a) { - secp256k1_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)]; - secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)]; +static void rustsecp256k1_v0_1_0_ecmult_odd_multiples_table_globalz_windowa(rustsecp256k1_v0_1_0_ge *pre, rustsecp256k1_v0_1_0_fe *globalz, const rustsecp256k1_v0_1_0_gej *a) { + rustsecp256k1_v0_1_0_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)]; + rustsecp256k1_v0_1_0_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)]; /* Compute the odd multiples in Jacobian form. */ - secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), prej, zr, a); + rustsecp256k1_v0_1_0_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), prej, zr, a); /* Bring them to the same Z denominator. 
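Only odd multiples P, 3P, 5P, ... get tabulated: after the single doubling d = 2P, each entry is the previous one plus d, which halves the table for a given window width and still covers every digit a wNAF produces (digits are odd). The incremental build in toy form, ignoring the z-ratio bookkeeping (names invented; TABLE mirrors ECMULT_TABLE_SIZE(w) = 2^(w-2)):

    #include <assert.h>
    #include <stdio.h>

    #define N 1009u

    int main(void) {
        enum { TABLE = 8 };             /* like ECMULT_TABLE_SIZE(w) for w = 5 */
        unsigned P = 42u, pre[TABLE];
        unsigned d = 2 * P % N;         /* the one doubling */
        int i;

        pre[0] = P;                     /* 1*P */
        for (i = 1; i < TABLE; i++) {
            pre[i] = (pre[i - 1] + d) % N;   /* (2i+1)*P from (2i-1)*P + 2P */
        }
        for (i = 0; i < TABLE; i++) {
            assert(pre[i] == (2u * i + 1) * P % N);
        }
        printf("pre[i] = (2i+1)*P for i < %d\n", TABLE);
        return 0;
    }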
*/ - secp256k1_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A), pre, globalz, prej, zr); + rustsecp256k1_v0_1_0_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A), pre, globalz, prej, zr); } -static void secp256k1_ecmult_odd_multiples_table_storage_var(const int n, secp256k1_ge_storage *pre, const secp256k1_gej *a) { - secp256k1_gej d; - secp256k1_ge d_ge, p_ge; - secp256k1_gej pj; - secp256k1_fe zi; - secp256k1_fe zr; - secp256k1_fe dx_over_dz_squared; +static void rustsecp256k1_v0_1_0_ecmult_odd_multiples_table_storage_var(const int n, rustsecp256k1_v0_1_0_ge_storage *pre, const rustsecp256k1_v0_1_0_gej *a) { + rustsecp256k1_v0_1_0_gej d; + rustsecp256k1_v0_1_0_ge d_ge, p_ge; + rustsecp256k1_v0_1_0_gej pj; + rustsecp256k1_v0_1_0_fe zi; + rustsecp256k1_v0_1_0_fe zr; + rustsecp256k1_v0_1_0_fe dx_over_dz_squared; int i; VERIFY_CHECK(!a->infinity); - secp256k1_gej_double_var(&d, a, NULL); + rustsecp256k1_v0_1_0_gej_double_var(&d, a, NULL); /* First, we perform all the additions in an isomorphic curve obtained by multiplying * all `z` coordinates by 1/`d.z`. In these coordinates `d` is affine so we can use - * `secp256k1_gej_add_ge_var` to perform the additions. For each addition, we store + * `rustsecp256k1_v0_1_0_gej_add_ge_var` to perform the additions. For each addition, we store * the resulting y-coordinate and the z-ratio, since we only have enough memory to * store two field elements. These are sufficient to efficiently undo the isomorphism * and recompute all the `x`s. @@ -178,34 +178,34 @@ static void secp256k1_ecmult_odd_multiples_table_storage_var(const int n, secp25 d_ge.y = d.y; d_ge.infinity = 0; - secp256k1_ge_set_gej_zinv(&p_ge, a, &d.z); + rustsecp256k1_v0_1_0_ge_set_gej_zinv(&p_ge, a, &d.z); pj.x = p_ge.x; pj.y = p_ge.y; pj.z = a->z; pj.infinity = 0; for (i = 0; i < (n - 1); i++) { - secp256k1_fe_normalize_var(&pj.y); - secp256k1_fe_to_storage(&pre[i].y, &pj.y); - secp256k1_gej_add_ge_var(&pj, &pj, &d_ge, &zr); - secp256k1_fe_normalize_var(&zr); - secp256k1_fe_to_storage(&pre[i].x, &zr); + rustsecp256k1_v0_1_0_fe_normalize_var(&pj.y); + rustsecp256k1_v0_1_0_fe_to_storage(&pre[i].y, &pj.y); + rustsecp256k1_v0_1_0_gej_add_ge_var(&pj, &pj, &d_ge, &zr); + rustsecp256k1_v0_1_0_fe_normalize_var(&zr); + rustsecp256k1_v0_1_0_fe_to_storage(&pre[i].x, &zr); } /* Invert d.z in the same batch, preserving pj.z so we can extract 1/d.z */ - secp256k1_fe_mul(&zi, &pj.z, &d.z); - secp256k1_fe_inv_var(&zi, &zi); + rustsecp256k1_v0_1_0_fe_mul(&zi, &pj.z, &d.z); + rustsecp256k1_v0_1_0_fe_inv_var(&zi, &zi); /* Directly set `pre[n - 1]` to `pj`, saving the inverted z-coordinate so * that we can combine it with the saved z-ratios to compute the other zs * without any more inversions. */ - secp256k1_ge_set_gej_zinv(&p_ge, &pj, &zi); - secp256k1_ge_to_storage(&pre[n - 1], &p_ge); + rustsecp256k1_v0_1_0_ge_set_gej_zinv(&p_ge, &pj, &zi); + rustsecp256k1_v0_1_0_ge_to_storage(&pre[n - 1], &p_ge); /* Compute the actual x-coordinate of D, which will be needed below. */ - secp256k1_fe_mul(&d.z, &zi, &pj.z); /* d.z = 1/d.z */ - secp256k1_fe_sqr(&dx_over_dz_squared, &d.z); - secp256k1_fe_mul(&dx_over_dz_squared, &dx_over_dz_squared, &d.x); + rustsecp256k1_v0_1_0_fe_mul(&d.z, &zi, &pj.z); /* d.z = 1/d.z */ + rustsecp256k1_v0_1_0_fe_sqr(&dx_over_dz_squared, &d.z); + rustsecp256k1_v0_1_0_fe_mul(&dx_over_dz_squared, &dx_over_dz_squared, &d.x); /* Going into the second loop, we have set `pre[n-1]` to its final affine * form, but still need to set `pre[i]` for `i` in 0 through `n-2`. 
We @@ -229,21 +229,21 @@ static void secp256k1_ecmult_odd_multiples_table_storage_var(const int n, secp25 */ i = n - 1; while (i > 0) { - secp256k1_fe zi2, zi3; - const secp256k1_fe *rzr; + rustsecp256k1_v0_1_0_fe zi2, zi3; + const rustsecp256k1_v0_1_0_fe *rzr; i--; - secp256k1_ge_from_storage(&p_ge, &pre[i]); + rustsecp256k1_v0_1_0_ge_from_storage(&p_ge, &pre[i]); /* For each remaining point, we extract the z-ratio from the stored * x-coordinate, compute its z^-1 from that, and compute the full * point from that. */ rzr = &p_ge.x; - secp256k1_fe_mul(&zi, &zi, rzr); - secp256k1_fe_sqr(&zi2, &zi); - secp256k1_fe_mul(&zi3, &zi2, &zi); + rustsecp256k1_v0_1_0_fe_mul(&zi, &zi, rzr); + rustsecp256k1_v0_1_0_fe_sqr(&zi2, &zi); + rustsecp256k1_v0_1_0_fe_mul(&zi3, &zi2, &zi); /* To compute the actual x-coordinate, we use the stored z ratio and - * y-coordinate, which we obtained from `secp256k1_gej_add_ge_var` + * y-coordinate, which we obtained from `rustsecp256k1_v0_1_0_gej_add_ge_var` * in the loop above, as well as the inverse of the square of its * z-coordinate. We store the latter in the `zi2` variable, which is * computed iteratively starting from the overall Z inverse then @@ -275,13 +275,13 @@ static void secp256k1_ecmult_odd_multiples_table_storage_var(const int n, secp25 * X = d_x / d_z^2 - rzr / z^2 * = dx_over_dz_squared - rzr * zi2 */ - secp256k1_fe_mul(&p_ge.x, rzr, &zi2); - secp256k1_fe_negate(&p_ge.x, &p_ge.x, 1); - secp256k1_fe_add(&p_ge.x, &dx_over_dz_squared); + rustsecp256k1_v0_1_0_fe_mul(&p_ge.x, rzr, &zi2); + rustsecp256k1_v0_1_0_fe_negate(&p_ge.x, &p_ge.x, 1); + rustsecp256k1_v0_1_0_fe_add(&p_ge.x, &dx_over_dz_squared); /* y is stored_y/z^3, as we expect */ - secp256k1_fe_mul(&p_ge.y, &p_ge.y, &zi3); + rustsecp256k1_v0_1_0_fe_mul(&p_ge.y, &p_ge.y, &zi3); /* Store */ - secp256k1_ge_to_storage(&pre[i], &p_ge); + rustsecp256k1_v0_1_0_ge_to_storage(&pre[i], &p_ge); } } @@ -295,7 +295,7 @@ static void secp256k1_ecmult_odd_multiples_table_storage_var(const int n, secp25 *(r) = (pre)[((n)-1)/2]; \ } else { \ *(r) = (pre)[(-(n)-1)/2]; \ - secp256k1_fe_negate(&((r)->y), &((r)->y), 1); \ + rustsecp256k1_v0_1_0_fe_negate(&((r)->y), &((r)->y), 1); \ } \ } while(0) @@ -304,29 +304,29 @@ static void secp256k1_ecmult_odd_multiples_table_storage_var(const int n, secp25 VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \ VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \ if ((n) > 0) { \ - secp256k1_ge_from_storage((r), &(pre)[((n)-1)/2]); \ + rustsecp256k1_v0_1_0_ge_from_storage((r), &(pre)[((n)-1)/2]); \ } else { \ - secp256k1_ge_from_storage((r), &(pre)[(-(n)-1)/2]); \ - secp256k1_fe_negate(&((r)->y), &((r)->y), 1); \ + rustsecp256k1_v0_1_0_ge_from_storage((r), &(pre)[(-(n)-1)/2]); \ + rustsecp256k1_v0_1_0_fe_negate(&((r)->y), &((r)->y), 1); \ } \ } while(0) static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE = - ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)) + ROUND_TO_ALIGN(sizeof((*((rustsecp256k1_v0_1_0_ecmult_context*) NULL)->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)) #ifdef USE_ENDOMORPHISM - + ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)) + + ROUND_TO_ALIGN(sizeof((*((rustsecp256k1_v0_1_0_ecmult_context*) NULL)->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)) #endif ; -static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx) { +static void rustsecp256k1_v0_1_0_ecmult_context_init(rustsecp256k1_v0_1_0_ecmult_context *ctx) { ctx->pre_g = NULL; #ifdef 
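The second loop's bookkeeping exists so the whole table costs exactly one field inversion: the stored z-ratios let every other inverse be recovered by multiplications alone, in the spirit of Montgomery's batch-inversion trick. A sketch of that trick over a small prime field (invm uses Fermat's little theorem for brevity; all names are illustrative, not the library's):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define P 1009u   /* small prime field */

    static uint32_t mulm(uint32_t a, uint32_t b) { return (uint32_t)((uint64_t)a * b % P); }

    static uint32_t invm(uint32_t a) {            /* the one "expensive" step: a^(P-2) */
        uint32_t e = P - 2, r = 1, base = a % P;
        while (e) {
            if (e & 1u) r = mulm(r, base);
            base = mulm(base, base);
            e >>= 1;
        }
        return r;
    }

    int main(void) {
        uint32_t z[4] = { 3u, 7u, 10u, 123u }, pref[5], inv[4], t;
        int i;

        pref[0] = 1;                              /* prefix products of the z values */
        for (i = 0; i < 4; i++) pref[i + 1] = mulm(pref[i], z[i]);

        t = invm(pref[4]);                        /* single inversion of the full product */
        for (i = 3; i >= 0; i--) {                /* peel individual inverses off the back */
            inv[i] = mulm(t, pref[i]);
            t = mulm(t, z[i]);
        }
        for (i = 0; i < 4; i++) assert(mulm(inv[i], z[i]) == 1u);
        printf("4 inverses from 1 exponentiation\n");
        return 0;
    }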
USE_ENDOMORPHISM ctx->pre_g_128 = NULL; #endif } -static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc) { - secp256k1_gej gj; +static void rustsecp256k1_v0_1_0_ecmult_context_build(rustsecp256k1_v0_1_0_ecmult_context *ctx, void **prealloc) { + rustsecp256k1_v0_1_0_gej gj; void* const base = *prealloc; size_t const prealloc_size = SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE; @@ -335,56 +335,56 @@ static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void * } /* get the generator */ - secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g); + rustsecp256k1_v0_1_0_gej_set_ge(&gj, &rustsecp256k1_v0_1_0_ge_const_g); { size_t size = sizeof((*ctx->pre_g)[0]) * ((size_t)ECMULT_TABLE_SIZE(WINDOW_G)); /* check for overflow */ VERIFY_CHECK(size / sizeof((*ctx->pre_g)[0]) == ((size_t)ECMULT_TABLE_SIZE(WINDOW_G))); - ctx->pre_g = (secp256k1_ge_storage (*)[])manual_alloc(prealloc, sizeof((*ctx->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size); + ctx->pre_g = (rustsecp256k1_v0_1_0_ge_storage (*)[])manual_alloc(prealloc, sizeof((*ctx->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size); } /* precompute the tables with odd multiples */ - secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj); + rustsecp256k1_v0_1_0_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj); #ifdef USE_ENDOMORPHISM { - secp256k1_gej g_128j; + rustsecp256k1_v0_1_0_gej g_128j; int i; size_t size = sizeof((*ctx->pre_g_128)[0]) * ((size_t) ECMULT_TABLE_SIZE(WINDOW_G)); /* check for overflow */ VERIFY_CHECK(size / sizeof((*ctx->pre_g_128)[0]) == ((size_t)ECMULT_TABLE_SIZE(WINDOW_G))); - ctx->pre_g_128 = (secp256k1_ge_storage (*)[])manual_alloc(prealloc, sizeof((*ctx->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size); + ctx->pre_g_128 = (rustsecp256k1_v0_1_0_ge_storage (*)[])manual_alloc(prealloc, sizeof((*ctx->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size); /* calculate 2^128*generator */ g_128j = gj; for (i = 0; i < 128; i++) { - secp256k1_gej_double_var(&g_128j, &g_128j, NULL); + rustsecp256k1_v0_1_0_gej_double_var(&g_128j, &g_128j, NULL); } - secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j); + rustsecp256k1_v0_1_0_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j); } #endif } -static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src) { +static void rustsecp256k1_v0_1_0_ecmult_context_finalize_memcpy(rustsecp256k1_v0_1_0_ecmult_context *dst, const rustsecp256k1_v0_1_0_ecmult_context *src) { if (src->pre_g != NULL) { /* We cast to void* first to suppress a -Wcast-align warning. 
*/ - dst->pre_g = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g) - (unsigned char*)src)); + dst->pre_g = (rustsecp256k1_v0_1_0_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g) - (unsigned char*)src)); } #ifdef USE_ENDOMORPHISM if (src->pre_g_128 != NULL) { - dst->pre_g_128 = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g_128) - (unsigned char*)src)); + dst->pre_g_128 = (rustsecp256k1_v0_1_0_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g_128) - (unsigned char*)src)); } #endif } -static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx) { +static int rustsecp256k1_v0_1_0_ecmult_context_is_built(const rustsecp256k1_v0_1_0_ecmult_context *ctx) { return ctx->pre_g != NULL; } -static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx) { - secp256k1_ecmult_context_init(ctx); +static void rustsecp256k1_v0_1_0_ecmult_context_clear(rustsecp256k1_v0_1_0_ecmult_context *ctx) { + rustsecp256k1_v0_1_0_ecmult_context_init(ctx); } /** Convert a number to WNAF notation. The number becomes represented by sum(2^i * wnaf[i], i=0..bits), @@ -394,8 +394,8 @@ static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx) { * - the number of set values in wnaf is returned. This number is at most 256, and at most one more * than the number of bits in the (absolute value) of the input. */ -static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, int w) { - secp256k1_scalar s = *a; +static int rustsecp256k1_v0_1_0_ecmult_wnaf(int *wnaf, int len, const rustsecp256k1_v0_1_0_scalar *a, int w) { + rustsecp256k1_v0_1_0_scalar s = *a; int last_set_bit = -1; int bit = 0; int sign = 1; @@ -408,15 +408,15 @@ static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, memset(wnaf, 0, len * sizeof(wnaf[0])); - if (secp256k1_scalar_get_bits(&s, 255, 1)) { - secp256k1_scalar_negate(&s, &s); + if (rustsecp256k1_v0_1_0_scalar_get_bits(&s, 255, 1)) { + rustsecp256k1_v0_1_0_scalar_negate(&s, &s); sign = -1; } while (bit < len) { int now; int word; - if (secp256k1_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) { + if (rustsecp256k1_v0_1_0_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) { bit++; continue; } @@ -426,7 +426,7 @@ static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, now = len - bit; } - word = secp256k1_scalar_get_bits_var(&s, bit, now) + carry; + word = rustsecp256k1_v0_1_0_scalar_get_bits_var(&s, bit, now) + carry; carry = (word >> (w-1)) & 1; word -= carry << w; @@ -439,15 +439,15 @@ static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, #ifdef VERIFY CHECK(carry == 0); while (bit < 256) { - CHECK(secp256k1_scalar_get_bits(&s, bit++, 1) == 0); + CHECK(rustsecp256k1_v0_1_0_scalar_get_bits(&s, bit++, 1) == 0); } #endif return last_set_bit + 1; } -struct secp256k1_strauss_point_state { +struct rustsecp256k1_v0_1_0_strauss_point_state { #ifdef USE_ENDOMORPHISM - secp256k1_scalar na_1, na_lam; + rustsecp256k1_v0_1_0_scalar na_1, na_lam; int wnaf_na_1[130]; int wnaf_na_lam[130]; int bits_na_1; @@ -459,22 +459,22 @@ struct secp256k1_strauss_point_state { size_t input_pos; }; -struct secp256k1_strauss_state { - secp256k1_gej* prej; - secp256k1_fe* zr; - secp256k1_ge* pre_a; +struct rustsecp256k1_v0_1_0_strauss_state { + rustsecp256k1_v0_1_0_gej* prej; + rustsecp256k1_v0_1_0_fe* zr; + rustsecp256k1_v0_1_0_ge* pre_a; #ifdef USE_ENDOMORPHISM - 
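The variable-time wNAF converter renamed above emits digit 0 for even residues and a signed odd digit otherwise, which guarantees at least w-1 zeros after every nonzero digit, so most ladder steps are plain doublings. A 32-bit toy with a reconstruction check (wnaf_var and the width are assumptions for illustration):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Width-w NAF: s == sum wnaf[i] * 2^i, digits odd or zero, |digit| < 2^(w-1). */
    static int wnaf_var(int *wnaf, uint32_t s32, int w) {
        int bit = 0;
        int64_t s = s32;
        while (s > 0) {
            if (s & 1) {
                int d = (int)(s & ((1 << w) - 1));
                if (d > (1 << (w - 1))) d -= 1 << w;   /* signed residue mod 2^w */
                wnaf[bit] = d;
                s -= d;                                /* low w bits now zero */
            } else {
                wnaf[bit] = 0;
            }
            s >>= 1;
            bit++;
        }
        return bit;                                    /* digit positions used */
    }

    int main(void) {
        int wnaf[40] = {0}, len, i;
        int64_t acc = 0;
        uint32_t s = 0xB00B1E5u;
        len = wnaf_var(wnaf, s, 5);
        for (i = len - 1; i >= 0; i--) acc = 2 * acc + wnaf[i];
        assert(acc == (int64_t)s);
        printf("%u -> %d wNAF digits\n", (unsigned)s, len);
        return 0;
    }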
secp256k1_ge* pre_a_lam; + rustsecp256k1_v0_1_0_ge* pre_a_lam; #endif - struct secp256k1_strauss_point_state* ps; + struct rustsecp256k1_v0_1_0_strauss_point_state* ps; }; -static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, const struct secp256k1_strauss_state *state, secp256k1_gej *r, int num, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) { - secp256k1_ge tmpa; - secp256k1_fe Z; +static void rustsecp256k1_v0_1_0_ecmult_strauss_wnaf(const rustsecp256k1_v0_1_0_ecmult_context *ctx, const struct rustsecp256k1_v0_1_0_strauss_state *state, rustsecp256k1_v0_1_0_gej *r, int num, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_scalar *na, const rustsecp256k1_v0_1_0_scalar *ng) { + rustsecp256k1_v0_1_0_ge tmpa; + rustsecp256k1_v0_1_0_fe Z; #ifdef USE_ENDOMORPHISM /* Splitted G factors. */ - secp256k1_scalar ng_1, ng_128; + rustsecp256k1_v0_1_0_scalar ng_1, ng_128; int wnaf_ng_1[129]; int bits_ng_1 = 0; int wnaf_ng_128[129]; @@ -489,17 +489,17 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c int no = 0; for (np = 0; np < num; ++np) { - if (secp256k1_scalar_is_zero(&na[np]) || secp256k1_gej_is_infinity(&a[np])) { + if (rustsecp256k1_v0_1_0_scalar_is_zero(&na[np]) || rustsecp256k1_v0_1_0_gej_is_infinity(&a[np])) { continue; } state->ps[no].input_pos = np; #ifdef USE_ENDOMORPHISM /* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */ - secp256k1_scalar_split_lambda(&state->ps[no].na_1, &state->ps[no].na_lam, &na[np]); + rustsecp256k1_v0_1_0_scalar_split_lambda(&state->ps[no].na_1, &state->ps[no].na_lam, &na[np]); /* build wnaf representation for na_1 and na_lam. */ - state->ps[no].bits_na_1 = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_1, 130, &state->ps[no].na_1, WINDOW_A); - state->ps[no].bits_na_lam = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_lam, 130, &state->ps[no].na_lam, WINDOW_A); + state->ps[no].bits_na_1 = rustsecp256k1_v0_1_0_ecmult_wnaf(state->ps[no].wnaf_na_1, 130, &state->ps[no].na_1, WINDOW_A); + state->ps[no].bits_na_lam = rustsecp256k1_v0_1_0_ecmult_wnaf(state->ps[no].wnaf_na_lam, 130, &state->ps[no].na_lam, WINDOW_A); VERIFY_CHECK(state->ps[no].bits_na_1 <= 130); VERIFY_CHECK(state->ps[no].bits_na_lam <= 130); if (state->ps[no].bits_na_1 > bits) { @@ -510,7 +510,7 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c } #else /* build wnaf representation for na. */ - state->ps[no].bits_na = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na, 256, &na[np], WINDOW_A); + state->ps[no].bits_na = rustsecp256k1_v0_1_0_ecmult_wnaf(state->ps[no].wnaf_na, 256, &na[np], WINDOW_A); if (state->ps[no].bits_na > bits) { bits = state->ps[no].bits_na; } @@ -525,41 +525,41 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c * the Z coordinate of the result once at the end. * The exception is the precomputed G table points, which are actually * affine. Compared to the base used for other points, they have a Z ratio - * of 1/Z, so we can use secp256k1_gej_add_zinv_var, which uses the same + * of 1/Z, so we can use rustsecp256k1_v0_1_0_gej_add_zinv_var, which uses the same * isomorphism to efficiently add with a known Z inverse. */ if (no > 0) { /* Compute the odd multiples in Jacobian form. 
*/ - secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej, state->zr, &a[state->ps[0].input_pos]); + rustsecp256k1_v0_1_0_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej, state->zr, &a[state->ps[0].input_pos]); for (np = 1; np < no; ++np) { - secp256k1_gej tmp = a[state->ps[np].input_pos]; + rustsecp256k1_v0_1_0_gej tmp = a[state->ps[np].input_pos]; #ifdef VERIFY - secp256k1_fe_normalize_var(&(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z)); + rustsecp256k1_v0_1_0_fe_normalize_var(&(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z)); #endif - secp256k1_gej_rescale(&tmp, &(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z)); - secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &tmp); - secp256k1_fe_mul(state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &(a[state->ps[np].input_pos].z)); + rustsecp256k1_v0_1_0_gej_rescale(&tmp, &(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z)); + rustsecp256k1_v0_1_0_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &tmp); + rustsecp256k1_v0_1_0_fe_mul(state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &(a[state->ps[np].input_pos].z)); } /* Bring them to the same Z denominator. */ - secp256k1_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A) * no, state->pre_a, &Z, state->prej, state->zr); + rustsecp256k1_v0_1_0_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A) * no, state->pre_a, &Z, state->prej, state->zr); } else { - secp256k1_fe_set_int(&Z, 1); + rustsecp256k1_v0_1_0_fe_set_int(&Z, 1); } #ifdef USE_ENDOMORPHISM for (np = 0; np < no; ++np) { for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { - secp256k1_ge_mul_lambda(&state->pre_a_lam[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i]); + rustsecp256k1_v0_1_0_ge_mul_lambda(&state->pre_a_lam[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i]); } } if (ng) { /* split ng into ng_1 and ng_128 (where gn = gn_1 + gn_128*2^128, and gn_1 and gn_128 are ~128 bit) */ - secp256k1_scalar_split_128(&ng_1, &ng_128, ng); + rustsecp256k1_v0_1_0_scalar_split_128(&ng_1, &ng_128, ng); /* Build wnaf representation for ng_1 and ng_128 */ - bits_ng_1 = secp256k1_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G); - bits_ng_128 = secp256k1_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G); + bits_ng_1 = rustsecp256k1_v0_1_0_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G); + bits_ng_128 = rustsecp256k1_v0_1_0_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G); if (bits_ng_1 > bits) { bits = bits_ng_1; } @@ -569,65 +569,65 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c } #else if (ng) { - bits_ng = secp256k1_ecmult_wnaf(wnaf_ng, 256, ng, WINDOW_G); + bits_ng = rustsecp256k1_v0_1_0_ecmult_wnaf(wnaf_ng, 256, ng, WINDOW_G); if (bits_ng > bits) { bits = bits_ng; } } #endif - secp256k1_gej_set_infinity(r); + rustsecp256k1_v0_1_0_gej_set_infinity(r); for (i = bits - 1; i >= 0; i--) { int n; - secp256k1_gej_double_var(r, r, NULL); + rustsecp256k1_v0_1_0_gej_double_var(r, r, NULL); #ifdef USE_ENDOMORPHISM for (np = 0; np < no; ++np) { if (i 
< state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) { ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); - secp256k1_gej_add_ge_var(r, r, &tmpa, NULL); + rustsecp256k1_v0_1_0_gej_add_ge_var(r, r, &tmpa, NULL); } if (i < state->ps[np].bits_na_lam && (n = state->ps[np].wnaf_na_lam[i])) { ECMULT_TABLE_GET_GE(&tmpa, state->pre_a_lam + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); - secp256k1_gej_add_ge_var(r, r, &tmpa, NULL); + rustsecp256k1_v0_1_0_gej_add_ge_var(r, r, &tmpa, NULL); } } if (i < bits_ng_1 && (n = wnaf_ng_1[i])) { ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G); - secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z); + rustsecp256k1_v0_1_0_gej_add_zinv_var(r, r, &tmpa, &Z); } if (i < bits_ng_128 && (n = wnaf_ng_128[i])) { ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g_128, n, WINDOW_G); - secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z); + rustsecp256k1_v0_1_0_gej_add_zinv_var(r, r, &tmpa, &Z); } #else for (np = 0; np < no; ++np) { if (i < state->ps[np].bits_na && (n = state->ps[np].wnaf_na[i])) { ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); - secp256k1_gej_add_ge_var(r, r, &tmpa, NULL); + rustsecp256k1_v0_1_0_gej_add_ge_var(r, r, &tmpa, NULL); } } if (i < bits_ng && (n = wnaf_ng[i])) { ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G); - secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z); + rustsecp256k1_v0_1_0_gej_add_zinv_var(r, r, &tmpa, &Z); } #endif } if (!r->infinity) { - secp256k1_fe_mul(&r->z, &r->z, &Z); + rustsecp256k1_v0_1_0_fe_mul(&r->z, &r->z, &Z); } } -static void secp256k1_ecmult(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) { - secp256k1_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)]; - secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)]; - secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; - struct secp256k1_strauss_point_state ps[1]; +static void rustsecp256k1_v0_1_0_ecmult(const rustsecp256k1_v0_1_0_ecmult_context *ctx, rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_scalar *na, const rustsecp256k1_v0_1_0_scalar *ng) { + rustsecp256k1_v0_1_0_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)]; + rustsecp256k1_v0_1_0_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)]; + rustsecp256k1_v0_1_0_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; + struct rustsecp256k1_v0_1_0_strauss_point_state ps[1]; #ifdef USE_ENDOMORPHISM - secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; + rustsecp256k1_v0_1_0_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; #endif - struct secp256k1_strauss_state state; + struct rustsecp256k1_v0_1_0_strauss_state state; state.prej = prej; state.zr = zr; @@ -636,67 +636,67 @@ static void secp256k1_ecmult(const secp256k1_ecmult_context *ctx, secp256k1_gej state.pre_a_lam = pre_a_lam; #endif state.ps = ps; - secp256k1_ecmult_strauss_wnaf(ctx, &state, r, 1, a, na, ng); + rustsecp256k1_v0_1_0_ecmult_strauss_wnaf(ctx, &state, r, 1, a, na, ng); } -static size_t secp256k1_strauss_scratch_size(size_t n_points) { +static size_t rustsecp256k1_v0_1_0_strauss_scratch_size(size_t n_points) { #ifdef USE_ENDOMORPHISM - static const size_t point_size = (2 * sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar); + static const size_t point_size = (2 * sizeof(rustsecp256k1_v0_1_0_ge) + sizeof(rustsecp256k1_v0_1_0_gej) + sizeof(rustsecp256k1_v0_1_0_fe)) * 
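The strauss_wnaf loop above shares one doubling chain across every term: each step doubles the accumulator once, then adds whichever table entries have a nonzero digit at that position. Shamir's trick in its simplest binary, variable-time form, on the toy group used in the earlier sketches:

    #include <assert.h>
    #include <stdio.h>

    #define N 1009u

    int main(void) {
        unsigned P = 42u, Q = 99u;       /* two "points" */
        unsigned a = 0x2Du, b = 0x5Au;   /* two 8-bit scalars */
        unsigned r = 0;
        int i;

        /* one shared double per bit; one add per set bit of each scalar */
        for (i = 7; i >= 0; i--) {
            r = 2 * r % N;                        /* the shared doubling */
            if ((a >> i) & 1u) r = (r + P) % N;
            if ((b >> i) & 1u) r = (r + Q) % N;
        }
        assert(r == (a * P + b * Q) % N);
        printf("a*P + b*Q = %u with 8 doublings total\n", r);
        return 0;
    }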
ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct rustsecp256k1_v0_1_0_strauss_point_state) + sizeof(rustsecp256k1_v0_1_0_gej) + sizeof(rustsecp256k1_v0_1_0_scalar);
 #else
-    static const size_t point_size = (sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar);
+    static const size_t point_size = (sizeof(rustsecp256k1_v0_1_0_ge) + sizeof(rustsecp256k1_v0_1_0_gej) + sizeof(rustsecp256k1_v0_1_0_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct rustsecp256k1_v0_1_0_strauss_point_state) + sizeof(rustsecp256k1_v0_1_0_gej) + sizeof(rustsecp256k1_v0_1_0_scalar);
 #endif
     return n_points*point_size;
 }

-static int secp256k1_ecmult_strauss_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
-    secp256k1_gej* points;
-    secp256k1_scalar* scalars;
-    struct secp256k1_strauss_state state;
+static int rustsecp256k1_v0_1_0_ecmult_strauss_batch(const rustsecp256k1_v0_1_0_callback* error_callback, const rustsecp256k1_v0_1_0_ecmult_context *ctx, rustsecp256k1_v0_1_0_scratch *scratch, rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_scalar *inp_g_sc, rustsecp256k1_v0_1_0_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
+    rustsecp256k1_v0_1_0_gej* points;
+    rustsecp256k1_v0_1_0_scalar* scalars;
+    struct rustsecp256k1_v0_1_0_strauss_state state;
     size_t i;
-    const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
+    const size_t scratch_checkpoint = rustsecp256k1_v0_1_0_scratch_checkpoint(error_callback, scratch);

-    secp256k1_gej_set_infinity(r);
+    rustsecp256k1_v0_1_0_gej_set_infinity(r);
     if (inp_g_sc == NULL && n_points == 0) {
         return 1;
     }

-    points = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_gej));
-    scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_scalar));
-    state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej));
-    state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe));
+    points = (rustsecp256k1_v0_1_0_gej*)rustsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, n_points * sizeof(rustsecp256k1_v0_1_0_gej));
+    scalars = (rustsecp256k1_v0_1_0_scalar*)rustsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, n_points * sizeof(rustsecp256k1_v0_1_0_scalar));
+    state.prej = (rustsecp256k1_v0_1_0_gej*)rustsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_1_0_gej));
+    state.zr = (rustsecp256k1_v0_1_0_fe*)rustsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_1_0_fe));
 #ifdef USE_ENDOMORPHISM
-    state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
+    state.pre_a = (rustsecp256k1_v0_1_0_ge*)rustsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_1_0_ge));
     state.pre_a_lam = state.pre_a + n_points * ECMULT_TABLE_SIZE(WINDOW_A);
 #else
-    state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
+    state.pre_a = (rustsecp256k1_v0_1_0_ge*)rustsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_1_0_ge));
 #endif
-    state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(struct secp256k1_strauss_point_state));
+    state.ps = (struct rustsecp256k1_v0_1_0_strauss_point_state*)rustsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, n_points * sizeof(struct rustsecp256k1_v0_1_0_strauss_point_state));

     if (points == NULL || scalars == NULL || state.prej == NULL || state.zr == NULL || state.pre_a == NULL) {
-        secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
+        rustsecp256k1_v0_1_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
         return 0;
     }

     for (i = 0; i < n_points; i++) {
-        secp256k1_ge point;
+        rustsecp256k1_v0_1_0_ge point;
         if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) {
-            secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
+            rustsecp256k1_v0_1_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
             return 0;
         }
-        secp256k1_gej_set_ge(&points[i], &point);
+        rustsecp256k1_v0_1_0_gej_set_ge(&points[i], &point);
     }
-    secp256k1_ecmult_strauss_wnaf(ctx, &state, r, n_points, points, scalars, inp_g_sc);
-    secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
+    rustsecp256k1_v0_1_0_ecmult_strauss_wnaf(ctx, &state, r, n_points, points, scalars, inp_g_sc);
+    rustsecp256k1_v0_1_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
     return 1;
 }

-/* Wrapper for secp256k1_ecmult_multi_func interface */
-static int secp256k1_ecmult_strauss_batch_single(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
-    return secp256k1_ecmult_strauss_batch(error_callback, actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
+/* Wrapper for rustsecp256k1_v0_1_0_ecmult_multi_func interface */
+static int rustsecp256k1_v0_1_0_ecmult_strauss_batch_single(const rustsecp256k1_v0_1_0_callback* error_callback, const rustsecp256k1_v0_1_0_ecmult_context *actx, rustsecp256k1_v0_1_0_scratch *scratch, rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_scalar *inp_g_sc, rustsecp256k1_v0_1_0_ecmult_multi_callback cb, void *cbdata, size_t n) {
+    return rustsecp256k1_v0_1_0_ecmult_strauss_batch(error_callback, actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
 }

-static size_t secp256k1_strauss_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) {
-    return secp256k1_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1);
+static size_t rustsecp256k1_v0_1_0_strauss_max_points(const rustsecp256k1_v0_1_0_callback* error_callback, rustsecp256k1_v0_1_0_scratch *scratch) {
+    return rustsecp256k1_v0_1_0_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / rustsecp256k1_v0_1_0_strauss_scratch_size(1);
 }

 /** Convert a number to WNAF notation.
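The batch functions above all follow the same scratch-space discipline: record a checkpoint, carve the per-point buffers out of the scratch space, and rewind to the checkpoint on any failure (and once the work is done) so the space can be reused by the next batch. A minimal standalone sketch of that idiom, using a toy bump allocator rather than the library's scratch type (all names below are illustrative, not the library's API):

    #include <stddef.h>

    /* Toy bump allocator demonstrating the checkpoint/rollback pattern. */
    typedef struct { unsigned char buf[1024]; size_t alloc_size; } toy_scratch;

    static size_t toy_checkpoint(const toy_scratch *s) { return s->alloc_size; }

    static void *toy_alloc(toy_scratch *s, size_t size) {
        void *p;
        if (size > sizeof(s->buf) - s->alloc_size) return NULL; /* out of space */
        p = &s->buf[s->alloc_size];
        s->alloc_size += size;
        return p;
    }

    /* Rewinding to the checkpoint frees everything taken since it. */
    static void toy_apply_checkpoint(toy_scratch *s, size_t cp) { s->alloc_size = cp; }

    static int toy_batch(toy_scratch *s, size_t n) {
        size_t cp = toy_checkpoint(s);
        void *points = toy_alloc(s, n * 64);
        void *scalars = toy_alloc(s, n * 32);
        if (points == NULL || scalars == NULL) {
            toy_apply_checkpoint(s, cp);  /* roll back any partial allocation */
            return 0;
        }
        /* ... use the buffers ... */
        toy_apply_checkpoint(s, cp);      /* scratch is reusable afterwards */
        return 1;
    }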
@@ -706,25 +706,25 @@ static size_t secp256k1_strauss_max_points(const secp256k1_callback* error_callb
  *  - the number of words set is always WNAF_SIZE(w)
  *  - the returned skew is 0 or 1
  */
-static int secp256k1_wnaf_fixed(int *wnaf, const secp256k1_scalar *s, int w) {
+static int rustsecp256k1_v0_1_0_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_1_0_scalar *s, int w) {
     int skew = 0;
     int pos;
     int max_pos;
     int last_w;
-    const secp256k1_scalar *work = s;
+    const rustsecp256k1_v0_1_0_scalar *work = s;

-    if (secp256k1_scalar_is_zero(s)) {
+    if (rustsecp256k1_v0_1_0_scalar_is_zero(s)) {
         for (pos = 0; pos < WNAF_SIZE(w); pos++) {
             wnaf[pos] = 0;
         }
         return 0;
     }

-    if (secp256k1_scalar_is_even(s)) {
+    if (rustsecp256k1_v0_1_0_scalar_is_even(s)) {
         skew = 1;
     }

-    wnaf[0] = secp256k1_scalar_get_bits_var(work, 0, w) + skew;
+    wnaf[0] = rustsecp256k1_v0_1_0_scalar_get_bits_var(work, 0, w) + skew;
     /* Compute last window size. Relevant when window size doesn't divide the
      * number of bits in the scalar */
     last_w = WNAF_BITS - (WNAF_SIZE(w) - 1) * w;
@@ -732,7 +732,7 @@ static int secp256k1_wnaf_fixed(int *wnaf, const secp256k1_scalar *s, int w) {
     /* Store the position of the first nonzero word in max_pos to allow
      * skipping leading zeros when calculating the wnaf. */
     for (pos = WNAF_SIZE(w) - 1; pos > 0; pos--) {
-        int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
+        int val = rustsecp256k1_v0_1_0_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
         if(val != 0) {
             break;
         }
@@ -742,7 +742,7 @@ static int secp256k1_wnaf_fixed(int *wnaf, const secp256k1_scalar *s, int w) {
     pos = 1;

     while (pos <= max_pos) {
-        int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
+        int val = rustsecp256k1_v0_1_0_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
         if ((val & 1) == 0) {
             wnaf[pos - 1] -= (1 << w);
             wnaf[pos] = (val + 1);
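To see what digits this recoding produces, here is a self-contained toy version for a small unsigned scalar (illustrative only: the real function reads its windows out of a 256-bit scalar and handles the odd-sized last window separately):

    #include <stdio.h>

    #define W 4        /* window size: digits end up odd, |digit| < 2^W */
    #define NDIGITS 8  /* enough windows for a 32-bit toy scalar        */

    /* Returns the skew (1 if s was even). Afterwards the digits satisfy
     *   sum(wnaf[i] * 2^(W*i)) - skew == s,   with every used digit odd. */
    static int toy_wnaf_fixed(int *wnaf, unsigned long s) {
        int skew = (s & 1) ? 0 : 1;   /* even scalars are recoded as s+1 ... */
        unsigned long work = s + skew;
        int pos, max_pos;
        for (pos = 0; pos < NDIGITS; pos++) {
            wnaf[pos] = (int)(work & ((1u << W) - 1));
            work >>= W;
        }
        max_pos = NDIGITS - 1;
        while (max_pos > 0 && wnaf[max_pos] == 0) max_pos--;
        for (pos = 1; pos <= max_pos; pos++) {
            if ((wnaf[pos] & 1) == 0) {   /* make the digit odd by taking +1  */
                wnaf[pos] += 1;           /* ... and repaying 2^W one window  */
                wnaf[pos - 1] -= 1 << W;  /* down, leaving the value intact   */
            }
        }
        return skew;                      /* ... caller subtracts skew*P once */
    }

    int main(void) {
        int wnaf[NDIGITS], i, skew;
        long acc = 0;
        skew = toy_wnaf_fixed(wnaf, 1000);   /* digits: -7, 15, 3, 0, ... */
        for (i = NDIGITS - 1; i >= 0; i--) acc = (acc << W) + wnaf[i];
        printf("%ld\n", acc - skew);         /* prints 1000 */
        return 0;
    }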
@@ -768,14 +768,14 @@ static int secp256k1_wnaf_fixed(int *wnaf, const secp256k1_scalar *s, int w) {
     return skew;
 }

-struct secp256k1_pippenger_point_state {
+struct rustsecp256k1_v0_1_0_pippenger_point_state {
     int skew_na;
     size_t input_pos;
 };

-struct secp256k1_pippenger_state {
+struct rustsecp256k1_v0_1_0_pippenger_state {
     int *wnaf_na;
-    struct secp256k1_pippenger_point_state* ps;
+    struct rustsecp256k1_v0_1_0_pippenger_point_state* ps;
 };

 /*
@@ -785,7 +785,7 @@ struct secp256k1_pippenger_state {
  * to the point's wnaf[i]. Second, the buckets are added together such that
  * r += 1*bucket[0] + 3*bucket[1] + 5*bucket[2] + ...
  */
-static int secp256k1_ecmult_pippenger_wnaf(secp256k1_gej *buckets, int bucket_window, struct secp256k1_pippenger_state *state, secp256k1_gej *r, const secp256k1_scalar *sc, const secp256k1_ge *pt, size_t num) {
+static int rustsecp256k1_v0_1_0_ecmult_pippenger_wnaf(rustsecp256k1_v0_1_0_gej *buckets, int bucket_window, struct rustsecp256k1_v0_1_0_pippenger_state *state, rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_scalar *sc, const rustsecp256k1_v0_1_0_ge *pt, size_t num) {
     size_t n_wnaf = WNAF_SIZE(bucket_window+1);
     size_t np;
     size_t no = 0;
@@ -793,55 +793,55 @@ static int secp256k1_ecmult_pippenger_wnaf(secp256k1_gej *buckets, int bucket_wi
     int j;

     for (np = 0; np < num; ++np) {
-        if (secp256k1_scalar_is_zero(&sc[np]) || secp256k1_ge_is_infinity(&pt[np])) {
+        if (rustsecp256k1_v0_1_0_scalar_is_zero(&sc[np]) || rustsecp256k1_v0_1_0_ge_is_infinity(&pt[np])) {
             continue;
         }
         state->ps[no].input_pos = np;
-        state->ps[no].skew_na = secp256k1_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1);
+        state->ps[no].skew_na = rustsecp256k1_v0_1_0_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1);
         no++;
     }
-    secp256k1_gej_set_infinity(r);
+    rustsecp256k1_v0_1_0_gej_set_infinity(r);

     if (no == 0) {
         return 1;
     }

     for (i = n_wnaf - 1; i >= 0; i--) {
-        secp256k1_gej running_sum;
+        rustsecp256k1_v0_1_0_gej running_sum;

         for(j = 0; j < ECMULT_TABLE_SIZE(bucket_window+2); j++) {
-            secp256k1_gej_set_infinity(&buckets[j]);
+            rustsecp256k1_v0_1_0_gej_set_infinity(&buckets[j]);
         }

         for (np = 0; np < no; ++np) {
             int n = state->wnaf_na[np*n_wnaf + i];
-            struct secp256k1_pippenger_point_state point_state = state->ps[np];
-            secp256k1_ge tmp;
+            struct rustsecp256k1_v0_1_0_pippenger_point_state point_state = state->ps[np];
+            rustsecp256k1_v0_1_0_ge tmp;
             int idx;

             if (i == 0) {
                 /* correct for wnaf skew */
                 int skew = point_state.skew_na;
                 if (skew) {
-                    secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]);
-                    secp256k1_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL);
+                    rustsecp256k1_v0_1_0_ge_neg(&tmp, &pt[point_state.input_pos]);
+                    rustsecp256k1_v0_1_0_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL);
                 }
             }
             if (n > 0) {
                 idx = (n - 1)/2;
-                secp256k1_gej_add_ge_var(&buckets[idx], &buckets[idx], &pt[point_state.input_pos], NULL);
+                rustsecp256k1_v0_1_0_gej_add_ge_var(&buckets[idx], &buckets[idx], &pt[point_state.input_pos], NULL);
             } else if (n < 0) {
                 idx = -(n + 1)/2;
-                secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]);
-                secp256k1_gej_add_ge_var(&buckets[idx], &buckets[idx], &tmp, NULL);
+                rustsecp256k1_v0_1_0_ge_neg(&tmp, &pt[point_state.input_pos]);
+                rustsecp256k1_v0_1_0_gej_add_ge_var(&buckets[idx], &buckets[idx], &tmp, NULL);
             }
         }

         for(j = 0; j < bucket_window; j++) {
-            secp256k1_gej_double_var(r, r, NULL);
+            rustsecp256k1_v0_1_0_gej_double_var(r, r, NULL);
         }

-        secp256k1_gej_set_infinity(&running_sum);
+        rustsecp256k1_v0_1_0_gej_set_infinity(&running_sum);
         /* Accumulate the sum: bucket[0] + 3*bucket[1] + 5*bucket[2] + 7*bucket[3] + ...
          *                   = bucket[0] +   bucket[1] +   bucket[2] +   bucket[3] + ...
          *                   +         2 *  (bucket[1] + 2*bucket[2] + 3*bucket[3] + ...)
@@ -851,13 +851,13 @@ static int secp256k1_ecmult_pippenger_wnaf(secp256k1_gej *buckets, int bucket_wi
          * The doubling is done implicitly by deferring the final window doubling (of 'r').
          */
         for(j = ECMULT_TABLE_SIZE(bucket_window+2) - 1; j > 0; j--) {
-            secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[j], NULL);
-            secp256k1_gej_add_var(r, r, &running_sum, NULL);
+            rustsecp256k1_v0_1_0_gej_add_var(&running_sum, &running_sum, &buckets[j], NULL);
+            rustsecp256k1_v0_1_0_gej_add_var(r, r, &running_sum, NULL);
         }

-        secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL);
-        secp256k1_gej_double_var(r, r, NULL);
-        secp256k1_gej_add_var(r, r, &running_sum, NULL);
+        rustsecp256k1_v0_1_0_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL);
+        rustsecp256k1_v0_1_0_gej_double_var(r, r, NULL);
+        rustsecp256k1_v0_1_0_gej_add_var(r, r, &running_sum, NULL);
     }
     return 1;
 }
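The correctness of that running-sum loop rests on an integer identity that is easy to sanity-check outside the group setting (a standalone hypothetical demo with plain integers standing in for the bucket points, not library code):

    #include <assert.h>

    int main(void) {
        long b[4] = {7, 11, 13, 17};
        long direct = 0, sums = 0, weighted = 0;
        int j;
        for (j = 0; j < 4; j++) direct += (2*j + 1) * b[j]; /* 1*b0+3*b1+5*b2+7*b3 */
        for (j = 0; j < 4; j++) sums += b[j];               /* b0+b1+b2+b3         */
        for (j = 1; j < 4; j++) weighted += j * b[j];       /* b1+2*b2+3*b3        */
        /* After the descending loop, r holds `weighted` and running_sum the
         * total; adding bucket[0], doubling r, and adding running_sum gives: */
        assert(direct == 2*weighted + sums);
        return 0;
    }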
@@ -866,7 +866,7 @@ static int secp256k1_ecmult_pippenger_wnaf(secp256k1_gej *buckets, int bucket_wi
  * Returns optimal bucket_window (number of bits of a scalar represented by a
  * set of buckets) for a given number of points.
  */
-static int secp256k1_pippenger_bucket_window(size_t n) {
+static int rustsecp256k1_v0_1_0_pippenger_bucket_window(size_t n) {
 #ifdef USE_ENDOMORPHISM
     if (n <= 1) {
         return 1;
@@ -923,7 +923,7 @@ static int secp256k1_pippenger_bucket_window(size_t n) {
 /**
  * Returns the maximum optimal number of points for a bucket_window.
  */
-static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) {
+static size_t rustsecp256k1_v0_1_0_pippenger_bucket_window_inv(int bucket_window) {
     switch(bucket_window) {
 #ifdef USE_ENDOMORPHISM
         case 1: return 1;
@@ -958,18 +958,18 @@ static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) {

 #ifdef USE_ENDOMORPHISM
-SECP256K1_INLINE static void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, secp256k1_scalar *s2, secp256k1_ge *p1, secp256k1_ge *p2) {
-    secp256k1_scalar tmp = *s1;
-    secp256k1_scalar_split_lambda(s1, s2, &tmp);
-    secp256k1_ge_mul_lambda(p2, p1);
+SECP256K1_INLINE static void rustsecp256k1_v0_1_0_ecmult_endo_split(rustsecp256k1_v0_1_0_scalar *s1, rustsecp256k1_v0_1_0_scalar *s2, rustsecp256k1_v0_1_0_ge *p1, rustsecp256k1_v0_1_0_ge *p2) {
+    rustsecp256k1_v0_1_0_scalar tmp = *s1;
+    rustsecp256k1_v0_1_0_scalar_split_lambda(s1, s2, &tmp);
+    rustsecp256k1_v0_1_0_ge_mul_lambda(p2, p1);

-    if (secp256k1_scalar_is_high(s1)) {
-        secp256k1_scalar_negate(s1, s1);
-        secp256k1_ge_neg(p1, p1);
+    if (rustsecp256k1_v0_1_0_scalar_is_high(s1)) {
+        rustsecp256k1_v0_1_0_scalar_negate(s1, s1);
+        rustsecp256k1_v0_1_0_ge_neg(p1, p1);
     }
-    if (secp256k1_scalar_is_high(s2)) {
-        secp256k1_scalar_negate(s2, s2);
-        secp256k1_ge_neg(p2, p2);
+    if (rustsecp256k1_v0_1_0_scalar_is_high(s2)) {
+        rustsecp256k1_v0_1_0_scalar_negate(s2, s2);
+        rustsecp256k1_v0_1_0_ge_neg(p2, p2);
     }
 }
 #endif
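For reference, the split computed here is the standard GLV decomposition; the sketch below uses the conventional lambda/beta notation for the curve's endomorphism constants and n for the group order (standard theory, not anything defined by this patch):

    \phi(x, y) = (\beta x,\; y), \qquad \phi(P) = \lambda \cdot P, \qquad \lambda^3 \equiv 1 \pmod{n}

A scalar splits as s \equiv s_1 + \lambda s_2 \pmod{n} with |s_1|, |s_2| on the order of \sqrt{n}, so that

    s \cdot P = s_1 \cdot P + s_2 \cdot \phi(P),

turning one full-width multiplication into two half-width ones that share their doublings. The two negation fix-ups above merely keep s_1 and s_2 in the low half-range, flipping the matching point so the products are unchanged.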
@@ -978,18 +978,18 @@ SECP256K1_INLINE static void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, s
  * Returns the scratch size required for a given number of points (excluding
  * base point G) without considering alignment.
  */
-static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_window) {
+static size_t rustsecp256k1_v0_1_0_pippenger_scratch_size(size_t n_points, int bucket_window) {
 #ifdef USE_ENDOMORPHISM
     size_t entries = 2*n_points + 2;
 #else
     size_t entries = n_points + 1;
 #endif
-    size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
-    return (sizeof(secp256k1_gej) << bucket_window) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size;
+    size_t entry_size = sizeof(rustsecp256k1_v0_1_0_ge) + sizeof(rustsecp256k1_v0_1_0_scalar) + sizeof(struct rustsecp256k1_v0_1_0_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
+    return (sizeof(rustsecp256k1_v0_1_0_gej) << bucket_window) + sizeof(struct rustsecp256k1_v0_1_0_pippenger_state) + entries * entry_size;
 }
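Plugging toy numbers into that formula makes the growth visible: the bucket table doubles with each extra window bit while the per-point cost stays linear. The structure sizes below are invented for illustration only (the real ones depend on the field representation chosen at build time):

    #include <stdio.h>

    #define TOY_GE  64u   /* assumed affine-point size         */
    #define TOY_GEJ 160u  /* assumed jacobian-point size       */
    #define TOY_SC  32u   /* assumed scalar size               */
    #define TOY_PS  16u   /* assumed per-point wnaf state size */

    /* Shape of the scratch-size formula above, with made-up constants. */
    static size_t toy_pippenger_scratch_size(size_t n_points, int bucket_window, size_t wnaf_size) {
        size_t entries = 2*n_points + 2;  /* endomorphism case */
        size_t entry_size = TOY_GE + TOY_SC + TOY_PS + (wnaf_size + 1) * sizeof(int);
        return ((size_t)TOY_GEJ << bucket_window) + entries * entry_size;
    }

    int main(void) {
        /* e.g. 100 points, 26 wnaf digits per half-width scalar */
        printf("%zu\n", toy_pippenger_scratch_size(100, 4, 26));
        printf("%zu\n", toy_pippenger_scratch_size(100, 8, 26));
        return 0;
    }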
-static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
-    const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
+static int rustsecp256k1_v0_1_0_ecmult_pippenger_batch(const rustsecp256k1_v0_1_0_callback* error_callback, const rustsecp256k1_v0_1_0_ecmult_context *ctx, rustsecp256k1_v0_1_0_scratch *scratch, rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_scalar *inp_g_sc, rustsecp256k1_v0_1_0_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
+    const size_t scratch_checkpoint = rustsecp256k1_v0_1_0_scratch_checkpoint(error_callback, scratch);
     /* Use 2(n+1) with the endomorphism, n+1 without, when calculating batch
      * sizes. The reason for +1 is that we add the G scalar to the list of
      * other scalars.
      */
@@ -998,81 +998,81 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_call
 #else
     size_t entries = n_points + 1;
 #endif
-    secp256k1_ge *points;
-    secp256k1_scalar *scalars;
-    secp256k1_gej *buckets;
-    struct secp256k1_pippenger_state *state_space;
+    rustsecp256k1_v0_1_0_ge *points;
+    rustsecp256k1_v0_1_0_scalar *scalars;
+    rustsecp256k1_v0_1_0_gej *buckets;
+    struct rustsecp256k1_v0_1_0_pippenger_state *state_space;
     size_t idx = 0;
     size_t point_idx = 0;
     int i, j;
     int bucket_window;

     (void)ctx;
-    secp256k1_gej_set_infinity(r);
+    rustsecp256k1_v0_1_0_gej_set_infinity(r);
     if (inp_g_sc == NULL && n_points == 0) {
         return 1;
     }

-    bucket_window = secp256k1_pippenger_bucket_window(n_points);
-    points = (secp256k1_ge *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*points));
-    scalars = (secp256k1_scalar *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars));
-    state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(error_callback, scratch, sizeof(*state_space));
+    bucket_window = rustsecp256k1_v0_1_0_pippenger_bucket_window(n_points);
+    points = (rustsecp256k1_v0_1_0_ge *) rustsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, entries * sizeof(*points));
+    scalars = (rustsecp256k1_v0_1_0_scalar *) rustsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars));
+    state_space = (struct rustsecp256k1_v0_1_0_pippenger_state *) rustsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, sizeof(*state_space));
     if (points == NULL || scalars == NULL || state_space == NULL) {
-        secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
+        rustsecp256k1_v0_1_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
         return 0;
     }

-    state_space->ps = (struct secp256k1_pippenger_point_state *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps));
-    state_space->wnaf_na = (int *) secp256k1_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
-    buckets = (secp256k1_gej *) secp256k1_scratch_alloc(error_callback, scratch, (1<<bucket_window) * sizeof(*buckets));
+    state_space->ps = (struct rustsecp256k1_v0_1_0_pippenger_point_state *) rustsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps));
+    state_space->wnaf_na = (int *) rustsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
+    buckets = (rustsecp256k1_v0_1_0_gej *) rustsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, (1<<bucket_window) * sizeof(*buckets));
     if (state_space->ps == NULL || state_space->wnaf_na == NULL || buckets == NULL) {
-        secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
+        rustsecp256k1_v0_1_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
         return 0;
     }

     if (inp_g_sc != NULL) {
         scalars[0] = *inp_g_sc;
-        points[0] = secp256k1_ge_const_g;
+        points[0] = rustsecp256k1_v0_1_0_ge_const_g;
         idx++;
 #ifdef USE_ENDOMORPHISM
-        secp256k1_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]);
+        rustsecp256k1_v0_1_0_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]);
         idx++;
 #endif
     }

     while (point_idx < n_points) {
         if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) {
-            secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
+            rustsecp256k1_v0_1_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
             return 0;
         }
         idx++;
 #ifdef USE_ENDOMORPHISM
-        secp256k1_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]);
+        rustsecp256k1_v0_1_0_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]);
         idx++;
 #endif
         point_idx++;
     }

-    secp256k1_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx);
+    rustsecp256k1_v0_1_0_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx);

     /* Clear data */
     for(i = 0; (size_t)i < idx; i++) {
-        secp256k1_scalar_clear(&scalars[i]);
+        rustsecp256k1_v0_1_0_scalar_clear(&scalars[i]);
         state_space->ps[i].skew_na = 0;
         for(j = 0; j < WNAF_SIZE(bucket_window+1); j++) {
             state_space->wnaf_na[i * WNAF_SIZE(bucket_window+1) + j] = 0;
         }
     }
     for(i = 0; i < 1< max_alloc) { break; }
@@ -1118,34 +1118,34 @@ static size_t secp256k1_pippenger_max_points(const secp256k1_callback* error_cal
 /* Computes ecmult_multi by simply multiplying and adding each point. Does not
  * require a scratch space */
-static int secp256k1_ecmult_multi_simple_var(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points) {
+static int rustsecp256k1_v0_1_0_ecmult_multi_simple_var(const rustsecp256k1_v0_1_0_ecmult_context *ctx, rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_scalar *inp_g_sc, rustsecp256k1_v0_1_0_ecmult_multi_callback cb, void *cbdata, size_t n_points) {
     size_t point_idx;
-    secp256k1_scalar szero;
-    secp256k1_gej tmpj;
+    rustsecp256k1_v0_1_0_scalar szero;
+    rustsecp256k1_v0_1_0_gej tmpj;

-    secp256k1_scalar_set_int(&szero, 0);
-    secp256k1_gej_set_infinity(r);
-    secp256k1_gej_set_infinity(&tmpj);
+    rustsecp256k1_v0_1_0_scalar_set_int(&szero, 0);
+    rustsecp256k1_v0_1_0_gej_set_infinity(r);
+    rustsecp256k1_v0_1_0_gej_set_infinity(&tmpj);
     /* r = inp_g_sc*G */
-    secp256k1_ecmult(ctx, r, &tmpj, &szero, inp_g_sc);
+    rustsecp256k1_v0_1_0_ecmult(ctx, r, &tmpj, &szero, inp_g_sc);
     for (point_idx = 0; point_idx < n_points; point_idx++) {
-        secp256k1_ge point;
-        secp256k1_gej pointj;
-        secp256k1_scalar scalar;
+        rustsecp256k1_v0_1_0_ge point;
+        rustsecp256k1_v0_1_0_gej pointj;
+        rustsecp256k1_v0_1_0_scalar scalar;
         if (!cb(&scalar, &point, point_idx, cbdata)) {
             return 0;
         }
         /* r += scalar*point */
-        secp256k1_gej_set_ge(&pointj, &point);
-        secp256k1_ecmult(ctx, &tmpj, &pointj, &scalar, NULL);
-        secp256k1_gej_add_var(r, r, &tmpj, NULL);
+        rustsecp256k1_v0_1_0_gej_set_ge(&pointj, &point);
+        rustsecp256k1_v0_1_0_ecmult(ctx, &tmpj, &pointj, &scalar, NULL);
+        rustsecp256k1_v0_1_0_gej_add_var(r, r, &tmpj, NULL);
     }
     return 1;
 }

 /* Compute the number of batches and the batch size given the maximum batch size and the
  * total number of points */
-static int secp256k1_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) {
+static int rustsecp256k1_v0_1_0_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) {
     if (max_n_batch_points == 0) {
         return 0;
     }
@@ -1163,50 +1163,50 @@ static int secp256k1_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n
     return 1;
 }

-typedef int (*secp256k1_ecmult_multi_func)(const secp256k1_callback* error_callback, const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t);
-static int secp256k1_ecmult_multi_var(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
+typedef int (*rustsecp256k1_v0_1_0_ecmult_multi_func)(const rustsecp256k1_v0_1_0_callback* error_callback, const rustsecp256k1_v0_1_0_ecmult_context*, rustsecp256k1_v0_1_0_scratch*, rustsecp256k1_v0_1_0_gej*, const rustsecp256k1_v0_1_0_scalar*, rustsecp256k1_v0_1_0_ecmult_multi_callback cb, void*, size_t);
+static int rustsecp256k1_v0_1_0_ecmult_multi_var(const rustsecp256k1_v0_1_0_callback* error_callback, const rustsecp256k1_v0_1_0_ecmult_context *ctx, rustsecp256k1_v0_1_0_scratch *scratch, rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_scalar *inp_g_sc, rustsecp256k1_v0_1_0_ecmult_multi_callback cb, void *cbdata, size_t n) {
     size_t i;

-    int (*f)(const secp256k1_callback* error_callback, const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t, size_t);
+    int (*f)(const rustsecp256k1_v0_1_0_callback* error_callback, const rustsecp256k1_v0_1_0_ecmult_context*, rustsecp256k1_v0_1_0_scratch*, rustsecp256k1_v0_1_0_gej*, const rustsecp256k1_v0_1_0_scalar*, rustsecp256k1_v0_1_0_ecmult_multi_callback cb, void*, size_t, size_t);
     size_t n_batches;
     size_t n_batch_points;

-    secp256k1_gej_set_infinity(r);
+    rustsecp256k1_v0_1_0_gej_set_infinity(r);
     if (inp_g_sc == NULL && n == 0) {
         return 1;
     } else if (n == 0) {
-        secp256k1_scalar szero;
-        secp256k1_scalar_set_int(&szero, 0);
-        secp256k1_ecmult(ctx, r, r, &szero, inp_g_sc);
+        rustsecp256k1_v0_1_0_scalar szero;
+        rustsecp256k1_v0_1_0_scalar_set_int(&szero, 0);
+        rustsecp256k1_v0_1_0_ecmult(ctx, r, r, &szero, inp_g_sc);
         return 1;
     }
     if (scratch == NULL) {
-        return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
+        return rustsecp256k1_v0_1_0_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
     }

    /* Compute the batch sizes for Pippenger's algorithm given a scratch space. If it's greater than
     * a threshold use Pippenger's algorithm. Otherwise use Strauss' algorithm.
     * As a first step check if there's enough space for Pippenger's algo (which requires less space
     * than Strauss' algo) and if not, use the simple algorithm. */
-    if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_pippenger_max_points(error_callback, scratch), n)) {
-        return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
+    if (!rustsecp256k1_v0_1_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, rustsecp256k1_v0_1_0_pippenger_max_points(error_callback, scratch), n)) {
+        return rustsecp256k1_v0_1_0_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
     }
     if (n_batch_points >= ECMULT_PIPPENGER_THRESHOLD) {
-        f = secp256k1_ecmult_pippenger_batch;
+        f = rustsecp256k1_v0_1_0_ecmult_pippenger_batch;
     } else {
-        if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_strauss_max_points(error_callback, scratch), n)) {
-            return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
+        if (!rustsecp256k1_v0_1_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, rustsecp256k1_v0_1_0_strauss_max_points(error_callback, scratch), n)) {
+            return rustsecp256k1_v0_1_0_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
         }
-        f = secp256k1_ecmult_strauss_batch;
+        f = rustsecp256k1_v0_1_0_ecmult_strauss_batch;
     }
     for(i = 0; i < n_batches; i++) {
         size_t nbp = n < n_batch_points ? n : n_batch_points;
         size_t offset = n_batch_points*i;
-        secp256k1_gej tmp;
+        rustsecp256k1_v0_1_0_gej tmp;
         if (!f(error_callback, ctx, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {
             return 0;
         }
-        secp256k1_gej_add_var(r, r, &tmp, NULL);
+        rustsecp256k1_v0_1_0_gej_add_var(r, r, &tmp, NULL);
         n -= nbp;
     }
     return 1;
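That closes the multi-multiplication driver: points are fed to Strauss or Pippenger in equal batches, with only the first batch receiving the G scalar. The batch arithmetic itself is plain ceiling division; a hypothetical standalone sketch of the same rounding behaviour (not the library function itself):

    #include <stdio.h>

    /* ceil(n / max) batches, sized evenly so the last batch is not much
     * smaller than the rest; n > 0 is assumed for this sketch. */
    static int toy_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max, size_t n) {
        if (max == 0) return 0;
        *n_batches = 1 + (n - 1) / max;              /* ceil(n / max)       */
        *n_batch_points = 1 + (n - 1) / *n_batches;  /* ceil(n / n_batches) */
        return 1;
    }

    int main(void) {
        size_t batches, per_batch;
        toy_batch_size_helper(&batches, &per_batch, 40, 100);
        printf("%zu batches of <= %zu points\n", batches, per_batch); /* 3, 34 */
        return 0;
    }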
diff --git a/secp256k1-sys/depend/secp256k1/src/field.h b/secp256k1-sys/depend/secp256k1/src/field.h
index bb6692a..7a741e2 100644
--- a/secp256k1-sys/depend/secp256k1/src/field.h
+++ b/secp256k1-sys/depend/secp256k1/src/field.h
@@ -33,100 +33,100 @@
 #include "util.h"

 /** Normalize a field element. */
-static void secp256k1_fe_normalize(secp256k1_fe *r);
+static void rustsecp256k1_v0_1_0_fe_normalize(rustsecp256k1_v0_1_0_fe *r);

 /** Weakly normalize a field element: reduce its magnitude to 1, but don't fully normalize. */
-static void secp256k1_fe_normalize_weak(secp256k1_fe *r);
+static void rustsecp256k1_v0_1_0_fe_normalize_weak(rustsecp256k1_v0_1_0_fe *r);

 /** Normalize a field element, without constant-time guarantee. */
-static void secp256k1_fe_normalize_var(secp256k1_fe *r);
+static void rustsecp256k1_v0_1_0_fe_normalize_var(rustsecp256k1_v0_1_0_fe *r);

 /** Verify whether a field element represents zero i.e. would normalize to a zero value. The field
  *  implementation may optionally normalize the input, but this should not be relied upon. */
-static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r);
+static int rustsecp256k1_v0_1_0_fe_normalizes_to_zero(rustsecp256k1_v0_1_0_fe *r);

 /** Verify whether a field element represents zero i.e. would normalize to a zero value. The field
  *  implementation may optionally normalize the input, but this should not be relied upon. */
-static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r);
+static int rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(rustsecp256k1_v0_1_0_fe *r);

 /** Set a field element equal to a small integer. Resulting field element is normalized. */
-static void secp256k1_fe_set_int(secp256k1_fe *r, int a);
+static void rustsecp256k1_v0_1_0_fe_set_int(rustsecp256k1_v0_1_0_fe *r, int a);

 /** Sets a field element equal to zero, initializing all fields. */
-static void secp256k1_fe_clear(secp256k1_fe *a);
+static void rustsecp256k1_v0_1_0_fe_clear(rustsecp256k1_v0_1_0_fe *a);

 /** Verify whether a field element is zero. Requires the input to be normalized. */
-static int secp256k1_fe_is_zero(const secp256k1_fe *a);
+static int rustsecp256k1_v0_1_0_fe_is_zero(const rustsecp256k1_v0_1_0_fe *a);

 /** Check the "oddness" of a field element. Requires the input to be normalized. */
-static int secp256k1_fe_is_odd(const secp256k1_fe *a);
+static int rustsecp256k1_v0_1_0_fe_is_odd(const rustsecp256k1_v0_1_0_fe *a);

 /** Compare two field elements. Requires magnitude-1 inputs. */
-static int secp256k1_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b);
+static int rustsecp256k1_v0_1_0_fe_equal(const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe *b);

-/** Same as secp256k1_fe_equal, but may be variable time. */
-static int secp256k1_fe_equal_var(const secp256k1_fe *a, const secp256k1_fe *b);
+/** Same as rustsecp256k1_v0_1_0_fe_equal, but may be variable time. */
+static int rustsecp256k1_v0_1_0_fe_equal_var(const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe *b);

 /** Compare two field elements. Requires both inputs to be normalized */
-static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b);
+static int rustsecp256k1_v0_1_0_fe_cmp_var(const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe *b);

 /** Set a field element equal to a 32-byte big endian value. If successful, the resulting field element is normalized. */
-static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a);
+static int rustsecp256k1_v0_1_0_fe_set_b32(rustsecp256k1_v0_1_0_fe *r, const unsigned char *a);

 /** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
-static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a);
+static void rustsecp256k1_v0_1_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_1_0_fe *a);

 /** Set a field element equal to the additive inverse of another. Takes a maximum magnitude of the input
  *  as an argument. The magnitude of the output is one higher. */
-static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m);
+static void rustsecp256k1_v0_1_0_fe_negate(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, int m);

 /** Multiplies the passed field element with a small integer constant. Multiplies the magnitude by that
  *  small integer. */
-static void secp256k1_fe_mul_int(secp256k1_fe *r, int a);
+static void rustsecp256k1_v0_1_0_fe_mul_int(rustsecp256k1_v0_1_0_fe *r, int a);

 /** Adds a field element to another. The result has the sum of the inputs' magnitudes as magnitude. */
-static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a);
+static void rustsecp256k1_v0_1_0_fe_add(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a);

 /** Sets a field element to be the product of two others. Requires the inputs' magnitudes to be at most 8.
  *  The output magnitude is 1 (but not guaranteed to be normalized). */
-static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b);
+static void rustsecp256k1_v0_1_0_fe_mul(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe * SECP256K1_RESTRICT b);

 /** Sets a field element to be the square of another. Requires the input's magnitude to be at most 8.
  *  The output magnitude is 1 (but not guaranteed to be normalized). */
-static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a);
+static void rustsecp256k1_v0_1_0_fe_sqr(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a);

 /** If a has a square root, it is computed in r and 1 is returned. If a does not
  *  have a square root, the root of its negation is computed and 0 is returned.
  *  The input's magnitude can be at most 8. The output magnitude is 1 (but not
  *  guaranteed to be normalized). The result in r will always be a square
  *  itself. */
-static int secp256k1_fe_sqrt(secp256k1_fe *r, const secp256k1_fe *a);
+static int rustsecp256k1_v0_1_0_fe_sqrt(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a);

 /** Checks whether a field element is a quadratic residue. */
-static int secp256k1_fe_is_quad_var(const secp256k1_fe *a);
+static int rustsecp256k1_v0_1_0_fe_is_quad_var(const rustsecp256k1_v0_1_0_fe *a);

 /** Sets a field element to be the (modular) inverse of another. Requires the input's magnitude to be
  *  at most 8. The output magnitude is 1 (but not guaranteed to be normalized). */
-static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a);
+static void rustsecp256k1_v0_1_0_fe_inv(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a);

-/** Potentially faster version of secp256k1_fe_inv, without constant-time guarantee. */
-static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a);
+/** Potentially faster version of rustsecp256k1_v0_1_0_fe_inv, without constant-time guarantee. */
+static void rustsecp256k1_v0_1_0_fe_inv_var(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a);

 /** Calculate the (modular) inverses of a batch of field elements. Requires the inputs' magnitudes to be
  *  at most 8. The output magnitudes are 1 (but not guaranteed to be normalized). The inputs and
  *  outputs must not overlap in memory. */
-static void secp256k1_fe_inv_all_var(secp256k1_fe *r, const secp256k1_fe *a, size_t len);
+static void rustsecp256k1_v0_1_0_fe_inv_all_var(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, size_t len);

 /** Convert a field element to the storage type. */
-static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a);
+static void rustsecp256k1_v0_1_0_fe_to_storage(rustsecp256k1_v0_1_0_fe_storage *r, const rustsecp256k1_v0_1_0_fe *a);

 /** Convert a field element back from the storage type. */
-static void secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a);
+static void rustsecp256k1_v0_1_0_fe_from_storage(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe_storage *a);

 /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */
-static void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag);
+static void rustsecp256k1_v0_1_0_fe_storage_cmov(rustsecp256k1_v0_1_0_fe_storage *r, const rustsecp256k1_v0_1_0_fe_storage *a, int flag);

 /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */
-static void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag);
+static void rustsecp256k1_v0_1_0_fe_cmov(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, int flag);

 #endif /* SECP256K1_FIELD_H */
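Running through these declarations is one invariant worth stating in a single place: every field element carries a (VERIFY-build-only) magnitude, and each operation has a declared effect on it. A sketch summarizing the contracts above (the toy type is illustrative, not the library's):

    /* Magnitude bookkeeping implied by the contracts above:
     *   normalize*        -> magnitude becomes 1 (and normalized is set)
     *   negate(r, a, m)   -> requires a.magnitude <= m; r.magnitude = m + 1
     *   mul_int(r, k)     -> r.magnitude *= k
     *   add(r, a)         -> r.magnitude += a.magnitude
     *   mul/sqr           -> inputs need magnitude <= 8; result magnitude 1
     */
    typedef struct { int magnitude; int normalized; } toy_fe_meta;

    static void toy_add_meta(toy_fe_meta *r, const toy_fe_meta *a) {
        r->magnitude += a->magnitude; /* limbs grow by at most the summand   */
        r->normalized = 0;            /* sums stay unreduced until normalize */
    }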
1 : 2 * a->magnitude, r = 1; r &= (d[0] <= 0x3FFFFFFUL * m); @@ -39,7 +39,7 @@ static void secp256k1_fe_verify(const secp256k1_fe *a) { } #endif -static void secp256k1_fe_normalize(secp256k1_fe *r) { +static void rustsecp256k1_v0_1_0_fe_normalize(rustsecp256k1_v0_1_0_fe *r) { uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; @@ -90,11 +90,11 @@ static void secp256k1_fe_normalize(secp256k1_fe *r) { #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - secp256k1_fe_verify(r); + rustsecp256k1_v0_1_0_fe_verify(r); #endif } -static void secp256k1_fe_normalize_weak(secp256k1_fe *r) { +static void rustsecp256k1_v0_1_0_fe_normalize_weak(rustsecp256k1_v0_1_0_fe *r) { uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; @@ -121,11 +121,11 @@ static void secp256k1_fe_normalize_weak(secp256k1_fe *r) { #ifdef VERIFY r->magnitude = 1; - secp256k1_fe_verify(r); + rustsecp256k1_v0_1_0_fe_verify(r); #endif } -static void secp256k1_fe_normalize_var(secp256k1_fe *r) { +static void rustsecp256k1_v0_1_0_fe_normalize_var(rustsecp256k1_v0_1_0_fe *r) { uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; @@ -177,11 +177,11 @@ static void secp256k1_fe_normalize_var(secp256k1_fe *r) { #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - secp256k1_fe_verify(r); + rustsecp256k1_v0_1_0_fe_verify(r); #endif } -static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r) { +static int rustsecp256k1_v0_1_0_fe_normalizes_to_zero(rustsecp256k1_v0_1_0_fe *r) { uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; @@ -210,7 +210,7 @@ static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r) { return (z0 == 0) | (z1 == 0x3FFFFFFUL); } -static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r) { +static int rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(rustsecp256k1_v0_1_0_fe *r) { uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; uint32_t z0, z1; uint32_t x; @@ -262,34 +262,34 @@ static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r) { return (z0 == 0) | (z1 == 0x3FFFFFFUL); } -SECP256K1_INLINE static void secp256k1_fe_set_int(secp256k1_fe *r, int a) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_set_int(rustsecp256k1_v0_1_0_fe *r, int a) { r->n[0] = a; r->n[1] = r->n[2] = r->n[3] = r->n[4] = r->n[5] = r->n[6] = r->n[7] = r->n[8] = r->n[9] = 0; #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - secp256k1_fe_verify(r); + rustsecp256k1_v0_1_0_fe_verify(r); #endif } -SECP256K1_INLINE static int secp256k1_fe_is_zero(const secp256k1_fe *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_fe_is_zero(const rustsecp256k1_v0_1_0_fe *a) { const uint32_t *t = a->n; #ifdef VERIFY VERIFY_CHECK(a->normalized); - secp256k1_fe_verify(a); + rustsecp256k1_v0_1_0_fe_verify(a); #endif return (t[0] | t[1] | t[2] | t[3] | t[4] | t[5] | t[6] | t[7] | t[8] | t[9]) == 0; } -SECP256K1_INLINE static int secp256k1_fe_is_odd(const secp256k1_fe *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_fe_is_odd(const rustsecp256k1_v0_1_0_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->normalized); - secp256k1_fe_verify(a); + rustsecp256k1_v0_1_0_fe_verify(a); #endif return a->n[0] & 1; } -SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe *a) { 
+SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_clear(rustsecp256k1_v0_1_0_fe *a) { int i; #ifdef VERIFY a->magnitude = 0; @@ -300,13 +300,13 @@ SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe *a) { } } -static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) { +static int rustsecp256k1_v0_1_0_fe_cmp_var(const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe *b) { int i; #ifdef VERIFY VERIFY_CHECK(a->normalized); VERIFY_CHECK(b->normalized); - secp256k1_fe_verify(a); - secp256k1_fe_verify(b); + rustsecp256k1_v0_1_0_fe_verify(a); + rustsecp256k1_v0_1_0_fe_verify(b); #endif for (i = 9; i >= 0; i--) { if (a->n[i] > b->n[i]) { @@ -319,7 +319,7 @@ static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) { return 0; } -static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) { +static int rustsecp256k1_v0_1_0_fe_set_b32(rustsecp256k1_v0_1_0_fe *r, const unsigned char *a) { r->n[0] = (uint32_t)a[31] | ((uint32_t)a[30] << 8) | ((uint32_t)a[29] << 16) | ((uint32_t)(a[28] & 0x3) << 24); r->n[1] = (uint32_t)((a[28] >> 2) & 0x3f) | ((uint32_t)a[27] << 6) | ((uint32_t)a[26] << 14) | ((uint32_t)(a[25] & 0xf) << 22); r->n[2] = (uint32_t)((a[25] >> 4) & 0xf) | ((uint32_t)a[24] << 4) | ((uint32_t)a[23] << 12) | ((uint32_t)(a[22] & 0x3f) << 20); @@ -337,16 +337,16 @@ static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) { #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - secp256k1_fe_verify(r); + rustsecp256k1_v0_1_0_fe_verify(r); #endif return 1; } /** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */ -static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) { +static void rustsecp256k1_v0_1_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_1_0_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->normalized); - secp256k1_fe_verify(a); + rustsecp256k1_v0_1_0_fe_verify(a); #endif r[0] = (a->n[9] >> 14) & 0xff; r[1] = (a->n[9] >> 6) & 0xff; @@ -382,10 +382,10 @@ static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) { r[31] = a->n[0] & 0xff; } -SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_negate(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, int m) { #ifdef VERIFY VERIFY_CHECK(a->magnitude <= m); - secp256k1_fe_verify(a); + rustsecp256k1_v0_1_0_fe_verify(a); #endif r->n[0] = 0x3FFFC2FUL * 2 * (m + 1) - a->n[0]; r->n[1] = 0x3FFFFBFUL * 2 * (m + 1) - a->n[1]; @@ -400,11 +400,11 @@ SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k #ifdef VERIFY r->magnitude = m + 1; r->normalized = 0; - secp256k1_fe_verify(r); + rustsecp256k1_v0_1_0_fe_verify(r); #endif } -SECP256K1_INLINE static void secp256k1_fe_mul_int(secp256k1_fe *r, int a) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_mul_int(rustsecp256k1_v0_1_0_fe *r, int a) { r->n[0] *= a; r->n[1] *= a; r->n[2] *= a; @@ -418,13 +418,13 @@ SECP256K1_INLINE static void secp256k1_fe_mul_int(secp256k1_fe *r, int a) { #ifdef VERIFY r->magnitude *= a; r->normalized = 0; - secp256k1_fe_verify(r); + rustsecp256k1_v0_1_0_fe_verify(r); #endif } -SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_add(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a) { #ifdef VERIFY - secp256k1_fe_verify(a); + rustsecp256k1_v0_1_0_fe_verify(a); #endif r->n[0] += 
a->n[0]; r->n[1] += a->n[1]; @@ -439,15 +439,15 @@ SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_f #ifdef VERIFY r->magnitude += a->magnitude; r->normalized = 0; - secp256k1_fe_verify(r); + rustsecp256k1_v0_1_0_fe_verify(r); #endif } #if defined(USE_EXTERNAL_ASM) /* External assembler implementation */ -void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b); -void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a); +void rustsecp256k1_v0_1_0_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b); +void rustsecp256k1_v0_1_0_fe_sqr_inner(uint32_t *r, const uint32_t *a); #else @@ -457,7 +457,7 @@ void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a); #define VERIFY_BITS(x, n) do { } while(0) #endif -SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) { uint64_t c, d; uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8; uint32_t t9, t1, t0, t2, t3, t4, t5, t6, t7; @@ -787,7 +787,7 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t /* [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ } -SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_sqr_inner(uint32_t *r, const uint32_t *a) { uint64_t c, d; uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8; uint32_t t9, t0, t1, t2, t3, t4, t5, t6, t7; @@ -1062,37 +1062,37 @@ SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t } #endif -static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b) { +static void rustsecp256k1_v0_1_0_fe_mul(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe * SECP256K1_RESTRICT b) { #ifdef VERIFY VERIFY_CHECK(a->magnitude <= 8); VERIFY_CHECK(b->magnitude <= 8); - secp256k1_fe_verify(a); - secp256k1_fe_verify(b); + rustsecp256k1_v0_1_0_fe_verify(a); + rustsecp256k1_v0_1_0_fe_verify(b); VERIFY_CHECK(r != b); VERIFY_CHECK(a != b); #endif - secp256k1_fe_mul_inner(r->n, a->n, b->n); + rustsecp256k1_v0_1_0_fe_mul_inner(r->n, a->n, b->n); #ifdef VERIFY r->magnitude = 1; r->normalized = 0; - secp256k1_fe_verify(r); + rustsecp256k1_v0_1_0_fe_verify(r); #endif } -static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) { +static void rustsecp256k1_v0_1_0_fe_sqr(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->magnitude <= 8); - secp256k1_fe_verify(a); + rustsecp256k1_v0_1_0_fe_verify(a); #endif - secp256k1_fe_sqr_inner(r->n, a->n); + rustsecp256k1_v0_1_0_fe_sqr_inner(r->n, a->n); #ifdef VERIFY r->magnitude = 1; r->normalized = 0; - secp256k1_fe_verify(r); + rustsecp256k1_v0_1_0_fe_verify(r); #endif } -static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_1_0_fe_cmov(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, int flag) { uint32_t mask0, mask1; mask0 = flag + ~((uint32_t)0); mask1 = ~mask0; @@ -1114,7 +1114,7 @@ static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_ #endif } -static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, 
const secp256k1_fe_storage *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_1_0_fe_storage_cmov(rustsecp256k1_v0_1_0_fe_storage *r, const rustsecp256k1_v0_1_0_fe_storage *a, int flag) { uint32_t mask0, mask1; mask0 = flag + ~((uint32_t)0); mask1 = ~mask0; @@ -1128,7 +1128,7 @@ static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1); } -static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a) { +static void rustsecp256k1_v0_1_0_fe_to_storage(rustsecp256k1_v0_1_0_fe_storage *r, const rustsecp256k1_v0_1_0_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->normalized); #endif @@ -1142,7 +1142,7 @@ static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe r->n[7] = a->n[8] >> 16 | a->n[9] << 10; } -static SECP256K1_INLINE void secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a) { +static SECP256K1_INLINE void rustsecp256k1_v0_1_0_fe_from_storage(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe_storage *a) { r->n[0] = a->n[0] & 0x3FFFFFFUL; r->n[1] = a->n[0] >> 26 | ((a->n[1] << 6) & 0x3FFFFFFUL); r->n[2] = a->n[1] >> 20 | ((a->n[2] << 12) & 0x3FFFFFFUL); diff --git a/secp256k1-sys/depend/secp256k1/src/field_5x52.h b/secp256k1-sys/depend/secp256k1/src/field_5x52.h index fc5bfe3..85a1cb9 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_5x52.h +++ b/secp256k1-sys/depend/secp256k1/src/field_5x52.h @@ -18,7 +18,7 @@ typedef struct { int magnitude; int normalized; #endif -} secp256k1_fe; +} rustsecp256k1_v0_1_0_fe; /* Unpacks a constant into a overlapping multi-limbed FE element. */ #define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \ @@ -37,7 +37,7 @@ typedef struct { typedef struct { uint64_t n[4]; -} secp256k1_fe_storage; +} rustsecp256k1_v0_1_0_fe_storage; #define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ \ (d0) | (((uint64_t)(d1)) << 32), \ diff --git a/secp256k1-sys/depend/secp256k1/src/field_5x52_asm_impl.h b/secp256k1-sys/depend/secp256k1/src/field_5x52_asm_impl.h index 1fc3171..9faa4af 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_5x52_asm_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/field_5x52_asm_impl.h @@ -14,7 +14,7 @@ #ifndef SECP256K1_FIELD_INNER5X52_IMPL_H #define SECP256K1_FIELD_INNER5X52_IMPL_H -SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) { /** * Registers: rdx:rax = multiplication accumulator * r9:r8 = c @@ -284,7 +284,7 @@ __asm__ __volatile__( ); } -SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_sqr_inner(uint64_t *r, const uint64_t *a) { /** * Registers: rdx:rax = multiplication accumulator * r9:r8 = c diff --git a/secp256k1-sys/depend/secp256k1/src/field_5x52_impl.h b/secp256k1-sys/depend/secp256k1/src/field_5x52_impl.h index f426332..b4f794f 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_5x52_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/field_5x52_impl.h @@ -29,7 +29,7 @@ */ #ifdef VERIFY -static void secp256k1_fe_verify(const secp256k1_fe *a) { +static void rustsecp256k1_v0_1_0_fe_verify(const rustsecp256k1_v0_1_0_fe *a) { const uint64_t *d = a->n; int m = a->normalized ? 
1 : 2 * a->magnitude, r = 1; /* secp256k1 'p' value defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */ @@ -50,7 +50,7 @@ static void secp256k1_fe_verify(const secp256k1_fe *a) { } #endif -static void secp256k1_fe_normalize(secp256k1_fe *r) { +static void rustsecp256k1_v0_1_0_fe_normalize(rustsecp256k1_v0_1_0_fe *r) { uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; /* Reduce t4 at the start so there will be at most a single carry from the first pass */ @@ -89,11 +89,11 @@ static void secp256k1_fe_normalize(secp256k1_fe *r) { #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - secp256k1_fe_verify(r); + rustsecp256k1_v0_1_0_fe_verify(r); #endif } -static void secp256k1_fe_normalize_weak(secp256k1_fe *r) { +static void rustsecp256k1_v0_1_0_fe_normalize_weak(rustsecp256k1_v0_1_0_fe *r) { uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; /* Reduce t4 at the start so there will be at most a single carry from the first pass */ @@ -113,11 +113,11 @@ static void secp256k1_fe_normalize_weak(secp256k1_fe *r) { #ifdef VERIFY r->magnitude = 1; - secp256k1_fe_verify(r); + rustsecp256k1_v0_1_0_fe_verify(r); #endif } -static void secp256k1_fe_normalize_var(secp256k1_fe *r) { +static void rustsecp256k1_v0_1_0_fe_normalize_var(rustsecp256k1_v0_1_0_fe *r) { uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; /* Reduce t4 at the start so there will be at most a single carry from the first pass */ @@ -157,11 +157,11 @@ static void secp256k1_fe_normalize_var(secp256k1_fe *r) { #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - secp256k1_fe_verify(r); + rustsecp256k1_v0_1_0_fe_verify(r); #endif } -static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r) { +static int rustsecp256k1_v0_1_0_fe_normalizes_to_zero(rustsecp256k1_v0_1_0_fe *r) { uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */ @@ -184,7 +184,7 @@ static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r) { return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL); } -static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r) { +static int rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(rustsecp256k1_v0_1_0_fe *r) { uint64_t t0, t1, t2, t3, t4; uint64_t z0, z1; uint64_t x; @@ -225,34 +225,34 @@ static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r) { return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL); } -SECP256K1_INLINE static void secp256k1_fe_set_int(secp256k1_fe *r, int a) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_set_int(rustsecp256k1_v0_1_0_fe *r, int a) { r->n[0] = a; r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0; #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - secp256k1_fe_verify(r); + rustsecp256k1_v0_1_0_fe_verify(r); #endif } -SECP256K1_INLINE static int secp256k1_fe_is_zero(const secp256k1_fe *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_fe_is_zero(const rustsecp256k1_v0_1_0_fe *a) { const uint64_t *t = a->n; #ifdef VERIFY VERIFY_CHECK(a->normalized); - secp256k1_fe_verify(a); + rustsecp256k1_v0_1_0_fe_verify(a); #endif return (t[0] | t[1] | t[2] | t[3] | t[4]) == 0; } -SECP256K1_INLINE static int secp256k1_fe_is_odd(const secp256k1_fe *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_fe_is_odd(const rustsecp256k1_v0_1_0_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->normalized); - secp256k1_fe_verify(a); + rustsecp256k1_v0_1_0_fe_verify(a); #endif return a->n[0] & 1; } -SECP256K1_INLINE static void 
secp256k1_fe_clear(secp256k1_fe *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_clear(rustsecp256k1_v0_1_0_fe *a) { int i; #ifdef VERIFY a->magnitude = 0; @@ -263,13 +263,13 @@ SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe *a) { } } -static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) { +static int rustsecp256k1_v0_1_0_fe_cmp_var(const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe *b) { int i; #ifdef VERIFY VERIFY_CHECK(a->normalized); VERIFY_CHECK(b->normalized); - secp256k1_fe_verify(a); - secp256k1_fe_verify(b); + rustsecp256k1_v0_1_0_fe_verify(a); + rustsecp256k1_v0_1_0_fe_verify(b); #endif for (i = 4; i >= 0; i--) { if (a->n[i] > b->n[i]) { @@ -282,7 +282,7 @@ static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) { return 0; } -static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) { +static int rustsecp256k1_v0_1_0_fe_set_b32(rustsecp256k1_v0_1_0_fe *r, const unsigned char *a) { r->n[0] = (uint64_t)a[31] | ((uint64_t)a[30] << 8) | ((uint64_t)a[29] << 16) @@ -323,16 +323,16 @@ static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) { #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - secp256k1_fe_verify(r); + rustsecp256k1_v0_1_0_fe_verify(r); #endif return 1; } /** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */ -static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) { +static void rustsecp256k1_v0_1_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_1_0_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->normalized); - secp256k1_fe_verify(a); + rustsecp256k1_v0_1_0_fe_verify(a); #endif r[0] = (a->n[4] >> 40) & 0xFF; r[1] = (a->n[4] >> 32) & 0xFF; @@ -368,10 +368,10 @@ static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) { r[31] = a->n[0] & 0xFF; } -SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_negate(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, int m) { #ifdef VERIFY VERIFY_CHECK(a->magnitude <= m); - secp256k1_fe_verify(a); + rustsecp256k1_v0_1_0_fe_verify(a); #endif r->n[0] = 0xFFFFEFFFFFC2FULL * 2 * (m + 1) - a->n[0]; r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[1]; @@ -381,11 +381,11 @@ SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k #ifdef VERIFY r->magnitude = m + 1; r->normalized = 0; - secp256k1_fe_verify(r); + rustsecp256k1_v0_1_0_fe_verify(r); #endif } -SECP256K1_INLINE static void secp256k1_fe_mul_int(secp256k1_fe *r, int a) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_mul_int(rustsecp256k1_v0_1_0_fe *r, int a) { r->n[0] *= a; r->n[1] *= a; r->n[2] *= a; @@ -394,13 +394,13 @@ SECP256K1_INLINE static void secp256k1_fe_mul_int(secp256k1_fe *r, int a) { #ifdef VERIFY r->magnitude *= a; r->normalized = 0; - secp256k1_fe_verify(r); + rustsecp256k1_v0_1_0_fe_verify(r); #endif } -SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_add(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a) { #ifdef VERIFY - secp256k1_fe_verify(a); + rustsecp256k1_v0_1_0_fe_verify(a); #endif r->n[0] += a->n[0]; r->n[1] += a->n[1]; @@ -410,41 +410,41 @@ SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_f #ifdef VERIFY r->magnitude += a->magnitude; r->normalized = 0; - secp256k1_fe_verify(r); + 
rustsecp256k1_v0_1_0_fe_verify(r); #endif } -static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b) { +static void rustsecp256k1_v0_1_0_fe_mul(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe * SECP256K1_RESTRICT b) { #ifdef VERIFY VERIFY_CHECK(a->magnitude <= 8); VERIFY_CHECK(b->magnitude <= 8); - secp256k1_fe_verify(a); - secp256k1_fe_verify(b); + rustsecp256k1_v0_1_0_fe_verify(a); + rustsecp256k1_v0_1_0_fe_verify(b); VERIFY_CHECK(r != b); VERIFY_CHECK(a != b); #endif - secp256k1_fe_mul_inner(r->n, a->n, b->n); + rustsecp256k1_v0_1_0_fe_mul_inner(r->n, a->n, b->n); #ifdef VERIFY r->magnitude = 1; r->normalized = 0; - secp256k1_fe_verify(r); + rustsecp256k1_v0_1_0_fe_verify(r); #endif } -static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) { +static void rustsecp256k1_v0_1_0_fe_sqr(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->magnitude <= 8); - secp256k1_fe_verify(a); + rustsecp256k1_v0_1_0_fe_verify(a); #endif - secp256k1_fe_sqr_inner(r->n, a->n); + rustsecp256k1_v0_1_0_fe_sqr_inner(r->n, a->n); #ifdef VERIFY r->magnitude = 1; r->normalized = 0; - secp256k1_fe_verify(r); + rustsecp256k1_v0_1_0_fe_verify(r); #endif } -static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_1_0_fe_cmov(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, int flag) { uint64_t mask0, mask1; mask0 = flag + ~((uint64_t)0); mask1 = ~mask0; @@ -461,7 +461,7 @@ static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_ #endif } -static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_1_0_fe_storage_cmov(rustsecp256k1_v0_1_0_fe_storage *r, const rustsecp256k1_v0_1_0_fe_storage *a, int flag) { uint64_t mask0, mask1; mask0 = flag + ~((uint64_t)0); mask1 = ~mask0; @@ -471,7 +471,7 @@ static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1); } -static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a) { +static void rustsecp256k1_v0_1_0_fe_to_storage(rustsecp256k1_v0_1_0_fe_storage *r, const rustsecp256k1_v0_1_0_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->normalized); #endif @@ -481,7 +481,7 @@ static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe r->n[3] = a->n[3] >> 36 | a->n[4] << 16; } -static SECP256K1_INLINE void secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a) { +static SECP256K1_INLINE void rustsecp256k1_v0_1_0_fe_from_storage(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe_storage *a) { r->n[0] = a->n[0] & 0xFFFFFFFFFFFFFULL; r->n[1] = a->n[0] >> 52 | ((a->n[1] << 12) & 0xFFFFFFFFFFFFFULL); r->n[2] = a->n[1] >> 40 | ((a->n[2] << 24) & 0xFFFFFFFFFFFFFULL); diff --git a/secp256k1-sys/depend/secp256k1/src/field_5x52_int128_impl.h b/secp256k1-sys/depend/secp256k1/src/field_5x52_int128_impl.h index bcbfb92..7ee9ed3 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_5x52_int128_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/field_5x52_int128_impl.h @@ -15,7 +15,7 @@ #define VERIFY_BITS(x, n) do { } while(0) #endif -SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) { +SECP256K1_INLINE static void 
rustsecp256k1_v0_1_0_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) { uint128_t c, d; uint64_t t3, t4, tx, u0; uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4]; @@ -154,7 +154,7 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t /* [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ } -SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_sqr_inner(uint64_t *r, const uint64_t *a) { uint128_t c, d; uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4]; int64_t t3, t4, tx, u0; diff --git a/secp256k1-sys/depend/secp256k1/src/field_impl.h b/secp256k1-sys/depend/secp256k1/src/field_impl.h index 6070cac..442073d 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/field_impl.h @@ -22,21 +22,21 @@ #error "Please select field implementation" #endif -SECP256K1_INLINE static int secp256k1_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b) { - secp256k1_fe na; - secp256k1_fe_negate(&na, a, 1); - secp256k1_fe_add(&na, b); - return secp256k1_fe_normalizes_to_zero(&na); +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_fe_equal(const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe *b) { + rustsecp256k1_v0_1_0_fe na; + rustsecp256k1_v0_1_0_fe_negate(&na, a, 1); + rustsecp256k1_v0_1_0_fe_add(&na, b); + return rustsecp256k1_v0_1_0_fe_normalizes_to_zero(&na); } -SECP256K1_INLINE static int secp256k1_fe_equal_var(const secp256k1_fe *a, const secp256k1_fe *b) { - secp256k1_fe na; - secp256k1_fe_negate(&na, a, 1); - secp256k1_fe_add(&na, b); - return secp256k1_fe_normalizes_to_zero_var(&na); +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_fe_equal_var(const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe *b) { + rustsecp256k1_v0_1_0_fe na; + rustsecp256k1_v0_1_0_fe_negate(&na, a, 1); + rustsecp256k1_v0_1_0_fe_add(&na, b); + return rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(&na); } -static int secp256k1_fe_sqrt(secp256k1_fe *r, const secp256k1_fe *a) { +static int rustsecp256k1_v0_1_0_fe_sqrt(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a) { /** Given that p is congruent to 3 mod 4, we can compute the square root of * a mod p as the (p+1)/4'th power of a. * @@ -46,7 +46,7 @@ static int secp256k1_fe_sqrt(secp256k1_fe *r, const secp256k1_fe *a) { * Also because (p+1)/4 is an even number, the computed square root is * itself always a square (a ** ((p+1)/4) is the square of a ** ((p+1)/8)). 
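The doc comment above carries the whole idea of the square-root routine: because p ≡ 3 (mod 4), a^((p+1)/4) is a square root of a whenever one exists, and the caller can detect a non-residue simply by squaring the result. A minimal sketch of the same fact over a toy prime (hypothetical demo code, not the 5x52 field arithmetic being renamed in this patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Square-and-multiply modular exponentiation; the modulus is kept tiny
     * so plain 64-bit products cannot overflow. */
    static uint64_t modpow(uint64_t a, uint64_t e, uint64_t p) {
        uint64_t r = 1;
        for (a %= p; e > 0; e >>= 1) {
            if (e & 1) r = r * a % p;
            a = a * a % p;
        }
        return r;
    }

    int main(void) {
        const uint64_t p = 103;                 /* toy prime, 103 % 4 == 3 */
        const uint64_t a = 13;
        uint64_t s = modpow(a, (p + 1) / 4, p); /* candidate square root */
        if (s * s % p == a)
            printf("sqrt(%llu) mod %llu = %llu\n", (unsigned long long)a,
                   (unsigned long long)p, (unsigned long long)s);
        else
            printf("%llu is not a quadratic residue mod %llu\n",
                   (unsigned long long)a, (unsigned long long)p);
        return 0;
    }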
*/ - secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1; + rustsecp256k1_v0_1_0_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1; int j; VERIFY_CHECK(r != a); @@ -56,88 +56,88 @@ static int secp256k1_fe_sqrt(secp256k1_fe *r, const secp256k1_fe *a) { * 1, [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223] */ - secp256k1_fe_sqr(&x2, a); - secp256k1_fe_mul(&x2, &x2, a); + rustsecp256k1_v0_1_0_fe_sqr(&x2, a); + rustsecp256k1_v0_1_0_fe_mul(&x2, &x2, a); - secp256k1_fe_sqr(&x3, &x2); - secp256k1_fe_mul(&x3, &x3, a); + rustsecp256k1_v0_1_0_fe_sqr(&x3, &x2); + rustsecp256k1_v0_1_0_fe_mul(&x3, &x3, a); x6 = x3; for (j=0; j<3; j++) { - secp256k1_fe_sqr(&x6, &x6); + rustsecp256k1_v0_1_0_fe_sqr(&x6, &x6); } - secp256k1_fe_mul(&x6, &x6, &x3); + rustsecp256k1_v0_1_0_fe_mul(&x6, &x6, &x3); x9 = x6; for (j=0; j<3; j++) { - secp256k1_fe_sqr(&x9, &x9); + rustsecp256k1_v0_1_0_fe_sqr(&x9, &x9); } - secp256k1_fe_mul(&x9, &x9, &x3); + rustsecp256k1_v0_1_0_fe_mul(&x9, &x9, &x3); x11 = x9; for (j=0; j<2; j++) { - secp256k1_fe_sqr(&x11, &x11); + rustsecp256k1_v0_1_0_fe_sqr(&x11, &x11); } - secp256k1_fe_mul(&x11, &x11, &x2); + rustsecp256k1_v0_1_0_fe_mul(&x11, &x11, &x2); x22 = x11; for (j=0; j<11; j++) { - secp256k1_fe_sqr(&x22, &x22); + rustsecp256k1_v0_1_0_fe_sqr(&x22, &x22); } - secp256k1_fe_mul(&x22, &x22, &x11); + rustsecp256k1_v0_1_0_fe_mul(&x22, &x22, &x11); x44 = x22; for (j=0; j<22; j++) { - secp256k1_fe_sqr(&x44, &x44); + rustsecp256k1_v0_1_0_fe_sqr(&x44, &x44); } - secp256k1_fe_mul(&x44, &x44, &x22); + rustsecp256k1_v0_1_0_fe_mul(&x44, &x44, &x22); x88 = x44; for (j=0; j<44; j++) { - secp256k1_fe_sqr(&x88, &x88); + rustsecp256k1_v0_1_0_fe_sqr(&x88, &x88); } - secp256k1_fe_mul(&x88, &x88, &x44); + rustsecp256k1_v0_1_0_fe_mul(&x88, &x88, &x44); x176 = x88; for (j=0; j<88; j++) { - secp256k1_fe_sqr(&x176, &x176); + rustsecp256k1_v0_1_0_fe_sqr(&x176, &x176); } - secp256k1_fe_mul(&x176, &x176, &x88); + rustsecp256k1_v0_1_0_fe_mul(&x176, &x176, &x88); x220 = x176; for (j=0; j<44; j++) { - secp256k1_fe_sqr(&x220, &x220); + rustsecp256k1_v0_1_0_fe_sqr(&x220, &x220); } - secp256k1_fe_mul(&x220, &x220, &x44); + rustsecp256k1_v0_1_0_fe_mul(&x220, &x220, &x44); x223 = x220; for (j=0; j<3; j++) { - secp256k1_fe_sqr(&x223, &x223); + rustsecp256k1_v0_1_0_fe_sqr(&x223, &x223); } - secp256k1_fe_mul(&x223, &x223, &x3); + rustsecp256k1_v0_1_0_fe_mul(&x223, &x223, &x3); /* The final result is then assembled using a sliding window over the blocks. 
*/ t1 = x223; for (j=0; j<23; j++) { - secp256k1_fe_sqr(&t1, &t1); + rustsecp256k1_v0_1_0_fe_sqr(&t1, &t1); } - secp256k1_fe_mul(&t1, &t1, &x22); + rustsecp256k1_v0_1_0_fe_mul(&t1, &t1, &x22); for (j=0; j<6; j++) { - secp256k1_fe_sqr(&t1, &t1); + rustsecp256k1_v0_1_0_fe_sqr(&t1, &t1); } - secp256k1_fe_mul(&t1, &t1, &x2); - secp256k1_fe_sqr(&t1, &t1); - secp256k1_fe_sqr(r, &t1); + rustsecp256k1_v0_1_0_fe_mul(&t1, &t1, &x2); + rustsecp256k1_v0_1_0_fe_sqr(&t1, &t1); + rustsecp256k1_v0_1_0_fe_sqr(r, &t1); /* Check that a square root was actually calculated */ - secp256k1_fe_sqr(&t1, r); - return secp256k1_fe_equal(&t1, a); + rustsecp256k1_v0_1_0_fe_sqr(&t1, r); + return rustsecp256k1_v0_1_0_fe_equal(&t1, a); } -static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a) { - secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1; +static void rustsecp256k1_v0_1_0_fe_inv(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a) { + rustsecp256k1_v0_1_0_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1; int j; /** The binary representation of (p - 2) has 5 blocks of 1s, with lengths in @@ -145,93 +145,93 @@ static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a) { * [1], [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223] */ - secp256k1_fe_sqr(&x2, a); - secp256k1_fe_mul(&x2, &x2, a); + rustsecp256k1_v0_1_0_fe_sqr(&x2, a); + rustsecp256k1_v0_1_0_fe_mul(&x2, &x2, a); - secp256k1_fe_sqr(&x3, &x2); - secp256k1_fe_mul(&x3, &x3, a); + rustsecp256k1_v0_1_0_fe_sqr(&x3, &x2); + rustsecp256k1_v0_1_0_fe_mul(&x3, &x3, a); x6 = x3; for (j=0; j<3; j++) { - secp256k1_fe_sqr(&x6, &x6); + rustsecp256k1_v0_1_0_fe_sqr(&x6, &x6); } - secp256k1_fe_mul(&x6, &x6, &x3); + rustsecp256k1_v0_1_0_fe_mul(&x6, &x6, &x3); x9 = x6; for (j=0; j<3; j++) { - secp256k1_fe_sqr(&x9, &x9); + rustsecp256k1_v0_1_0_fe_sqr(&x9, &x9); } - secp256k1_fe_mul(&x9, &x9, &x3); + rustsecp256k1_v0_1_0_fe_mul(&x9, &x9, &x3); x11 = x9; for (j=0; j<2; j++) { - secp256k1_fe_sqr(&x11, &x11); + rustsecp256k1_v0_1_0_fe_sqr(&x11, &x11); } - secp256k1_fe_mul(&x11, &x11, &x2); + rustsecp256k1_v0_1_0_fe_mul(&x11, &x11, &x2); x22 = x11; for (j=0; j<11; j++) { - secp256k1_fe_sqr(&x22, &x22); + rustsecp256k1_v0_1_0_fe_sqr(&x22, &x22); } - secp256k1_fe_mul(&x22, &x22, &x11); + rustsecp256k1_v0_1_0_fe_mul(&x22, &x22, &x11); x44 = x22; for (j=0; j<22; j++) { - secp256k1_fe_sqr(&x44, &x44); + rustsecp256k1_v0_1_0_fe_sqr(&x44, &x44); } - secp256k1_fe_mul(&x44, &x44, &x22); + rustsecp256k1_v0_1_0_fe_mul(&x44, &x44, &x22); x88 = x44; for (j=0; j<44; j++) { - secp256k1_fe_sqr(&x88, &x88); + rustsecp256k1_v0_1_0_fe_sqr(&x88, &x88); } - secp256k1_fe_mul(&x88, &x88, &x44); + rustsecp256k1_v0_1_0_fe_mul(&x88, &x88, &x44); x176 = x88; for (j=0; j<88; j++) { - secp256k1_fe_sqr(&x176, &x176); + rustsecp256k1_v0_1_0_fe_sqr(&x176, &x176); } - secp256k1_fe_mul(&x176, &x176, &x88); + rustsecp256k1_v0_1_0_fe_mul(&x176, &x176, &x88); x220 = x176; for (j=0; j<44; j++) { - secp256k1_fe_sqr(&x220, &x220); + rustsecp256k1_v0_1_0_fe_sqr(&x220, &x220); } - secp256k1_fe_mul(&x220, &x220, &x44); + rustsecp256k1_v0_1_0_fe_mul(&x220, &x220, &x44); x223 = x220; for (j=0; j<3; j++) { - secp256k1_fe_sqr(&x223, &x223); + rustsecp256k1_v0_1_0_fe_sqr(&x223, &x223); } - secp256k1_fe_mul(&x223, &x223, &x3); + rustsecp256k1_v0_1_0_fe_mul(&x223, &x223, &x3); /* The final result is then assembled using a sliding window over the blocks. 
*/ t1 = x223; for (j=0; j<23; j++) { - secp256k1_fe_sqr(&t1, &t1); + rustsecp256k1_v0_1_0_fe_sqr(&t1, &t1); } - secp256k1_fe_mul(&t1, &t1, &x22); + rustsecp256k1_v0_1_0_fe_mul(&t1, &t1, &x22); for (j=0; j<5; j++) { - secp256k1_fe_sqr(&t1, &t1); + rustsecp256k1_v0_1_0_fe_sqr(&t1, &t1); } - secp256k1_fe_mul(&t1, &t1, a); + rustsecp256k1_v0_1_0_fe_mul(&t1, &t1, a); for (j=0; j<3; j++) { - secp256k1_fe_sqr(&t1, &t1); + rustsecp256k1_v0_1_0_fe_sqr(&t1, &t1); } - secp256k1_fe_mul(&t1, &t1, &x2); + rustsecp256k1_v0_1_0_fe_mul(&t1, &t1, &x2); for (j=0; j<2; j++) { - secp256k1_fe_sqr(&t1, &t1); + rustsecp256k1_v0_1_0_fe_sqr(&t1, &t1); } - secp256k1_fe_mul(r, a, &t1); + rustsecp256k1_v0_1_0_fe_mul(r, a, &t1); } -static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a) { +static void rustsecp256k1_v0_1_0_fe_inv_var(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a) { #if defined(USE_FIELD_INV_BUILTIN) - secp256k1_fe_inv(r, a); + rustsecp256k1_v0_1_0_fe_inv(r, a); #elif defined(USE_FIELD_INV_NUM) - secp256k1_num n, m; - static const secp256k1_fe negone = SECP256K1_FE_CONST( + rustsecp256k1_v0_1_0_num n, m; + static const rustsecp256k1_v0_1_0_fe negone = SECP256K1_FE_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xFFFFFC2EUL ); @@ -244,27 +244,27 @@ static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a) { }; unsigned char b[32]; int res; - secp256k1_fe c = *a; - secp256k1_fe_normalize_var(&c); - secp256k1_fe_get_b32(b, &c); - secp256k1_num_set_bin(&n, b, 32); - secp256k1_num_set_bin(&m, prime, 32); - secp256k1_num_mod_inverse(&n, &n, &m); - secp256k1_num_get_bin(b, 32, &n); - res = secp256k1_fe_set_b32(r, b); + rustsecp256k1_v0_1_0_fe c = *a; + rustsecp256k1_v0_1_0_fe_normalize_var(&c); + rustsecp256k1_v0_1_0_fe_get_b32(b, &c); + rustsecp256k1_v0_1_0_num_set_bin(&n, b, 32); + rustsecp256k1_v0_1_0_num_set_bin(&m, prime, 32); + rustsecp256k1_v0_1_0_num_mod_inverse(&n, &n, &m); + rustsecp256k1_v0_1_0_num_get_bin(b, 32, &n); + res = rustsecp256k1_v0_1_0_fe_set_b32(r, b); (void)res; VERIFY_CHECK(res); /* Verify the result is the (unique) valid inverse using non-GMP code. 
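A few hunks below, rustsecp256k1_v0_1_0_fe_inv_all_var amortizes one expensive inversion across a whole array: forward prefix products, a single inverse, then a backward sweep that peels off one element at a time (Montgomery's trick). A hedged sketch of the same pattern over a toy prime, using a Fermat inverse in place of the field's inversion routine:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    static uint64_t mmul(uint64_t x, uint64_t y, uint64_t p) { return x * y % p; }

    static uint64_t modpow(uint64_t a, uint64_t e, uint64_t p) {
        uint64_t r = 1;
        for (a %= p; e > 0; e >>= 1) {
            if (e & 1) r = mmul(r, a, p);
            a = mmul(a, a, p);
        }
        return r;
    }

    /* Invert every (nonzero) a[i] mod p using exactly one modular inversion. */
    static void batch_inverse(uint64_t *r, const uint64_t *a, size_t len, uint64_t p) {
        uint64_t u;
        size_t i;
        if (len < 1) return;
        r[0] = a[0];
        for (i = 1; i < len; i++) r[i] = mmul(r[i - 1], a[i], p); /* prefix products */
        u = modpow(r[len - 1], p - 2, p);                         /* one inversion */
        for (i = len - 1; i > 0; i--) {
            r[i] = mmul(r[i - 1], u, p); /* 1/a[i] = prod(a[0..i-1]) / prod(a[0..i]) */
            u = mmul(u, a[i], p);        /* strip a[i] from the running inverse */
        }
        r[0] = u;
    }

    int main(void) {
        const uint64_t p = 103;
        uint64_t a[4] = {2, 5, 44, 101}, inv[4];
        size_t i;
        batch_inverse(inv, a, 4, p);
        for (i = 0; i < 4; i++) assert(mmul(a[i], inv[i], p) == 1);
        return 0;
    }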
*/ - secp256k1_fe_mul(&c, &c, r); - secp256k1_fe_add(&c, &negone); - CHECK(secp256k1_fe_normalizes_to_zero_var(&c)); + rustsecp256k1_v0_1_0_fe_mul(&c, &c, r); + rustsecp256k1_v0_1_0_fe_add(&c, &negone); + CHECK(rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(&c)); #else #error "Please select field inverse implementation" #endif } -static void secp256k1_fe_inv_all_var(secp256k1_fe *r, const secp256k1_fe *a, size_t len) { - secp256k1_fe u; +static void rustsecp256k1_v0_1_0_fe_inv_all_var(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, size_t len) { + rustsecp256k1_v0_1_0_fe u; size_t i; if (len < 1) { return; @@ -276,25 +276,25 @@ static void secp256k1_fe_inv_all_var(secp256k1_fe *r, const secp256k1_fe *a, siz i = 0; while (++i < len) { - secp256k1_fe_mul(&r[i], &r[i - 1], &a[i]); + rustsecp256k1_v0_1_0_fe_mul(&r[i], &r[i - 1], &a[i]); } - secp256k1_fe_inv_var(&u, &r[--i]); + rustsecp256k1_v0_1_0_fe_inv_var(&u, &r[--i]); while (i > 0) { size_t j = i--; - secp256k1_fe_mul(&r[j], &r[i], &u); - secp256k1_fe_mul(&u, &u, &a[j]); + rustsecp256k1_v0_1_0_fe_mul(&r[j], &r[i], &u); + rustsecp256k1_v0_1_0_fe_mul(&u, &u, &a[j]); } r[0] = u; } -static int secp256k1_fe_is_quad_var(const secp256k1_fe *a) { +static int rustsecp256k1_v0_1_0_fe_is_quad_var(const rustsecp256k1_v0_1_0_fe *a) { #ifndef USE_NUM_NONE unsigned char b[32]; - secp256k1_num n; - secp256k1_num m; + rustsecp256k1_v0_1_0_num n; + rustsecp256k1_v0_1_0_num m; /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */ static const unsigned char prime[32] = { 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, @@ -303,15 +303,15 @@ static int secp256k1_fe_is_quad_var(const secp256k1_fe *a) { 0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F }; - secp256k1_fe c = *a; - secp256k1_fe_normalize_var(&c); - secp256k1_fe_get_b32(b, &c); - secp256k1_num_set_bin(&n, b, 32); - secp256k1_num_set_bin(&m, prime, 32); - return secp256k1_num_jacobi(&n, &m) >= 0; + rustsecp256k1_v0_1_0_fe c = *a; + rustsecp256k1_v0_1_0_fe_normalize_var(&c); + rustsecp256k1_v0_1_0_fe_get_b32(b, &c); + rustsecp256k1_v0_1_0_num_set_bin(&n, b, 32); + rustsecp256k1_v0_1_0_num_set_bin(&m, prime, 32); + return rustsecp256k1_v0_1_0_num_jacobi(&n, &m) >= 0; #else - secp256k1_fe r; - return secp256k1_fe_sqrt(&r, a); + rustsecp256k1_v0_1_0_fe r; + return rustsecp256k1_v0_1_0_fe_sqrt(&r, a); #endif } diff --git a/secp256k1-sys/depend/secp256k1/src/gen_context.c b/secp256k1-sys/depend/secp256k1/src/gen_context.c index 82c605c..4638d98 100644 --- a/secp256k1-sys/depend/secp256k1/src/gen_context.c +++ b/secp256k1-sys/depend/secp256k1/src/gen_context.c @@ -20,13 +20,13 @@ static void default_error_callback_fn(const char* str, void* data) { abort(); } -static const secp256k1_callback default_error_callback = { +static const rustsecp256k1_v0_1_0_callback default_error_callback = { default_error_callback_fn, NULL }; int main(int argc, char **argv) { - secp256k1_ecmult_gen_context ctx; + rustsecp256k1_v0_1_0_ecmult_gen_context ctx; void *prealloc, *base; int inner; int outer; @@ -45,12 +45,12 @@ int main(int argc, char **argv) { fprintf(fp, "#define _SECP256K1_ECMULT_STATIC_CONTEXT_\n"); fprintf(fp, "#include \"src/group.h\"\n"); fprintf(fp, "#define SC SECP256K1_GE_STORAGE_CONST\n"); - fprintf(fp, "static const secp256k1_ge_storage secp256k1_ecmult_static_context[64][16] = {\n"); + fprintf(fp, "static const rustsecp256k1_v0_1_0_ge_storage rustsecp256k1_v0_1_0_ecmult_static_context[64][16] = {\n"); base = checked_malloc(&default_error_callback, 
SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE); prealloc = base; - secp256k1_ecmult_gen_context_init(&ctx); - secp256k1_ecmult_gen_context_build(&ctx, &prealloc); + rustsecp256k1_v0_1_0_ecmult_gen_context_init(&ctx); + rustsecp256k1_v0_1_0_ecmult_gen_context_build(&ctx, &prealloc); for(outer = 0; outer != 64; outer++) { fprintf(fp,"{\n"); for(inner = 0; inner != 16; inner++) { @@ -68,7 +68,7 @@ int main(int argc, char **argv) { } } fprintf(fp,"};\n"); - secp256k1_ecmult_gen_context_clear(&ctx); + rustsecp256k1_v0_1_0_ecmult_gen_context_clear(&ctx); free(base); fprintf(fp, "#undef SC\n"); diff --git a/secp256k1-sys/depend/secp256k1/src/group.h b/secp256k1-sys/depend/secp256k1/src/group.h index 8e122ab..2d1ae10 100644 --- a/secp256k1-sys/depend/secp256k1/src/group.h +++ b/secp256k1-sys/depend/secp256k1/src/group.h @@ -12,131 +12,131 @@ /** A group element of the secp256k1 curve, in affine coordinates. */ typedef struct { - secp256k1_fe x; - secp256k1_fe y; + rustsecp256k1_v0_1_0_fe x; + rustsecp256k1_v0_1_0_fe y; int infinity; /* whether this represents the point at infinity */ -} secp256k1_ge; +} rustsecp256k1_v0_1_0_ge; #define SECP256K1_GE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), 0} #define SECP256K1_GE_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1} /** A group element of the secp256k1 curve, in jacobian coordinates. */ typedef struct { - secp256k1_fe x; /* actual X: x/z^2 */ - secp256k1_fe y; /* actual Y: y/z^3 */ - secp256k1_fe z; + rustsecp256k1_v0_1_0_fe x; /* actual X: x/z^2 */ + rustsecp256k1_v0_1_0_fe y; /* actual Y: y/z^3 */ + rustsecp256k1_v0_1_0_fe z; int infinity; /* whether this represents the point at infinity */ -} secp256k1_gej; +} rustsecp256k1_v0_1_0_gej; #define SECP256K1_GEJ_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1), 0} #define SECP256K1_GEJ_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1} typedef struct { - secp256k1_fe_storage x; - secp256k1_fe_storage y; -} secp256k1_ge_storage; + rustsecp256k1_v0_1_0_fe_storage x; + rustsecp256k1_v0_1_0_fe_storage y; +} rustsecp256k1_v0_1_0_ge_storage; #define SECP256K1_GE_STORAGE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_STORAGE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_STORAGE_CONST((i),(j),(k),(l),(m),(n),(o),(p))} #define SECP256K1_GE_STORAGE_CONST_GET(t) SECP256K1_FE_STORAGE_CONST_GET(t.x), SECP256K1_FE_STORAGE_CONST_GET(t.y) /** Set a group element equal to the point with given X and Y coordinates */ -static void secp256k1_ge_set_xy(secp256k1_ge *r, const secp256k1_fe *x, const secp256k1_fe *y); +static void rustsecp256k1_v0_1_0_ge_set_xy(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_fe *x, const rustsecp256k1_v0_1_0_fe *y); /** Set a group element (affine) equal to the point with the given X coordinate * and a Y coordinate that is a quadratic residue modulo p. The return value * is true iff a coordinate with the given X coordinate exists. 
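The comment above describes point decompression: given x, compute y = sqrt(x^3 + b) and fail if the right-hand side is a non-residue. A toy illustration on y^2 = x^3 + 7 over a small prime with p ≡ 3 (mod 4) (hypothetical demo; b = 7 matches the default CURVE_B in this file, but the modulus and helper are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t modpow(uint64_t a, uint64_t e, uint64_t p) {
        uint64_t r = 1;
        for (a %= p; e > 0; e >>= 1) {
            if (e & 1) r = r * a % p;
            a = a * a % p;
        }
        return r;
    }

    /* Try to lift x to a point on y^2 = x^3 + 7 mod p; return 1 on success. */
    static int toy_set_xquad(uint64_t *y, uint64_t x, uint64_t p) {
        uint64_t c = (x * x % p * x + 7) % p;   /* x^3 + 7; safe, p is tiny */
        uint64_t s = modpow(c, (p + 1) / 4, p); /* works because p % 4 == 3 */
        if (s * s % p != c) return 0;           /* x^3 + 7 was not a square */
        *y = s;
        return 1;
    }

    int main(void) {
        uint64_t p = 103, x, y;
        for (x = 0; x < 10; x++)
            printf("x=%2llu -> %s\n", (unsigned long long)x,
                   toy_set_xquad(&y, x, p) ? "on curve" : "no point");
        return 0;
    }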
 */ -static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x); +static int rustsecp256k1_v0_1_0_ge_set_xquad(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_fe *x); /** Set a group element (affine) equal to the point with the given X coordinate, and given oddness * for Y. Return value indicates whether the result is valid. */ -static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd); +static int rustsecp256k1_v0_1_0_ge_set_xo_var(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_fe *x, int odd); /** Check whether a group element is the point at infinity. */ -static int secp256k1_ge_is_infinity(const secp256k1_ge *a); +static int rustsecp256k1_v0_1_0_ge_is_infinity(const rustsecp256k1_v0_1_0_ge *a); /** Check whether a group element is valid (i.e., on the curve). */ -static int secp256k1_ge_is_valid_var(const secp256k1_ge *a); +static int rustsecp256k1_v0_1_0_ge_is_valid_var(const rustsecp256k1_v0_1_0_ge *a); -static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a); +static void rustsecp256k1_v0_1_0_ge_neg(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_ge *a); /** Set a group element equal to another which is given in jacobian coordinates */ -static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a); +static void rustsecp256k1_v0_1_0_ge_set_gej(rustsecp256k1_v0_1_0_ge *r, rustsecp256k1_v0_1_0_gej *a); /** Set a batch of group elements equal to the inputs given in jacobian coordinates */ -static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len); +static void rustsecp256k1_v0_1_0_ge_set_all_gej_var(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_gej *a, size_t len); /** Bring a batch of inputs given in jacobian coordinates (with known z-ratios) to * the same global z "denominator". zr must contain the known z-ratios such * that mul(a[i].z, zr[i+1]) == a[i+1].z. zr[0] is ignored. The x and y * coordinates of the result are stored in r, the common z coordinate is * stored in globalz. */ -static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp256k1_fe *globalz, const secp256k1_gej *a, const secp256k1_fe *zr); +static void rustsecp256k1_v0_1_0_ge_globalz_set_table_gej(size_t len, rustsecp256k1_v0_1_0_ge *r, rustsecp256k1_v0_1_0_fe *globalz, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_fe *zr); /** Set a group element (affine) equal to the point at infinity. */ -static void secp256k1_ge_set_infinity(secp256k1_ge *r); +static void rustsecp256k1_v0_1_0_ge_set_infinity(rustsecp256k1_v0_1_0_ge *r); /** Set a group element (jacobian) equal to the point at infinity. */ -static void secp256k1_gej_set_infinity(secp256k1_gej *r); +static void rustsecp256k1_v0_1_0_gej_set_infinity(rustsecp256k1_v0_1_0_gej *r); /** Set a group element (jacobian) equal to another which is given in affine coordinates. */ -static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a); +static void rustsecp256k1_v0_1_0_gej_set_ge(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_ge *a); /** Compare the X coordinate of a group element (jacobian). 
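This comparison (declared just below, implemented later in this file) avoids a field inversion: since the affine x of a jacobian point is X/Z^2, testing x == X/Z^2 can be done as x*Z^2 == X. In toy arithmetic (illustrative only):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        const uint64_t p = 103;
        /* A jacobian point (X, Y, Z) represents affine (X/Z^2, Y/Z^3). */
        uint64_t Z = 9, x_affine = 17, x_other = 18;
        uint64_t X = x_affine * Z % p * Z % p;      /* build X = x * Z^2 */
        /* Cross-multiplied comparison, no inversion needed: */
        assert(x_affine * Z % p * Z % p == X);
        assert(x_other * Z % p * Z % p != X);
        return 0;
    }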
*/ -static int secp256k1_gej_eq_x_var(const secp256k1_fe *x, const secp256k1_gej *a); +static int rustsecp256k1_v0_1_0_gej_eq_x_var(const rustsecp256k1_v0_1_0_fe *x, const rustsecp256k1_v0_1_0_gej *a); /** Set r equal to the inverse of a (i.e., mirrored around the X axis) */ -static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a); +static void rustsecp256k1_v0_1_0_gej_neg(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a); /** Check whether a group element is the point at infinity. */ -static int secp256k1_gej_is_infinity(const secp256k1_gej *a); +static int rustsecp256k1_v0_1_0_gej_is_infinity(const rustsecp256k1_v0_1_0_gej *a); /** Check whether a group element's y coordinate is a quadratic residue. */ -static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a); +static int rustsecp256k1_v0_1_0_gej_has_quad_y_var(const rustsecp256k1_v0_1_0_gej *a); /** Set r equal to the double of a. If rzr is not-NULL, r->z = a->z * *rzr (where infinity means an implicit z = 0). * a may not be zero. Constant time. */ -static void secp256k1_gej_double_nonzero(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr); +static void rustsecp256k1_v0_1_0_gej_double_nonzero(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, rustsecp256k1_v0_1_0_fe *rzr); /** Set r equal to the double of a. If rzr is not-NULL, r->z = a->z * *rzr (where infinity means an implicit z = 0). */ -static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr); +static void rustsecp256k1_v0_1_0_gej_double_var(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, rustsecp256k1_v0_1_0_fe *rzr); /** Set r equal to the sum of a and b. If rzr is non-NULL, r->z = a->z * *rzr (a cannot be infinity in that case). */ -static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr); +static void rustsecp256k1_v0_1_0_gej_add_var(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_gej *b, rustsecp256k1_v0_1_0_fe *rzr); /** Set r equal to the sum of a and b (with b given in affine coordinates, and not infinity). */ -static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b); +static void rustsecp256k1_v0_1_0_gej_add_ge(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_ge *b); /** Set r equal to the sum of a and b (with b given in affine coordinates). This is more efficient - than secp256k1_gej_add_var. It is identical to secp256k1_gej_add_ge but without constant-time + than rustsecp256k1_v0_1_0_gej_add_var. It is identical to rustsecp256k1_v0_1_0_gej_add_ge but without constant-time guarantee, and b is allowed to be infinity. If rzr is non-NULL, r->z = a->z * *rzr (a cannot be infinity in that case). */ -static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr); +static void rustsecp256k1_v0_1_0_gej_add_ge_var(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_ge *b, rustsecp256k1_v0_1_0_fe *rzr); /** Set r equal to the sum of a and b (with the inverse of b's Z coordinate passed as bzinv). 
*/ -static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv); +static void rustsecp256k1_v0_1_0_gej_add_zinv_var(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_ge *b, const rustsecp256k1_v0_1_0_fe *bzinv); #ifdef USE_ENDOMORPHISM /** Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast. */ -static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a); +static void rustsecp256k1_v0_1_0_ge_mul_lambda(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_ge *a); #endif -/** Clear a secp256k1_gej to prevent leaking sensitive information. */ -static void secp256k1_gej_clear(secp256k1_gej *r); +/** Clear a rustsecp256k1_v0_1_0_gej to prevent leaking sensitive information. */ +static void rustsecp256k1_v0_1_0_gej_clear(rustsecp256k1_v0_1_0_gej *r); -/** Clear a secp256k1_ge to prevent leaking sensitive information. */ -static void secp256k1_ge_clear(secp256k1_ge *r); +/** Clear a rustsecp256k1_v0_1_0_ge to prevent leaking sensitive information. */ +static void rustsecp256k1_v0_1_0_ge_clear(rustsecp256k1_v0_1_0_ge *r); /** Convert a group element to the storage type. */ -static void secp256k1_ge_to_storage(secp256k1_ge_storage *r, const secp256k1_ge *a); +static void rustsecp256k1_v0_1_0_ge_to_storage(rustsecp256k1_v0_1_0_ge_storage *r, const rustsecp256k1_v0_1_0_ge *a); /** Convert a group element back from the storage type. */ -static void secp256k1_ge_from_storage(secp256k1_ge *r, const secp256k1_ge_storage *a); +static void rustsecp256k1_v0_1_0_ge_from_storage(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_ge_storage *a); /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */ -static void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_ge_storage *a, int flag); +static void rustsecp256k1_v0_1_0_ge_storage_cmov(rustsecp256k1_v0_1_0_ge_storage *r, const rustsecp256k1_v0_1_0_ge_storage *a, int flag); /** Rescale a jacobian point by b which must be non-zero. Constant-time. 
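The rescale operation documented above multiplies (X, Y, Z) by (s^2, s^3, s); because jacobian (X, Y, Z) denotes affine (X/Z^2, Y/Z^3), the represented point is unchanged for any nonzero s. A toy check of that invariance (hypothetical demo over a small prime):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t mmul(uint64_t x, uint64_t y, uint64_t p) { return x * y % p; }

    static uint64_t modpow(uint64_t a, uint64_t e, uint64_t p) {
        uint64_t r = 1;
        for (a %= p; e > 0; e >>= 1) {
            if (e & 1) r = mmul(r, a, p);
            a = mmul(a, a, p);
        }
        return r;
    }

    static uint64_t modinv(uint64_t a, uint64_t p) { return modpow(a, p - 2, p); }

    int main(void) {
        const uint64_t p = 103;
        uint64_t X = 5, Y = 21, Z = 7, s = 11;
        /* Affine coordinates before rescaling. */
        uint64_t zi = modinv(Z, p);
        uint64_t ax = mmul(X, mmul(zi, zi, p), p);              /* X / Z^2 */
        uint64_t ay = mmul(Y, mmul(mmul(zi, zi, p), zi, p), p); /* Y / Z^3 */
        /* Rescale: X *= s^2, Y *= s^3, Z *= s. */
        X = mmul(X, mmul(s, s, p), p);
        Y = mmul(Y, mmul(mmul(s, s, p), s, p), p);
        Z = mmul(Z, s, p);
        /* The affine point is unchanged. */
        zi = modinv(Z, p);
        assert(ax == mmul(X, mmul(zi, zi, p), p));
        assert(ay == mmul(Y, mmul(mmul(zi, zi, p), zi, p), p));
        return 0;
    }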
*/ -static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *b); +static void rustsecp256k1_v0_1_0_gej_rescale(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_fe *b); #endif /* SECP256K1_GROUP_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/group_impl.h b/secp256k1-sys/depend/secp256k1/src/group_impl.h index 9b93c39..c113fd4 100644 --- a/secp256k1-sys/depend/secp256k1/src/group_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/group_impl.h @@ -38,7 +38,7 @@ */ #if defined(EXHAUSTIVE_TEST_ORDER) # if EXHAUSTIVE_TEST_ORDER == 199 -static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( +static const rustsecp256k1_v0_1_0_ge rustsecp256k1_v0_1_0_ge_const_g = SECP256K1_GE_CONST( 0xFA7CC9A7, 0x0737F2DB, 0xA749DD39, 0x2B4FB069, 0x3B017A7D, 0xA808C2F1, 0xFB12940C, 0x9EA66C18, 0x78AC123A, 0x5ED8AEF3, 0x8732BC91, 0x1F3A2868, @@ -47,7 +47,7 @@ static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( static const int CURVE_B = 4; # elif EXHAUSTIVE_TEST_ORDER == 13 -static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( +static const rustsecp256k1_v0_1_0_ge rustsecp256k1_v0_1_0_ge_const_g = SECP256K1_GE_CONST( 0xedc60018, 0xa51a786b, 0x2ea91f4d, 0x4c9416c0, 0x9de54c3b, 0xa1316554, 0x6cf4345c, 0x7277ef15, 0x54cb1b6b, 0xdc8c1273, 0x087844ea, 0x43f4603e, @@ -61,7 +61,7 @@ static const int CURVE_B = 2; /** Generator for secp256k1, value 'g' defined in * "Standards for Efficient Cryptography" (SEC2) 2.7.1. */ -static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( +static const rustsecp256k1_v0_1_0_ge rustsecp256k1_v0_1_0_ge_const_g = SECP256K1_GE_CONST( 0x79BE667EUL, 0xF9DCBBACUL, 0x55A06295UL, 0xCE870B07UL, 0x029BFCDBUL, 0x2DCE28D9UL, 0x59F2815BUL, 0x16F81798UL, 0x483ADA77UL, 0x26A3C465UL, 0x5DA4FBFCUL, 0x0E1108A8UL, @@ -71,63 +71,63 @@ static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( static const int CURVE_B = 7; #endif -static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) { - secp256k1_fe zi2; - secp256k1_fe zi3; - secp256k1_fe_sqr(&zi2, zi); - secp256k1_fe_mul(&zi3, &zi2, zi); - secp256k1_fe_mul(&r->x, &a->x, &zi2); - secp256k1_fe_mul(&r->y, &a->y, &zi3); +static void rustsecp256k1_v0_1_0_ge_set_gej_zinv(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_fe *zi) { + rustsecp256k1_v0_1_0_fe zi2; + rustsecp256k1_v0_1_0_fe zi3; + rustsecp256k1_v0_1_0_fe_sqr(&zi2, zi); + rustsecp256k1_v0_1_0_fe_mul(&zi3, &zi2, zi); + rustsecp256k1_v0_1_0_fe_mul(&r->x, &a->x, &zi2); + rustsecp256k1_v0_1_0_fe_mul(&r->y, &a->y, &zi3); r->infinity = a->infinity; } -static void secp256k1_ge_set_xy(secp256k1_ge *r, const secp256k1_fe *x, const secp256k1_fe *y) { +static void rustsecp256k1_v0_1_0_ge_set_xy(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_fe *x, const rustsecp256k1_v0_1_0_fe *y) { r->infinity = 0; r->x = *x; r->y = *y; } -static int secp256k1_ge_is_infinity(const secp256k1_ge *a) { +static int rustsecp256k1_v0_1_0_ge_is_infinity(const rustsecp256k1_v0_1_0_ge *a) { return a->infinity; } -static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a) { +static void rustsecp256k1_v0_1_0_ge_neg(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_ge *a) { *r = *a; - secp256k1_fe_normalize_weak(&r->y); - secp256k1_fe_negate(&r->y, &r->y, 1); + rustsecp256k1_v0_1_0_fe_normalize_weak(&r->y); + rustsecp256k1_v0_1_0_fe_negate(&r->y, &r->y, 1); } -static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a) { - 
secp256k1_fe z2, z3; +static void rustsecp256k1_v0_1_0_ge_set_gej(rustsecp256k1_v0_1_0_ge *r, rustsecp256k1_v0_1_0_gej *a) { + rustsecp256k1_v0_1_0_fe z2, z3; r->infinity = a->infinity; - secp256k1_fe_inv(&a->z, &a->z); - secp256k1_fe_sqr(&z2, &a->z); - secp256k1_fe_mul(&z3, &a->z, &z2); - secp256k1_fe_mul(&a->x, &a->x, &z2); - secp256k1_fe_mul(&a->y, &a->y, &z3); - secp256k1_fe_set_int(&a->z, 1); + rustsecp256k1_v0_1_0_fe_inv(&a->z, &a->z); + rustsecp256k1_v0_1_0_fe_sqr(&z2, &a->z); + rustsecp256k1_v0_1_0_fe_mul(&z3, &a->z, &z2); + rustsecp256k1_v0_1_0_fe_mul(&a->x, &a->x, &z2); + rustsecp256k1_v0_1_0_fe_mul(&a->y, &a->y, &z3); + rustsecp256k1_v0_1_0_fe_set_int(&a->z, 1); r->x = a->x; r->y = a->y; } -static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) { - secp256k1_fe z2, z3; +static void rustsecp256k1_v0_1_0_ge_set_gej_var(rustsecp256k1_v0_1_0_ge *r, rustsecp256k1_v0_1_0_gej *a) { + rustsecp256k1_v0_1_0_fe z2, z3; r->infinity = a->infinity; if (a->infinity) { return; } - secp256k1_fe_inv_var(&a->z, &a->z); - secp256k1_fe_sqr(&z2, &a->z); - secp256k1_fe_mul(&z3, &a->z, &z2); - secp256k1_fe_mul(&a->x, &a->x, &z2); - secp256k1_fe_mul(&a->y, &a->y, &z3); - secp256k1_fe_set_int(&a->z, 1); + rustsecp256k1_v0_1_0_fe_inv_var(&a->z, &a->z); + rustsecp256k1_v0_1_0_fe_sqr(&z2, &a->z); + rustsecp256k1_v0_1_0_fe_mul(&z3, &a->z, &z2); + rustsecp256k1_v0_1_0_fe_mul(&a->x, &a->x, &z2); + rustsecp256k1_v0_1_0_fe_mul(&a->y, &a->y, &z3); + rustsecp256k1_v0_1_0_fe_set_int(&a->z, 1); r->x = a->x; r->y = a->y; } -static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len) { - secp256k1_fe u; +static void rustsecp256k1_v0_1_0_ge_set_all_gej_var(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_gej *a, size_t len) { + rustsecp256k1_v0_1_0_fe u; size_t i; size_t last_i = SIZE_MAX; @@ -137,7 +137,7 @@ static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a if (last_i == SIZE_MAX) { r[i].x = a[i].z; } else { - secp256k1_fe_mul(&r[i].x, &r[last_i].x, &a[i].z); + rustsecp256k1_v0_1_0_fe_mul(&r[i].x, &r[last_i].x, &a[i].z); } last_i = i; } @@ -145,14 +145,14 @@ static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a if (last_i == SIZE_MAX) { return; } - secp256k1_fe_inv_var(&u, &r[last_i].x); + rustsecp256k1_v0_1_0_fe_inv_var(&u, &r[last_i].x); i = last_i; while (i > 0) { i--; if (!a[i].infinity) { - secp256k1_fe_mul(&r[last_i].x, &r[i].x, &u); - secp256k1_fe_mul(&u, &u, &a[last_i].z); + rustsecp256k1_v0_1_0_fe_mul(&r[last_i].x, &r[i].x, &u); + rustsecp256k1_v0_1_0_fe_mul(&u, &u, &a[last_i].z); last_i = i; } } @@ -162,21 +162,21 @@ static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a for (i = 0; i < len; i++) { r[i].infinity = a[i].infinity; if (!a[i].infinity) { - secp256k1_ge_set_gej_zinv(&r[i], &a[i], &r[i].x); + rustsecp256k1_v0_1_0_ge_set_gej_zinv(&r[i], &a[i], &r[i].x); } } } -static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp256k1_fe *globalz, const secp256k1_gej *a, const secp256k1_fe *zr) { +static void rustsecp256k1_v0_1_0_ge_globalz_set_table_gej(size_t len, rustsecp256k1_v0_1_0_ge *r, rustsecp256k1_v0_1_0_fe *globalz, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_fe *zr) { size_t i = len - 1; - secp256k1_fe zs; + rustsecp256k1_v0_1_0_fe zs; if (len > 0) { /* The z of the final point gives us the "global Z" for the table. 
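The z-ratio table rewrites every point over that single "global" denominator: if z2 = z1 * zr, then x1/z1^2 = (x1 * zr^2)/z2^2, so earlier entries only need multiplications by accumulated ratios, never their own inversion. A toy check of the x-coordinate bookkeeping (hypothetical demo; the one inversion at the end exists only to verify, the real table keeps the common z implicit):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t mmul(uint64_t x, uint64_t y, uint64_t p) { return x * y % p; }

    static uint64_t modpow(uint64_t a, uint64_t e, uint64_t p) {
        uint64_t r = 1;
        for (a %= p; e > 0; e >>= 1) {
            if (e & 1) r = mmul(r, a, p);
            a = mmul(a, a, p);
        }
        return r;
    }

    static uint64_t modinv(uint64_t a, uint64_t p) { return modpow(a, p - 2, p); }

    int main(void) {
        const uint64_t p = 103;
        /* Two jacobian x-coordinates with z2 = z1 * zr (the stored z-ratio). */
        uint64_t z1 = 4, zr = 6, z2 = mmul(z1, zr, p);
        uint64_t ax1 = 30, ax2 = 77; /* affine x values */
        uint64_t X1 = mmul(ax1, mmul(z1, z1, p), p);
        uint64_t X2 = mmul(ax2, mmul(z2, z2, p), p);
        /* Rewrite the first point over the global denominator: X1' = X1 * zr^2. */
        uint64_t X1g = mmul(X1, mmul(zr, zr, p), p);
        /* Both points now decode with the single inverse of z2. */
        uint64_t zi = modinv(z2, p);
        uint64_t zi2 = mmul(zi, zi, p);
        assert(mmul(X1g, zi2, p) == ax1);
        assert(mmul(X2, zi2, p) == ax2);
        return 0;
    }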
*/ r[i].x = a[i].x; r[i].y = a[i].y; /* Ensure all y values are in weak normal form for fast negation of points */ - secp256k1_fe_normalize_weak(&r[i].y); + rustsecp256k1_v0_1_0_fe_normalize_weak(&r[i].y); *globalz = a[i].z; r[i].infinity = 0; zs = zr[i]; @@ -184,93 +184,93 @@ static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp /* Work our way backwards, using the z-ratios to scale the x/y values. */ while (i > 0) { if (i != len - 1) { - secp256k1_fe_mul(&zs, &zs, &zr[i]); + rustsecp256k1_v0_1_0_fe_mul(&zs, &zs, &zr[i]); } i--; - secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zs); + rustsecp256k1_v0_1_0_ge_set_gej_zinv(&r[i], &a[i], &zs); } } } -static void secp256k1_gej_set_infinity(secp256k1_gej *r) { +static void rustsecp256k1_v0_1_0_gej_set_infinity(rustsecp256k1_v0_1_0_gej *r) { r->infinity = 1; - secp256k1_fe_clear(&r->x); - secp256k1_fe_clear(&r->y); - secp256k1_fe_clear(&r->z); + rustsecp256k1_v0_1_0_fe_clear(&r->x); + rustsecp256k1_v0_1_0_fe_clear(&r->y); + rustsecp256k1_v0_1_0_fe_clear(&r->z); } -static void secp256k1_ge_set_infinity(secp256k1_ge *r) { +static void rustsecp256k1_v0_1_0_ge_set_infinity(rustsecp256k1_v0_1_0_ge *r) { r->infinity = 1; - secp256k1_fe_clear(&r->x); - secp256k1_fe_clear(&r->y); + rustsecp256k1_v0_1_0_fe_clear(&r->x); + rustsecp256k1_v0_1_0_fe_clear(&r->y); } -static void secp256k1_gej_clear(secp256k1_gej *r) { +static void rustsecp256k1_v0_1_0_gej_clear(rustsecp256k1_v0_1_0_gej *r) { r->infinity = 0; - secp256k1_fe_clear(&r->x); - secp256k1_fe_clear(&r->y); - secp256k1_fe_clear(&r->z); + rustsecp256k1_v0_1_0_fe_clear(&r->x); + rustsecp256k1_v0_1_0_fe_clear(&r->y); + rustsecp256k1_v0_1_0_fe_clear(&r->z); } -static void secp256k1_ge_clear(secp256k1_ge *r) { +static void rustsecp256k1_v0_1_0_ge_clear(rustsecp256k1_v0_1_0_ge *r) { r->infinity = 0; - secp256k1_fe_clear(&r->x); - secp256k1_fe_clear(&r->y); + rustsecp256k1_v0_1_0_fe_clear(&r->x); + rustsecp256k1_v0_1_0_fe_clear(&r->y); } -static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x) { - secp256k1_fe x2, x3, c; +static int rustsecp256k1_v0_1_0_ge_set_xquad(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_fe *x) { + rustsecp256k1_v0_1_0_fe x2, x3, c; r->x = *x; - secp256k1_fe_sqr(&x2, x); - secp256k1_fe_mul(&x3, x, &x2); + rustsecp256k1_v0_1_0_fe_sqr(&x2, x); + rustsecp256k1_v0_1_0_fe_mul(&x3, x, &x2); r->infinity = 0; - secp256k1_fe_set_int(&c, CURVE_B); - secp256k1_fe_add(&c, &x3); - return secp256k1_fe_sqrt(&r->y, &c); + rustsecp256k1_v0_1_0_fe_set_int(&c, CURVE_B); + rustsecp256k1_v0_1_0_fe_add(&c, &x3); + return rustsecp256k1_v0_1_0_fe_sqrt(&r->y, &c); } -static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd) { - if (!secp256k1_ge_set_xquad(r, x)) { +static int rustsecp256k1_v0_1_0_ge_set_xo_var(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_fe *x, int odd) { + if (!rustsecp256k1_v0_1_0_ge_set_xquad(r, x)) { return 0; } - secp256k1_fe_normalize_var(&r->y); - if (secp256k1_fe_is_odd(&r->y) != odd) { - secp256k1_fe_negate(&r->y, &r->y, 1); + rustsecp256k1_v0_1_0_fe_normalize_var(&r->y); + if (rustsecp256k1_v0_1_0_fe_is_odd(&r->y) != odd) { + rustsecp256k1_v0_1_0_fe_negate(&r->y, &r->y, 1); } return 1; } -static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a) { +static void rustsecp256k1_v0_1_0_gej_set_ge(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_ge *a) { r->infinity = a->infinity; r->x = a->x; r->y = a->y; - secp256k1_fe_set_int(&r->z, 1); + rustsecp256k1_v0_1_0_fe_set_int(&r->z, 1); 
} -static int secp256k1_gej_eq_x_var(const secp256k1_fe *x, const secp256k1_gej *a) { - secp256k1_fe r, r2; +static int rustsecp256k1_v0_1_0_gej_eq_x_var(const rustsecp256k1_v0_1_0_fe *x, const rustsecp256k1_v0_1_0_gej *a) { + rustsecp256k1_v0_1_0_fe r, r2; VERIFY_CHECK(!a->infinity); - secp256k1_fe_sqr(&r, &a->z); secp256k1_fe_mul(&r, &r, x); - r2 = a->x; secp256k1_fe_normalize_weak(&r2); - return secp256k1_fe_equal_var(&r, &r2); + rustsecp256k1_v0_1_0_fe_sqr(&r, &a->z); rustsecp256k1_v0_1_0_fe_mul(&r, &r, x); + r2 = a->x; rustsecp256k1_v0_1_0_fe_normalize_weak(&r2); + return rustsecp256k1_v0_1_0_fe_equal_var(&r, &r2); } -static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a) { +static void rustsecp256k1_v0_1_0_gej_neg(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a) { r->infinity = a->infinity; r->x = a->x; r->y = a->y; r->z = a->z; - secp256k1_fe_normalize_weak(&r->y); - secp256k1_fe_negate(&r->y, &r->y, 1); + rustsecp256k1_v0_1_0_fe_normalize_weak(&r->y); + rustsecp256k1_v0_1_0_fe_negate(&r->y, &r->y, 1); } -static int secp256k1_gej_is_infinity(const secp256k1_gej *a) { +static int rustsecp256k1_v0_1_0_gej_is_infinity(const rustsecp256k1_v0_1_0_gej *a) { return a->infinity; } -static int secp256k1_gej_is_valid_var(const secp256k1_gej *a) { - secp256k1_fe y2, x3, z2, z6; +static int rustsecp256k1_v0_1_0_gej_is_valid_var(const rustsecp256k1_v0_1_0_gej *a) { + rustsecp256k1_v0_1_0_fe y2, x3, z2, z6; if (a->infinity) { return 0; } @@ -279,31 +279,31 @@ static int secp256k1_gej_is_valid_var(const secp256k1_gej *a) { * Y^2 / Z^6 = X^3 / Z^6 + 7 * Y^2 = X^3 + 7*Z^6 */ - secp256k1_fe_sqr(&y2, &a->y); - secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x); - secp256k1_fe_sqr(&z2, &a->z); - secp256k1_fe_sqr(&z6, &z2); secp256k1_fe_mul(&z6, &z6, &z2); - secp256k1_fe_mul_int(&z6, CURVE_B); - secp256k1_fe_add(&x3, &z6); - secp256k1_fe_normalize_weak(&x3); - return secp256k1_fe_equal_var(&y2, &x3); + rustsecp256k1_v0_1_0_fe_sqr(&y2, &a->y); + rustsecp256k1_v0_1_0_fe_sqr(&x3, &a->x); rustsecp256k1_v0_1_0_fe_mul(&x3, &x3, &a->x); + rustsecp256k1_v0_1_0_fe_sqr(&z2, &a->z); + rustsecp256k1_v0_1_0_fe_sqr(&z6, &z2); rustsecp256k1_v0_1_0_fe_mul(&z6, &z6, &z2); + rustsecp256k1_v0_1_0_fe_mul_int(&z6, CURVE_B); + rustsecp256k1_v0_1_0_fe_add(&x3, &z6); + rustsecp256k1_v0_1_0_fe_normalize_weak(&x3); + return rustsecp256k1_v0_1_0_fe_equal_var(&y2, &x3); } -static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) { - secp256k1_fe y2, x3, c; +static int rustsecp256k1_v0_1_0_ge_is_valid_var(const rustsecp256k1_v0_1_0_ge *a) { + rustsecp256k1_v0_1_0_fe y2, x3, c; if (a->infinity) { return 0; } /* y^2 = x^3 + 7 */ - secp256k1_fe_sqr(&y2, &a->y); - secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x); - secp256k1_fe_set_int(&c, CURVE_B); - secp256k1_fe_add(&x3, &c); - secp256k1_fe_normalize_weak(&x3); - return secp256k1_fe_equal_var(&y2, &x3); + rustsecp256k1_v0_1_0_fe_sqr(&y2, &a->y); + rustsecp256k1_v0_1_0_fe_sqr(&x3, &a->x); rustsecp256k1_v0_1_0_fe_mul(&x3, &x3, &a->x); + rustsecp256k1_v0_1_0_fe_set_int(&c, CURVE_B); + rustsecp256k1_v0_1_0_fe_add(&x3, &c); + rustsecp256k1_v0_1_0_fe_normalize_weak(&x3); + return rustsecp256k1_v0_1_0_fe_equal_var(&y2, &x3); } -static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr) { +static void rustsecp256k1_v0_1_0_gej_double_var(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, rustsecp256k1_v0_1_0_fe *rzr) { /* Operations: 3 mul, 4 sqr, 0 normalize, 12 
mul_int/add/negate. * * Note that there is an implementation described at @@ -311,7 +311,7 @@ static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, s * which trades a multiply for a square, but in practice this is actually slower, * mainly because it requires more normalizations. */ - secp256k1_fe t1,t2,t3,t4; + rustsecp256k1_v0_1_0_fe t1,t2,t3,t4; /** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity, * Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have * y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p. @@ -325,47 +325,47 @@ static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, s r->infinity = a->infinity; if (r->infinity) { if (rzr != NULL) { - secp256k1_fe_set_int(rzr, 1); + rustsecp256k1_v0_1_0_fe_set_int(rzr, 1); } return; } if (rzr != NULL) { *rzr = a->y; - secp256k1_fe_normalize_weak(rzr); - secp256k1_fe_mul_int(rzr, 2); + rustsecp256k1_v0_1_0_fe_normalize_weak(rzr); + rustsecp256k1_v0_1_0_fe_mul_int(rzr, 2); } - secp256k1_fe_mul(&r->z, &a->z, &a->y); - secp256k1_fe_mul_int(&r->z, 2); /* Z' = 2*Y*Z (2) */ - secp256k1_fe_sqr(&t1, &a->x); - secp256k1_fe_mul_int(&t1, 3); /* T1 = 3*X^2 (3) */ - secp256k1_fe_sqr(&t2, &t1); /* T2 = 9*X^4 (1) */ - secp256k1_fe_sqr(&t3, &a->y); - secp256k1_fe_mul_int(&t3, 2); /* T3 = 2*Y^2 (2) */ - secp256k1_fe_sqr(&t4, &t3); - secp256k1_fe_mul_int(&t4, 2); /* T4 = 8*Y^4 (2) */ - secp256k1_fe_mul(&t3, &t3, &a->x); /* T3 = 2*X*Y^2 (1) */ + rustsecp256k1_v0_1_0_fe_mul(&r->z, &a->z, &a->y); + rustsecp256k1_v0_1_0_fe_mul_int(&r->z, 2); /* Z' = 2*Y*Z (2) */ + rustsecp256k1_v0_1_0_fe_sqr(&t1, &a->x); + rustsecp256k1_v0_1_0_fe_mul_int(&t1, 3); /* T1 = 3*X^2 (3) */ + rustsecp256k1_v0_1_0_fe_sqr(&t2, &t1); /* T2 = 9*X^4 (1) */ + rustsecp256k1_v0_1_0_fe_sqr(&t3, &a->y); + rustsecp256k1_v0_1_0_fe_mul_int(&t3, 2); /* T3 = 2*Y^2 (2) */ + rustsecp256k1_v0_1_0_fe_sqr(&t4, &t3); + rustsecp256k1_v0_1_0_fe_mul_int(&t4, 2); /* T4 = 8*Y^4 (2) */ + rustsecp256k1_v0_1_0_fe_mul(&t3, &t3, &a->x); /* T3 = 2*X*Y^2 (1) */ r->x = t3; - secp256k1_fe_mul_int(&r->x, 4); /* X' = 8*X*Y^2 (4) */ - secp256k1_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */ - secp256k1_fe_add(&r->x, &t2); /* X' = 9*X^4 - 8*X*Y^2 (6) */ - secp256k1_fe_negate(&t2, &t2, 1); /* T2 = -9*X^4 (2) */ - secp256k1_fe_mul_int(&t3, 6); /* T3 = 12*X*Y^2 (6) */ - secp256k1_fe_add(&t3, &t2); /* T3 = 12*X*Y^2 - 9*X^4 (8) */ - secp256k1_fe_mul(&r->y, &t1, &t3); /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */ - secp256k1_fe_negate(&t2, &t4, 2); /* T2 = -8*Y^4 (3) */ - secp256k1_fe_add(&r->y, &t2); /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */ + rustsecp256k1_v0_1_0_fe_mul_int(&r->x, 4); /* X' = 8*X*Y^2 (4) */ + rustsecp256k1_v0_1_0_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */ + rustsecp256k1_v0_1_0_fe_add(&r->x, &t2); /* X' = 9*X^4 - 8*X*Y^2 (6) */ + rustsecp256k1_v0_1_0_fe_negate(&t2, &t2, 1); /* T2 = -9*X^4 (2) */ + rustsecp256k1_v0_1_0_fe_mul_int(&t3, 6); /* T3 = 12*X*Y^2 (6) */ + rustsecp256k1_v0_1_0_fe_add(&t3, &t2); /* T3 = 12*X*Y^2 - 9*X^4 (8) */ + rustsecp256k1_v0_1_0_fe_mul(&r->y, &t1, &t3); /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */ + rustsecp256k1_v0_1_0_fe_negate(&t2, &t4, 2); /* T2 = -8*Y^4 (3) */ + rustsecp256k1_v0_1_0_fe_add(&r->y, &t2); /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */ } -static SECP256K1_INLINE void secp256k1_gej_double_nonzero(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr) { - VERIFY_CHECK(!secp256k1_gej_is_infinity(a)); - secp256k1_gej_double_var(r, a, 
rzr); +static SECP256K1_INLINE void rustsecp256k1_v0_1_0_gej_double_nonzero(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, rustsecp256k1_v0_1_0_fe *rzr) { + VERIFY_CHECK(!rustsecp256k1_v0_1_0_gej_is_infinity(a)); + rustsecp256k1_v0_1_0_gej_double_var(r, a, rzr); } -static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr) { +static void rustsecp256k1_v0_1_0_gej_add_var(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_gej *b, rustsecp256k1_v0_1_0_fe *rzr) { /* Operations: 12 mul, 4 sqr, 2 normalize, 12 mul_int/add/negate */ - secp256k1_fe z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; + rustsecp256k1_v0_1_0_fe z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; if (a->infinity) { VERIFY_CHECK(rzr == NULL); @@ -375,112 +375,112 @@ static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, cons if (b->infinity) { if (rzr != NULL) { - secp256k1_fe_set_int(rzr, 1); + rustsecp256k1_v0_1_0_fe_set_int(rzr, 1); } *r = *a; return; } r->infinity = 0; - secp256k1_fe_sqr(&z22, &b->z); - secp256k1_fe_sqr(&z12, &a->z); - secp256k1_fe_mul(&u1, &a->x, &z22); - secp256k1_fe_mul(&u2, &b->x, &z12); - secp256k1_fe_mul(&s1, &a->y, &z22); secp256k1_fe_mul(&s1, &s1, &b->z); - secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z); - secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2); - secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2); - if (secp256k1_fe_normalizes_to_zero_var(&h)) { - if (secp256k1_fe_normalizes_to_zero_var(&i)) { - secp256k1_gej_double_var(r, a, rzr); + rustsecp256k1_v0_1_0_fe_sqr(&z22, &b->z); + rustsecp256k1_v0_1_0_fe_sqr(&z12, &a->z); + rustsecp256k1_v0_1_0_fe_mul(&u1, &a->x, &z22); + rustsecp256k1_v0_1_0_fe_mul(&u2, &b->x, &z12); + rustsecp256k1_v0_1_0_fe_mul(&s1, &a->y, &z22); rustsecp256k1_v0_1_0_fe_mul(&s1, &s1, &b->z); + rustsecp256k1_v0_1_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_1_0_fe_mul(&s2, &s2, &a->z); + rustsecp256k1_v0_1_0_fe_negate(&h, &u1, 1); rustsecp256k1_v0_1_0_fe_add(&h, &u2); + rustsecp256k1_v0_1_0_fe_negate(&i, &s1, 1); rustsecp256k1_v0_1_0_fe_add(&i, &s2); + if (rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(&h)) { + if (rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(&i)) { + rustsecp256k1_v0_1_0_gej_double_var(r, a, rzr); } else { if (rzr != NULL) { - secp256k1_fe_set_int(rzr, 0); + rustsecp256k1_v0_1_0_fe_set_int(rzr, 0); } r->infinity = 1; } return; } - secp256k1_fe_sqr(&i2, &i); - secp256k1_fe_sqr(&h2, &h); - secp256k1_fe_mul(&h3, &h, &h2); - secp256k1_fe_mul(&h, &h, &b->z); + rustsecp256k1_v0_1_0_fe_sqr(&i2, &i); + rustsecp256k1_v0_1_0_fe_sqr(&h2, &h); + rustsecp256k1_v0_1_0_fe_mul(&h3, &h, &h2); + rustsecp256k1_v0_1_0_fe_mul(&h, &h, &b->z); if (rzr != NULL) { *rzr = h; } - secp256k1_fe_mul(&r->z, &a->z, &h); - secp256k1_fe_mul(&t, &u1, &h2); - r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2); - secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i); - secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1); - secp256k1_fe_add(&r->y, &h3); + rustsecp256k1_v0_1_0_fe_mul(&r->z, &a->z, &h); + rustsecp256k1_v0_1_0_fe_mul(&t, &u1, &h2); + r->x = t; rustsecp256k1_v0_1_0_fe_mul_int(&r->x, 2); rustsecp256k1_v0_1_0_fe_add(&r->x, &h3); rustsecp256k1_v0_1_0_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_1_0_fe_add(&r->x, &i2); + rustsecp256k1_v0_1_0_fe_negate(&r->y, 
&r->x, 5); rustsecp256k1_v0_1_0_fe_add(&r->y, &t); rustsecp256k1_v0_1_0_fe_mul(&r->y, &r->y, &i); + rustsecp256k1_v0_1_0_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_1_0_fe_negate(&h3, &h3, 1); + rustsecp256k1_v0_1_0_fe_add(&r->y, &h3); } -static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr) { +static void rustsecp256k1_v0_1_0_gej_add_ge_var(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_ge *b, rustsecp256k1_v0_1_0_fe *rzr) { /* 8 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */ - secp256k1_fe z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; + rustsecp256k1_v0_1_0_fe z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; if (a->infinity) { VERIFY_CHECK(rzr == NULL); - secp256k1_gej_set_ge(r, b); + rustsecp256k1_v0_1_0_gej_set_ge(r, b); return; } if (b->infinity) { if (rzr != NULL) { - secp256k1_fe_set_int(rzr, 1); + rustsecp256k1_v0_1_0_fe_set_int(rzr, 1); } *r = *a; return; } r->infinity = 0; - secp256k1_fe_sqr(&z12, &a->z); - u1 = a->x; secp256k1_fe_normalize_weak(&u1); - secp256k1_fe_mul(&u2, &b->x, &z12); - s1 = a->y; secp256k1_fe_normalize_weak(&s1); - secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z); - secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2); - secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2); - if (secp256k1_fe_normalizes_to_zero_var(&h)) { - if (secp256k1_fe_normalizes_to_zero_var(&i)) { - secp256k1_gej_double_var(r, a, rzr); + rustsecp256k1_v0_1_0_fe_sqr(&z12, &a->z); + u1 = a->x; rustsecp256k1_v0_1_0_fe_normalize_weak(&u1); + rustsecp256k1_v0_1_0_fe_mul(&u2, &b->x, &z12); + s1 = a->y; rustsecp256k1_v0_1_0_fe_normalize_weak(&s1); + rustsecp256k1_v0_1_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_1_0_fe_mul(&s2, &s2, &a->z); + rustsecp256k1_v0_1_0_fe_negate(&h, &u1, 1); rustsecp256k1_v0_1_0_fe_add(&h, &u2); + rustsecp256k1_v0_1_0_fe_negate(&i, &s1, 1); rustsecp256k1_v0_1_0_fe_add(&i, &s2); + if (rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(&h)) { + if (rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(&i)) { + rustsecp256k1_v0_1_0_gej_double_var(r, a, rzr); } else { if (rzr != NULL) { - secp256k1_fe_set_int(rzr, 0); + rustsecp256k1_v0_1_0_fe_set_int(rzr, 0); } r->infinity = 1; } return; } - secp256k1_fe_sqr(&i2, &i); - secp256k1_fe_sqr(&h2, &h); - secp256k1_fe_mul(&h3, &h, &h2); + rustsecp256k1_v0_1_0_fe_sqr(&i2, &i); + rustsecp256k1_v0_1_0_fe_sqr(&h2, &h); + rustsecp256k1_v0_1_0_fe_mul(&h3, &h, &h2); if (rzr != NULL) { *rzr = h; } - secp256k1_fe_mul(&r->z, &a->z, &h); - secp256k1_fe_mul(&t, &u1, &h2); - r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2); - secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i); - secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1); - secp256k1_fe_add(&r->y, &h3); + rustsecp256k1_v0_1_0_fe_mul(&r->z, &a->z, &h); + rustsecp256k1_v0_1_0_fe_mul(&t, &u1, &h2); + r->x = t; rustsecp256k1_v0_1_0_fe_mul_int(&r->x, 2); rustsecp256k1_v0_1_0_fe_add(&r->x, &h3); rustsecp256k1_v0_1_0_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_1_0_fe_add(&r->x, &i2); + rustsecp256k1_v0_1_0_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_1_0_fe_add(&r->y, &t); rustsecp256k1_v0_1_0_fe_mul(&r->y, &r->y, &i); + rustsecp256k1_v0_1_0_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_1_0_fe_negate(&h3, &h3, 1); + rustsecp256k1_v0_1_0_fe_add(&r->y, &h3); } -static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, 
const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv) { +static void rustsecp256k1_v0_1_0_gej_add_zinv_var(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_ge *b, const rustsecp256k1_v0_1_0_fe *bzinv) { /* 9 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */ - secp256k1_fe az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; + rustsecp256k1_v0_1_0_fe az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; if (b->infinity) { *r = *a; return; } if (a->infinity) { - secp256k1_fe bzinv2, bzinv3; + rustsecp256k1_v0_1_0_fe bzinv2, bzinv3; r->infinity = b->infinity; - secp256k1_fe_sqr(&bzinv2, bzinv); - secp256k1_fe_mul(&bzinv3, &bzinv2, bzinv); - secp256k1_fe_mul(&r->x, &b->x, &bzinv2); - secp256k1_fe_mul(&r->y, &b->y, &bzinv3); - secp256k1_fe_set_int(&r->z, 1); + rustsecp256k1_v0_1_0_fe_sqr(&bzinv2, bzinv); + rustsecp256k1_v0_1_0_fe_mul(&bzinv3, &bzinv2, bzinv); + rustsecp256k1_v0_1_0_fe_mul(&r->x, &b->x, &bzinv2); + rustsecp256k1_v0_1_0_fe_mul(&r->y, &b->y, &bzinv3); + rustsecp256k1_v0_1_0_fe_set_int(&r->z, 1); return; } r->infinity = 0; @@ -493,40 +493,40 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, * The variable az below holds the modified Z coordinate for a, which is used * for the computation of rx and ry, but not for rz. */ - secp256k1_fe_mul(&az, &a->z, bzinv); + rustsecp256k1_v0_1_0_fe_mul(&az, &a->z, bzinv); - secp256k1_fe_sqr(&z12, &az); - u1 = a->x; secp256k1_fe_normalize_weak(&u1); - secp256k1_fe_mul(&u2, &b->x, &z12); - s1 = a->y; secp256k1_fe_normalize_weak(&s1); - secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &az); - secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2); - secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2); - if (secp256k1_fe_normalizes_to_zero_var(&h)) { - if (secp256k1_fe_normalizes_to_zero_var(&i)) { - secp256k1_gej_double_var(r, a, NULL); + rustsecp256k1_v0_1_0_fe_sqr(&z12, &az); + u1 = a->x; rustsecp256k1_v0_1_0_fe_normalize_weak(&u1); + rustsecp256k1_v0_1_0_fe_mul(&u2, &b->x, &z12); + s1 = a->y; rustsecp256k1_v0_1_0_fe_normalize_weak(&s1); + rustsecp256k1_v0_1_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_1_0_fe_mul(&s2, &s2, &az); + rustsecp256k1_v0_1_0_fe_negate(&h, &u1, 1); rustsecp256k1_v0_1_0_fe_add(&h, &u2); + rustsecp256k1_v0_1_0_fe_negate(&i, &s1, 1); rustsecp256k1_v0_1_0_fe_add(&i, &s2); + if (rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(&h)) { + if (rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(&i)) { + rustsecp256k1_v0_1_0_gej_double_var(r, a, NULL); } else { r->infinity = 1; } return; } - secp256k1_fe_sqr(&i2, &i); - secp256k1_fe_sqr(&h2, &h); - secp256k1_fe_mul(&h3, &h, &h2); - r->z = a->z; secp256k1_fe_mul(&r->z, &r->z, &h); - secp256k1_fe_mul(&t, &u1, &h2); - r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2); - secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i); - secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1); - secp256k1_fe_add(&r->y, &h3); + rustsecp256k1_v0_1_0_fe_sqr(&i2, &i); + rustsecp256k1_v0_1_0_fe_sqr(&h2, &h); + rustsecp256k1_v0_1_0_fe_mul(&h3, &h, &h2); + r->z = a->z; rustsecp256k1_v0_1_0_fe_mul(&r->z, &r->z, &h); + rustsecp256k1_v0_1_0_fe_mul(&t, &u1, &h2); + r->x = t; rustsecp256k1_v0_1_0_fe_mul_int(&r->x, 2); rustsecp256k1_v0_1_0_fe_add(&r->x, &h3); rustsecp256k1_v0_1_0_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_1_0_fe_add(&r->x, &i2); + 
rustsecp256k1_v0_1_0_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_1_0_fe_add(&r->y, &t); rustsecp256k1_v0_1_0_fe_mul(&r->y, &r->y, &i); + rustsecp256k1_v0_1_0_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_1_0_fe_negate(&h3, &h3, 1); + rustsecp256k1_v0_1_0_fe_add(&r->y, &h3); } -static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b) { +static void rustsecp256k1_v0_1_0_gej_add_ge(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_ge *b) { /* Operations: 7 mul, 5 sqr, 4 normalize, 21 mul_int/add/negate/cmov */ - static const secp256k1_fe fe_1 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); - secp256k1_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr; - secp256k1_fe m_alt, rr_alt; + static const rustsecp256k1_v0_1_0_fe fe_1 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); + rustsecp256k1_v0_1_0_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr; + rustsecp256k1_v0_1_0_fe m_alt, rr_alt; int infinity, degenerate; VERIFY_CHECK(!b->infinity); VERIFY_CHECK(a->infinity == 0 || a->infinity == 1); @@ -581,115 +581,115 @@ static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const * so this covers everything. */ - secp256k1_fe_sqr(&zz, &a->z); /* z = Z1^2 */ - u1 = a->x; secp256k1_fe_normalize_weak(&u1); /* u1 = U1 = X1*Z2^2 (1) */ - secp256k1_fe_mul(&u2, &b->x, &zz); /* u2 = U2 = X2*Z1^2 (1) */ - s1 = a->y; secp256k1_fe_normalize_weak(&s1); /* s1 = S1 = Y1*Z2^3 (1) */ - secp256k1_fe_mul(&s2, &b->y, &zz); /* s2 = Y2*Z1^2 (1) */ - secp256k1_fe_mul(&s2, &s2, &a->z); /* s2 = S2 = Y2*Z1^3 (1) */ - t = u1; secp256k1_fe_add(&t, &u2); /* t = T = U1+U2 (2) */ - m = s1; secp256k1_fe_add(&m, &s2); /* m = M = S1+S2 (2) */ - secp256k1_fe_sqr(&rr, &t); /* rr = T^2 (1) */ - secp256k1_fe_negate(&m_alt, &u2, 1); /* Malt = -X2*Z1^2 */ - secp256k1_fe_mul(&tt, &u1, &m_alt); /* tt = -U1*U2 (2) */ - secp256k1_fe_add(&rr, &tt); /* rr = R = T^2-U1*U2 (3) */ + rustsecp256k1_v0_1_0_fe_sqr(&zz, &a->z); /* z = Z1^2 */ + u1 = a->x; rustsecp256k1_v0_1_0_fe_normalize_weak(&u1); /* u1 = U1 = X1*Z2^2 (1) */ + rustsecp256k1_v0_1_0_fe_mul(&u2, &b->x, &zz); /* u2 = U2 = X2*Z1^2 (1) */ + s1 = a->y; rustsecp256k1_v0_1_0_fe_normalize_weak(&s1); /* s1 = S1 = Y1*Z2^3 (1) */ + rustsecp256k1_v0_1_0_fe_mul(&s2, &b->y, &zz); /* s2 = Y2*Z1^2 (1) */ + rustsecp256k1_v0_1_0_fe_mul(&s2, &s2, &a->z); /* s2 = S2 = Y2*Z1^3 (1) */ + t = u1; rustsecp256k1_v0_1_0_fe_add(&t, &u2); /* t = T = U1+U2 (2) */ + m = s1; rustsecp256k1_v0_1_0_fe_add(&m, &s2); /* m = M = S1+S2 (2) */ + rustsecp256k1_v0_1_0_fe_sqr(&rr, &t); /* rr = T^2 (1) */ + rustsecp256k1_v0_1_0_fe_negate(&m_alt, &u2, 1); /* Malt = -X2*Z1^2 */ + rustsecp256k1_v0_1_0_fe_mul(&tt, &u1, &m_alt); /* tt = -U1*U2 (2) */ + rustsecp256k1_v0_1_0_fe_add(&rr, &tt); /* rr = R = T^2-U1*U2 (3) */ /** If lambda = R/M = 0/0 we have a problem (except in the "trivial" * case that Z = z1z2 = 0, and this is special-cased later on). */ - degenerate = secp256k1_fe_normalizes_to_zero(&m) & - secp256k1_fe_normalizes_to_zero(&rr); + degenerate = rustsecp256k1_v0_1_0_fe_normalizes_to_zero(&m) & + rustsecp256k1_v0_1_0_fe_normalizes_to_zero(&rr); /* This only occurs when y1 == -y2 and x1^3 == x2^3, but x1 != x2. * This means either x1 == beta*x2 or beta*x1 == x2, where beta is * a nontrivial cube root of one. In either case, an alternate * non-indeterminate expression for lambda is (y1 - y2)/(x1 - x2), * so we set R/M equal to this. 
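The switch to (y1 - y2)/(x1 - x2) is justified by the curve equation: both points satisfy y^2 = x^3 + 7, so the two slope expressions agree wherever both are defined. A sketch of the algebra (not text from the patch):

\[
(y_1 - y_2)(y_1 + y_2) \;=\; y_1^2 - y_2^2 \;=\; x_1^3 - x_2^3 \;=\; (x_1 - x_2)(x_1^2 + x_1 x_2 + x_2^2),
\]
\[
\lambda \;=\; \frac{y_1 - y_2}{x_1 - x_2} \;=\; \frac{x_1^2 + x_1 x_2 + x_2^2}{y_1 + y_2}.
\]

The right-hand form is R/M as computed above, since R = T^2 - U1*U2 = (U1 + U2)^2 - U1*U2 = U1^2 + U1*U2 + U2^2 and M = S1 + S2; the degenerate branch simply picks whichever form has a nonzero denominator.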
*/ rr_alt = s1; - secp256k1_fe_mul_int(&rr_alt, 2); /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */ - secp256k1_fe_add(&m_alt, &u1); /* Malt = X1*Z2^2 - X2*Z1^2 */ + rustsecp256k1_v0_1_0_fe_mul_int(&rr_alt, 2); /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */ + rustsecp256k1_v0_1_0_fe_add(&m_alt, &u1); /* Malt = X1*Z2^2 - X2*Z1^2 */ - secp256k1_fe_cmov(&rr_alt, &rr, !degenerate); - secp256k1_fe_cmov(&m_alt, &m, !degenerate); + rustsecp256k1_v0_1_0_fe_cmov(&rr_alt, &rr, !degenerate); + rustsecp256k1_v0_1_0_fe_cmov(&m_alt, &m, !degenerate); /* Now Ralt / Malt = lambda and is guaranteed not to be 0/0. * From here on out Ralt and Malt represent the numerator * and denominator of lambda; R and M represent the explicit * expressions x1^2 + x2^2 + x1x2 and y1 + y2. */ - secp256k1_fe_sqr(&n, &m_alt); /* n = Malt^2 (1) */ - secp256k1_fe_mul(&q, &n, &t); /* q = Q = T*Malt^2 (1) */ + rustsecp256k1_v0_1_0_fe_sqr(&n, &m_alt); /* n = Malt^2 (1) */ + rustsecp256k1_v0_1_0_fe_mul(&q, &n, &t); /* q = Q = T*Malt^2 (1) */ /* These two lines use the observation that either M == Malt or M == 0, * so M^3 * Malt is either Malt^4 (which is computed by squaring), or * zero (which is "computed" by cmov). So the cost is one squaring * versus two multiplications. */ - secp256k1_fe_sqr(&n, &n); - secp256k1_fe_cmov(&n, &m, degenerate); /* n = M^3 * Malt (2) */ - secp256k1_fe_sqr(&t, &rr_alt); /* t = Ralt^2 (1) */ - secp256k1_fe_mul(&r->z, &a->z, &m_alt); /* r->z = Malt*Z (1) */ - infinity = secp256k1_fe_normalizes_to_zero(&r->z) * (1 - a->infinity); - secp256k1_fe_mul_int(&r->z, 2); /* r->z = Z3 = 2*Malt*Z (2) */ - secp256k1_fe_negate(&q, &q, 1); /* q = -Q (2) */ - secp256k1_fe_add(&t, &q); /* t = Ralt^2-Q (3) */ - secp256k1_fe_normalize_weak(&t); + rustsecp256k1_v0_1_0_fe_sqr(&n, &n); + rustsecp256k1_v0_1_0_fe_cmov(&n, &m, degenerate); /* n = M^3 * Malt (2) */ + rustsecp256k1_v0_1_0_fe_sqr(&t, &rr_alt); /* t = Ralt^2 (1) */ + rustsecp256k1_v0_1_0_fe_mul(&r->z, &a->z, &m_alt); /* r->z = Malt*Z (1) */ + infinity = rustsecp256k1_v0_1_0_fe_normalizes_to_zero(&r->z) * (1 - a->infinity); + rustsecp256k1_v0_1_0_fe_mul_int(&r->z, 2); /* r->z = Z3 = 2*Malt*Z (2) */ + rustsecp256k1_v0_1_0_fe_negate(&q, &q, 1); /* q = -Q (2) */ + rustsecp256k1_v0_1_0_fe_add(&t, &q); /* t = Ralt^2-Q (3) */ + rustsecp256k1_v0_1_0_fe_normalize_weak(&t); r->x = t; /* r->x = Ralt^2-Q (1) */ - secp256k1_fe_mul_int(&t, 2); /* t = 2*x3 (2) */ - secp256k1_fe_add(&t, &q); /* t = 2*x3 - Q: (4) */ - secp256k1_fe_mul(&t, &t, &rr_alt); /* t = Ralt*(2*x3 - Q) (1) */ - secp256k1_fe_add(&t, &n); /* t = Ralt*(2*x3 - Q) + M^3*Malt (3) */ - secp256k1_fe_negate(&r->y, &t, 3); /* r->y = Ralt*(Q - 2x3) - M^3*Malt (4) */ - secp256k1_fe_normalize_weak(&r->y); - secp256k1_fe_mul_int(&r->x, 4); /* r->x = X3 = 4*(Ralt^2-Q) */ - secp256k1_fe_mul_int(&r->y, 4); /* r->y = Y3 = 4*Ralt*(Q - 2x3) - 4*M^3*Malt (4) */ + rustsecp256k1_v0_1_0_fe_mul_int(&t, 2); /* t = 2*x3 (2) */ + rustsecp256k1_v0_1_0_fe_add(&t, &q); /* t = 2*x3 - Q: (4) */ + rustsecp256k1_v0_1_0_fe_mul(&t, &t, &rr_alt); /* t = Ralt*(2*x3 - Q) (1) */ + rustsecp256k1_v0_1_0_fe_add(&t, &n); /* t = Ralt*(2*x3 - Q) + M^3*Malt (3) */ + rustsecp256k1_v0_1_0_fe_negate(&r->y, &t, 3); /* r->y = Ralt*(Q - 2x3) - M^3*Malt (4) */ + rustsecp256k1_v0_1_0_fe_normalize_weak(&r->y); + rustsecp256k1_v0_1_0_fe_mul_int(&r->x, 4); /* r->x = X3 = 4*(Ralt^2-Q) */ + rustsecp256k1_v0_1_0_fe_mul_int(&r->y, 4); /* r->y = Y3 = 4*Ralt*(Q - 2x3) - 4*M^3*Malt (4) */ /** In case a->infinity == 1, replace r with (b->x, b->y, 1). 
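The fe_cmov calls above are what keep this addition formula constant-time: both the regular and the degenerate arms are always computed, and the result is selected without a secret-dependent branch. A minimal sketch of the underlying branchless-select pattern in plain C (illustrative only; the real rustsecp256k1_v0_1_0_fe_cmov applies the same mask limb by limb):

    #include <stdint.h>

    /* Set *r = a when flag is non-zero, leave *r unchanged otherwise,
     * without branching on flag. */
    static void sketch_cmov32(uint32_t *r, uint32_t a, int flag) {
        uint32_t mask = (uint32_t)0 - (uint32_t)(flag != 0); /* 0x00000000 or 0xFFFFFFFF */
        *r = (*r & ~mask) | (a & mask);
    }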
*/ - secp256k1_fe_cmov(&r->x, &b->x, a->infinity); - secp256k1_fe_cmov(&r->y, &b->y, a->infinity); - secp256k1_fe_cmov(&r->z, &fe_1, a->infinity); + rustsecp256k1_v0_1_0_fe_cmov(&r->x, &b->x, a->infinity); + rustsecp256k1_v0_1_0_fe_cmov(&r->y, &b->y, a->infinity); + rustsecp256k1_v0_1_0_fe_cmov(&r->z, &fe_1, a->infinity); r->infinity = infinity; } -static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *s) { +static void rustsecp256k1_v0_1_0_gej_rescale(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_fe *s) { /* Operations: 4 mul, 1 sqr */ - secp256k1_fe zz; - VERIFY_CHECK(!secp256k1_fe_is_zero(s)); - secp256k1_fe_sqr(&zz, s); - secp256k1_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */ - secp256k1_fe_mul(&r->y, &r->y, &zz); - secp256k1_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */ - secp256k1_fe_mul(&r->z, &r->z, s); /* r->z *= s */ + rustsecp256k1_v0_1_0_fe zz; + VERIFY_CHECK(!rustsecp256k1_v0_1_0_fe_is_zero(s)); + rustsecp256k1_v0_1_0_fe_sqr(&zz, s); + rustsecp256k1_v0_1_0_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */ + rustsecp256k1_v0_1_0_fe_mul(&r->y, &r->y, &zz); + rustsecp256k1_v0_1_0_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */ + rustsecp256k1_v0_1_0_fe_mul(&r->z, &r->z, s); /* r->z *= s */ } -static void secp256k1_ge_to_storage(secp256k1_ge_storage *r, const secp256k1_ge *a) { - secp256k1_fe x, y; +static void rustsecp256k1_v0_1_0_ge_to_storage(rustsecp256k1_v0_1_0_ge_storage *r, const rustsecp256k1_v0_1_0_ge *a) { + rustsecp256k1_v0_1_0_fe x, y; VERIFY_CHECK(!a->infinity); x = a->x; - secp256k1_fe_normalize(&x); + rustsecp256k1_v0_1_0_fe_normalize(&x); y = a->y; - secp256k1_fe_normalize(&y); - secp256k1_fe_to_storage(&r->x, &x); - secp256k1_fe_to_storage(&r->y, &y); + rustsecp256k1_v0_1_0_fe_normalize(&y); + rustsecp256k1_v0_1_0_fe_to_storage(&r->x, &x); + rustsecp256k1_v0_1_0_fe_to_storage(&r->y, &y); } -static void secp256k1_ge_from_storage(secp256k1_ge *r, const secp256k1_ge_storage *a) { - secp256k1_fe_from_storage(&r->x, &a->x); - secp256k1_fe_from_storage(&r->y, &a->y); +static void rustsecp256k1_v0_1_0_ge_from_storage(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_ge_storage *a) { + rustsecp256k1_v0_1_0_fe_from_storage(&r->x, &a->x); + rustsecp256k1_v0_1_0_fe_from_storage(&r->y, &a->y); r->infinity = 0; } -static SECP256K1_INLINE void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_ge_storage *a, int flag) { - secp256k1_fe_storage_cmov(&r->x, &a->x, flag); - secp256k1_fe_storage_cmov(&r->y, &a->y, flag); +static SECP256K1_INLINE void rustsecp256k1_v0_1_0_ge_storage_cmov(rustsecp256k1_v0_1_0_ge_storage *r, const rustsecp256k1_v0_1_0_ge_storage *a, int flag) { + rustsecp256k1_v0_1_0_fe_storage_cmov(&r->x, &a->x, flag); + rustsecp256k1_v0_1_0_fe_storage_cmov(&r->y, &a->y, flag); } #ifdef USE_ENDOMORPHISM -static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) { - static const secp256k1_fe beta = SECP256K1_FE_CONST( +static void rustsecp256k1_v0_1_0_ge_mul_lambda(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_ge *a) { + static const rustsecp256k1_v0_1_0_fe beta = SECP256K1_FE_CONST( 0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul, 0x9cf04975ul, 0x12f58995ul, 0xc1396c28ul, 0x719501eeul ); *r = *a; - secp256k1_fe_mul(&r->x, &r->x, &beta); + rustsecp256k1_v0_1_0_fe_mul(&r->x, &r->x, &beta); } #endif -static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a) { - secp256k1_fe yz; +static int rustsecp256k1_v0_1_0_gej_has_quad_y_var(const rustsecp256k1_v0_1_0_gej *a) { + rustsecp256k1_v0_1_0_fe yz; if 
(a->infinity) { return 0; @@ -698,8 +698,8 @@ static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a) { /* We rely on the fact that the Jacobi symbol of 1 / a->z^3 is the same as * that of a->z. Thus a->y / a->z^3 is a quadratic residue iff a->y * a->z is */ - secp256k1_fe_mul(&yz, &a->y, &a->z); - return secp256k1_fe_is_quad_var(&yz); + rustsecp256k1_v0_1_0_fe_mul(&yz, &a->y, &a->z); + return rustsecp256k1_v0_1_0_fe_is_quad_var(&yz); } #endif /* SECP256K1_GROUP_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/hash.h b/secp256k1-sys/depend/secp256k1/src/hash.h index de26e4b..a177d64 100644 --- a/secp256k1-sys/depend/secp256k1/src/hash.h +++ b/secp256k1-sys/depend/secp256k1/src/hash.h @@ -14,28 +14,28 @@ typedef struct { uint32_t s[8]; uint32_t buf[16]; /* In big endian */ size_t bytes; -} secp256k1_sha256; +} rustsecp256k1_v0_1_0_sha256; -static void secp256k1_sha256_initialize(secp256k1_sha256 *hash); -static void secp256k1_sha256_write(secp256k1_sha256 *hash, const unsigned char *data, size_t size); -static void secp256k1_sha256_finalize(secp256k1_sha256 *hash, unsigned char *out32); +static void rustsecp256k1_v0_1_0_sha256_initialize(rustsecp256k1_v0_1_0_sha256 *hash); +static void rustsecp256k1_v0_1_0_sha256_write(rustsecp256k1_v0_1_0_sha256 *hash, const unsigned char *data, size_t size); +static void rustsecp256k1_v0_1_0_sha256_finalize(rustsecp256k1_v0_1_0_sha256 *hash, unsigned char *out32); typedef struct { - secp256k1_sha256 inner, outer; -} secp256k1_hmac_sha256; + rustsecp256k1_v0_1_0_sha256 inner, outer; +} rustsecp256k1_v0_1_0_hmac_sha256; -static void secp256k1_hmac_sha256_initialize(secp256k1_hmac_sha256 *hash, const unsigned char *key, size_t size); -static void secp256k1_hmac_sha256_write(secp256k1_hmac_sha256 *hash, const unsigned char *data, size_t size); -static void secp256k1_hmac_sha256_finalize(secp256k1_hmac_sha256 *hash, unsigned char *out32); +static void rustsecp256k1_v0_1_0_hmac_sha256_initialize(rustsecp256k1_v0_1_0_hmac_sha256 *hash, const unsigned char *key, size_t size); +static void rustsecp256k1_v0_1_0_hmac_sha256_write(rustsecp256k1_v0_1_0_hmac_sha256 *hash, const unsigned char *data, size_t size); +static void rustsecp256k1_v0_1_0_hmac_sha256_finalize(rustsecp256k1_v0_1_0_hmac_sha256 *hash, unsigned char *out32); typedef struct { unsigned char v[32]; unsigned char k[32]; int retry; -} secp256k1_rfc6979_hmac_sha256; +} rustsecp256k1_v0_1_0_rfc6979_hmac_sha256; -static void secp256k1_rfc6979_hmac_sha256_initialize(secp256k1_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen); -static void secp256k1_rfc6979_hmac_sha256_generate(secp256k1_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen); -static void secp256k1_rfc6979_hmac_sha256_finalize(secp256k1_rfc6979_hmac_sha256 *rng); +static void rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen); +static void rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen); +static void rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 *rng); #endif /* SECP256K1_HASH_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/hash_impl.h b/secp256k1-sys/depend/secp256k1/src/hash_impl.h index 009f26b..78aebac 100644 --- a/secp256k1-sys/depend/secp256k1/src/hash_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/hash_impl.h @@ -33,7 +33,7 @@ #define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 
0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24)) #endif -static void secp256k1_sha256_initialize(secp256k1_sha256 *hash) { +static void rustsecp256k1_v0_1_0_sha256_initialize(rustsecp256k1_v0_1_0_sha256 *hash) { hash->s[0] = 0x6a09e667ul; hash->s[1] = 0xbb67ae85ul; hash->s[2] = 0x3c6ef372ul; @@ -46,7 +46,7 @@ static void secp256k1_sha256_initialize(secp256k1_sha256 *hash) { } /** Perform one SHA-256 transformation, processing 16 big endian 32-bit words. */ -static void secp256k1_sha256_transform(uint32_t* s, const uint32_t* chunk) { +static void rustsecp256k1_v0_1_0_sha256_transform(uint32_t* s, const uint32_t* chunk) { uint32_t a = s[0], b = s[1], c = s[2], d = s[3], e = s[4], f = s[5], g = s[6], h = s[7]; uint32_t w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15; @@ -128,7 +128,7 @@ static void secp256k1_sha256_transform(uint32_t* s, const uint32_t* chunk) { s[7] += h; } -static void secp256k1_sha256_write(secp256k1_sha256 *hash, const unsigned char *data, size_t len) { +static void rustsecp256k1_v0_1_0_sha256_write(rustsecp256k1_v0_1_0_sha256 *hash, const unsigned char *data, size_t len) { size_t bufsize = hash->bytes & 0x3F; hash->bytes += len; while (bufsize + len >= 64) { @@ -137,7 +137,7 @@ static void secp256k1_sha256_write(secp256k1_sha256 *hash, const unsigned char * memcpy(((unsigned char*)hash->buf) + bufsize, data, chunk_len); data += chunk_len; len -= chunk_len; - secp256k1_sha256_transform(hash->s, hash->buf); + rustsecp256k1_v0_1_0_sha256_transform(hash->s, hash->buf); bufsize = 0; } if (len) { @@ -146,15 +146,15 @@ static void secp256k1_sha256_write(secp256k1_sha256 *hash, const unsigned char * } } -static void secp256k1_sha256_finalize(secp256k1_sha256 *hash, unsigned char *out32) { +static void rustsecp256k1_v0_1_0_sha256_finalize(rustsecp256k1_v0_1_0_sha256 *hash, unsigned char *out32) { static const unsigned char pad[64] = {0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; uint32_t sizedesc[2]; uint32_t out[8]; int i = 0; sizedesc[0] = BE32(hash->bytes >> 29); sizedesc[1] = BE32(hash->bytes << 3); - secp256k1_sha256_write(hash, pad, 1 + ((119 - (hash->bytes % 64)) % 64)); - secp256k1_sha256_write(hash, (const unsigned char*)sizedesc, 8); + rustsecp256k1_v0_1_0_sha256_write(hash, pad, 1 + ((119 - (hash->bytes % 64)) % 64)); + rustsecp256k1_v0_1_0_sha256_write(hash, (const unsigned char*)sizedesc, 8); for (i = 0; i < 8; i++) { out[i] = BE32(hash->s[i]); hash->s[i] = 0; @@ -162,49 +162,49 @@ static void secp256k1_sha256_finalize(secp256k1_sha256 *hash, unsigned char *out memcpy(out32, (const unsigned char*)out, 32); } -static void secp256k1_hmac_sha256_initialize(secp256k1_hmac_sha256 *hash, const unsigned char *key, size_t keylen) { +static void rustsecp256k1_v0_1_0_hmac_sha256_initialize(rustsecp256k1_v0_1_0_hmac_sha256 *hash, const unsigned char *key, size_t keylen) { size_t n; unsigned char rkey[64]; if (keylen <= sizeof(rkey)) { memcpy(rkey, key, keylen); memset(rkey + keylen, 0, sizeof(rkey) - keylen); } else { - secp256k1_sha256 sha256; - secp256k1_sha256_initialize(&sha256); - secp256k1_sha256_write(&sha256, key, keylen); - secp256k1_sha256_finalize(&sha256, rkey); + rustsecp256k1_v0_1_0_sha256 sha256; + rustsecp256k1_v0_1_0_sha256_initialize(&sha256); + rustsecp256k1_v0_1_0_sha256_write(&sha256, key, keylen); + rustsecp256k1_v0_1_0_sha256_finalize(&sha256, rkey); memset(rkey + 
32, 0, 32); } - secp256k1_sha256_initialize(&hash->outer); + rustsecp256k1_v0_1_0_sha256_initialize(&hash->outer); for (n = 0; n < sizeof(rkey); n++) { rkey[n] ^= 0x5c; } - secp256k1_sha256_write(&hash->outer, rkey, sizeof(rkey)); + rustsecp256k1_v0_1_0_sha256_write(&hash->outer, rkey, sizeof(rkey)); - secp256k1_sha256_initialize(&hash->inner); + rustsecp256k1_v0_1_0_sha256_initialize(&hash->inner); for (n = 0; n < sizeof(rkey); n++) { rkey[n] ^= 0x5c ^ 0x36; } - secp256k1_sha256_write(&hash->inner, rkey, sizeof(rkey)); + rustsecp256k1_v0_1_0_sha256_write(&hash->inner, rkey, sizeof(rkey)); memset(rkey, 0, sizeof(rkey)); } -static void secp256k1_hmac_sha256_write(secp256k1_hmac_sha256 *hash, const unsigned char *data, size_t size) { - secp256k1_sha256_write(&hash->inner, data, size); +static void rustsecp256k1_v0_1_0_hmac_sha256_write(rustsecp256k1_v0_1_0_hmac_sha256 *hash, const unsigned char *data, size_t size) { + rustsecp256k1_v0_1_0_sha256_write(&hash->inner, data, size); } -static void secp256k1_hmac_sha256_finalize(secp256k1_hmac_sha256 *hash, unsigned char *out32) { +static void rustsecp256k1_v0_1_0_hmac_sha256_finalize(rustsecp256k1_v0_1_0_hmac_sha256 *hash, unsigned char *out32) { unsigned char temp[32]; - secp256k1_sha256_finalize(&hash->inner, temp); - secp256k1_sha256_write(&hash->outer, temp, 32); + rustsecp256k1_v0_1_0_sha256_finalize(&hash->inner, temp); + rustsecp256k1_v0_1_0_sha256_write(&hash->outer, temp, 32); memset(temp, 0, 32); - secp256k1_sha256_finalize(&hash->outer, out32); + rustsecp256k1_v0_1_0_sha256_finalize(&hash->outer, out32); } -static void secp256k1_rfc6979_hmac_sha256_initialize(secp256k1_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen) { - secp256k1_hmac_sha256 hmac; +static void rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen) { + rustsecp256k1_v0_1_0_hmac_sha256 hmac; static const unsigned char zero[1] = {0x00}; static const unsigned char one[1] = {0x01}; @@ -212,47 +212,47 @@ static void secp256k1_rfc6979_hmac_sha256_initialize(secp256k1_rfc6979_hmac_sha2 memset(rng->k, 0x00, 32); /* RFC6979 3.2.c. */ /* RFC6979 3.2.d. */ - secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); - secp256k1_hmac_sha256_write(&hmac, rng->v, 32); - secp256k1_hmac_sha256_write(&hmac, zero, 1); - secp256k1_hmac_sha256_write(&hmac, key, keylen); - secp256k1_hmac_sha256_finalize(&hmac, rng->k); - secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); - secp256k1_hmac_sha256_write(&hmac, rng->v, 32); - secp256k1_hmac_sha256_finalize(&hmac, rng->v); + rustsecp256k1_v0_1_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, zero, 1); + rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, key, keylen); + rustsecp256k1_v0_1_0_hmac_sha256_finalize(&hmac, rng->k); + rustsecp256k1_v0_1_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_1_0_hmac_sha256_finalize(&hmac, rng->v); /* RFC6979 3.2.f. 
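The two HMAC passes in this initializer implement RFC 6979 §3.2 steps (d)-(g): K = HMAC(K, V || 0x00 || key) then V = HMAC(K, V), and the same again with the 0x01 separator. Driven from the signing code, the RNG is used roughly as follows (a sketch against the internal API declared in hash.h above; keydata is assumed in scope, laid out as seckey32 || msg32 per the default nonce function):

    rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 rng;
    unsigned char nonce[32];
    rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_initialize(&rng, keydata, 64);
    rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_generate(&rng, nonce, 32); /* candidate k */
    rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_finalize(&rng);            /* wipe state */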
*/ - secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); - secp256k1_hmac_sha256_write(&hmac, rng->v, 32); - secp256k1_hmac_sha256_write(&hmac, one, 1); - secp256k1_hmac_sha256_write(&hmac, key, keylen); - secp256k1_hmac_sha256_finalize(&hmac, rng->k); - secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); - secp256k1_hmac_sha256_write(&hmac, rng->v, 32); - secp256k1_hmac_sha256_finalize(&hmac, rng->v); + rustsecp256k1_v0_1_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, one, 1); + rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, key, keylen); + rustsecp256k1_v0_1_0_hmac_sha256_finalize(&hmac, rng->k); + rustsecp256k1_v0_1_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_1_0_hmac_sha256_finalize(&hmac, rng->v); rng->retry = 0; } -static void secp256k1_rfc6979_hmac_sha256_generate(secp256k1_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen) { +static void rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen) { /* RFC6979 3.2.h. */ static const unsigned char zero[1] = {0x00}; if (rng->retry) { - secp256k1_hmac_sha256 hmac; - secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); - secp256k1_hmac_sha256_write(&hmac, rng->v, 32); - secp256k1_hmac_sha256_write(&hmac, zero, 1); - secp256k1_hmac_sha256_finalize(&hmac, rng->k); - secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); - secp256k1_hmac_sha256_write(&hmac, rng->v, 32); - secp256k1_hmac_sha256_finalize(&hmac, rng->v); + rustsecp256k1_v0_1_0_hmac_sha256 hmac; + rustsecp256k1_v0_1_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, zero, 1); + rustsecp256k1_v0_1_0_hmac_sha256_finalize(&hmac, rng->k); + rustsecp256k1_v0_1_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_1_0_hmac_sha256_finalize(&hmac, rng->v); } while (outlen > 0) { - secp256k1_hmac_sha256 hmac; + rustsecp256k1_v0_1_0_hmac_sha256 hmac; int now = outlen; - secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); - secp256k1_hmac_sha256_write(&hmac, rng->v, 32); - secp256k1_hmac_sha256_finalize(&hmac, rng->v); + rustsecp256k1_v0_1_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_1_0_hmac_sha256_finalize(&hmac, rng->v); if (now > 32) { now = 32; } @@ -264,7 +264,7 @@ static void secp256k1_rfc6979_hmac_sha256_generate(secp256k1_rfc6979_hmac_sha256 rng->retry = 1; } -static void secp256k1_rfc6979_hmac_sha256_finalize(secp256k1_rfc6979_hmac_sha256 *rng) { +static void rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 *rng) { memset(rng->k, 0, 32); memset(rng->v, 0, 32); rng->retry = 0; diff --git a/secp256k1-sys/depend/secp256k1/src/java/org/bitcoin/NativeSecp256k1.java b/secp256k1-sys/depend/secp256k1/src/java/org/bitcoin/NativeSecp256k1.java index 1c67802..00a2b73 100644 --- a/secp256k1-sys/depend/secp256k1/src/java/org/bitcoin/NativeSecp256k1.java +++ b/secp256k1-sys/depend/secp256k1/src/java/org/bitcoin/NativeSecp256k1.java @@ -69,7 +69,7 @@ public class NativeSecp256k1 { r.lock(); try { - return secp256k1_ecdsa_verify(byteBuff, Secp256k1Context.getContext(), signature.length, pub.length) == 1; + return 
rustsecp256k1_v0_1_0_ecdsa_verify(byteBuff, Secp256k1Context.getContext(), signature.length, pub.length) == 1; } finally { r.unlock(); } @@ -101,7 +101,7 @@ public class NativeSecp256k1 { r.lock(); try { - retByteArray = secp256k1_ecdsa_sign(byteBuff, Secp256k1Context.getContext()); + retByteArray = rustsecp256k1_v0_1_0_ecdsa_sign(byteBuff, Secp256k1Context.getContext()); } finally { r.unlock(); } @@ -134,7 +134,7 @@ public class NativeSecp256k1 { r.lock(); try { - return secp256k1_ec_seckey_verify(byteBuff,Secp256k1Context.getContext()) == 1; + return rustsecp256k1_v0_1_0_ec_seckey_verify(byteBuff,Secp256k1Context.getContext()) == 1; } finally { r.unlock(); } @@ -166,7 +166,7 @@ public class NativeSecp256k1 { r.lock(); try { - retByteArray = secp256k1_ec_pubkey_create(byteBuff, Secp256k1Context.getContext()); + retByteArray = rustsecp256k1_v0_1_0_ec_pubkey_create(byteBuff, Secp256k1Context.getContext()); } finally { r.unlock(); } @@ -187,7 +187,7 @@ public class NativeSecp256k1 { public static synchronized void cleanup() { w.lock(); try { - secp256k1_destroy_context(Secp256k1Context.getContext()); + rustsecp256k1_v0_1_0_destroy_context(Secp256k1Context.getContext()); } finally { w.unlock(); } @@ -196,7 +196,7 @@ public class NativeSecp256k1 { public static long cloneContext() { r.lock(); try { - return secp256k1_ctx_clone(Secp256k1Context.getContext()); + return rustsecp256k1_v0_1_0_ctx_clone(Secp256k1Context.getContext()); } finally { r.unlock(); } } @@ -222,7 +222,7 @@ public class NativeSecp256k1 { byte[][] retByteArray; r.lock(); try { - retByteArray = secp256k1_privkey_tweak_mul(byteBuff,Secp256k1Context.getContext()); + retByteArray = rustsecp256k1_v0_1_0_privkey_tweak_mul(byteBuff,Secp256k1Context.getContext()); } finally { r.unlock(); } @@ -261,7 +261,7 @@ public class NativeSecp256k1 { byte[][] retByteArray; r.lock(); try { - retByteArray = secp256k1_privkey_tweak_add(byteBuff,Secp256k1Context.getContext()); + retByteArray = rustsecp256k1_v0_1_0_privkey_tweak_add(byteBuff,Secp256k1Context.getContext()); } finally { r.unlock(); } @@ -300,7 +300,7 @@ public class NativeSecp256k1 { byte[][] retByteArray; r.lock(); try { - retByteArray = secp256k1_pubkey_tweak_add(byteBuff,Secp256k1Context.getContext(), pubkey.length); + retByteArray = rustsecp256k1_v0_1_0_pubkey_tweak_add(byteBuff,Secp256k1Context.getContext(), pubkey.length); } finally { r.unlock(); } @@ -339,7 +339,7 @@ public class NativeSecp256k1 { byte[][] retByteArray; r.lock(); try { - retByteArray = secp256k1_pubkey_tweak_mul(byteBuff,Secp256k1Context.getContext(), pubkey.length); + retByteArray = rustsecp256k1_v0_1_0_pubkey_tweak_mul(byteBuff,Secp256k1Context.getContext(), pubkey.length); } finally { r.unlock(); } @@ -378,7 +378,7 @@ public class NativeSecp256k1 { byte[][] retByteArray; r.lock(); try { - retByteArray = secp256k1_ecdh(byteBuff, Secp256k1Context.getContext(), pubkey.length); + retByteArray = rustsecp256k1_v0_1_0_ecdh(byteBuff, Secp256k1Context.getContext(), pubkey.length); } finally { r.unlock(); } @@ -411,36 +411,36 @@ public class NativeSecp256k1 { w.lock(); try { - return secp256k1_context_randomize(byteBuff, Secp256k1Context.getContext()) == 1; + return rustsecp256k1_v0_1_0_context_randomize(byteBuff, Secp256k1Context.getContext()) == 1; } finally { w.unlock(); } } - private static native long secp256k1_ctx_clone(long context); + private static native long rustsecp256k1_v0_1_0_ctx_clone(long context); - private static native int secp256k1_context_randomize(ByteBuffer byteBuff, long context); + private 
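These Java natives have to be renamed in lockstep with the exported symbols in org_bitcoin_NativeSecp256k1.c below, because JNI derives the C symbol from the fully-qualified method name, escaping each underscore in the method name as "_1". For the pre-rename pair visible in these hunks:

    /* Java:  private static native long secp256k1_ctx_clone(long context);
     * C:     SECP256K1_API jlong JNICALL
     *            Java_org_bitcoin_NativeSecp256k1_secp256k1_1ctx_1clone(JNIEnv *, jclass, jlong); */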
static native int rustsecp256k1_v0_1_0_context_randomize(ByteBuffer byteBuff, long context); - private static native byte[][] secp256k1_privkey_tweak_add(ByteBuffer byteBuff, long context); + private static native byte[][] rustsecp256k1_v0_1_0_privkey_tweak_add(ByteBuffer byteBuff, long context); - private static native byte[][] secp256k1_privkey_tweak_mul(ByteBuffer byteBuff, long context); + private static native byte[][] rustsecp256k1_v0_1_0_privkey_tweak_mul(ByteBuffer byteBuff, long context); - private static native byte[][] secp256k1_pubkey_tweak_add(ByteBuffer byteBuff, long context, int pubLen); + private static native byte[][] rustsecp256k1_v0_1_0_pubkey_tweak_add(ByteBuffer byteBuff, long context, int pubLen); - private static native byte[][] secp256k1_pubkey_tweak_mul(ByteBuffer byteBuff, long context, int pubLen); + private static native byte[][] rustsecp256k1_v0_1_0_pubkey_tweak_mul(ByteBuffer byteBuff, long context, int pubLen); - private static native void secp256k1_destroy_context(long context); + private static native void rustsecp256k1_v0_1_0_destroy_context(long context); - private static native int secp256k1_ecdsa_verify(ByteBuffer byteBuff, long context, int sigLen, int pubLen); + private static native int rustsecp256k1_v0_1_0_ecdsa_verify(ByteBuffer byteBuff, long context, int sigLen, int pubLen); - private static native byte[][] secp256k1_ecdsa_sign(ByteBuffer byteBuff, long context); + private static native byte[][] rustsecp256k1_v0_1_0_ecdsa_sign(ByteBuffer byteBuff, long context); - private static native int secp256k1_ec_seckey_verify(ByteBuffer byteBuff, long context); + private static native int rustsecp256k1_v0_1_0_ec_seckey_verify(ByteBuffer byteBuff, long context); - private static native byte[][] secp256k1_ec_pubkey_create(ByteBuffer byteBuff, long context); + private static native byte[][] rustsecp256k1_v0_1_0_ec_pubkey_create(ByteBuffer byteBuff, long context); - private static native byte[][] secp256k1_ec_pubkey_parse(ByteBuffer byteBuff, long context, int inputLen); + private static native byte[][] rustsecp256k1_v0_1_0_ec_pubkey_parse(ByteBuffer byteBuff, long context, int inputLen); - private static native byte[][] secp256k1_ecdh(ByteBuffer byteBuff, long context, int inputLen); + private static native byte[][] rustsecp256k1_v0_1_0_ecdh(ByteBuffer byteBuff, long context, int inputLen); } diff --git a/secp256k1-sys/depend/secp256k1/src/java/org/bitcoin/Secp256k1Context.java b/secp256k1-sys/depend/secp256k1/src/java/org/bitcoin/Secp256k1Context.java index 216c986..876226d 100644 --- a/secp256k1-sys/depend/secp256k1/src/java/org/bitcoin/Secp256k1Context.java +++ b/secp256k1-sys/depend/secp256k1/src/java/org/bitcoin/Secp256k1Context.java @@ -29,7 +29,7 @@ public class Secp256k1Context { long contextRef = -1; try { System.loadLibrary("secp256k1"); - contextRef = secp256k1_init_context(); + contextRef = rustsecp256k1_v0_1_0_init_context(); } catch (UnsatisfiedLinkError e) { System.out.println("UnsatisfiedLinkError: " + e.toString()); isEnabled = false; @@ -47,5 +47,5 @@ public class Secp256k1Context { return context; } - private static native long secp256k1_init_context(); + private static native long rustsecp256k1_v0_1_0_init_context(); } diff --git a/secp256k1-sys/depend/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c b/secp256k1-sys/depend/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c index b50970b..40a20ee 100644 --- a/secp256k1-sys/depend/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c +++ 
b/secp256k1-sys/depend/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c @@ -7,12 +7,12 @@ #include "include/secp256k1_recovery.h" -SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ctx_1clone +SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ctx_1clone (JNIEnv* env, jclass classObject, jlong ctx_l) { - const secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + const rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l; - jlong ctx_clone_l = (uintptr_t) secp256k1_context_clone(ctx); + jlong ctx_clone_l = (uintptr_t) rustsecp256k1_v0_1_0_context_clone(ctx); (void)classObject;(void)env; @@ -20,48 +20,48 @@ SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ctx_1clo } -SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1context_1randomize +SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1context_1randomize (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l) { - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l; const unsigned char* seed = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); (void)classObject; - return secp256k1_context_randomize(ctx, seed); + return rustsecp256k1_v0_1_0_context_randomize(ctx, seed); } -SECP256K1_API void JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1destroy_1context +SECP256K1_API void JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1destroy_1context (JNIEnv* env, jclass classObject, jlong ctx_l) { - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l; - secp256k1_context_destroy(ctx); + rustsecp256k1_v0_1_0_context_destroy(ctx); (void)classObject;(void)env; } -SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1verify +SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ecdsa_1verify (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint siglen, jint publen) { - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l; unsigned char* data = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); const unsigned char* sigdata = { (unsigned char*) (data + 32) }; const unsigned char* pubdata = { (unsigned char*) (data + siglen + 32) }; - secp256k1_ecdsa_signature sig; - secp256k1_pubkey pubkey; + rustsecp256k1_v0_1_0_ecdsa_signature sig; + rustsecp256k1_v0_1_0_pubkey pubkey; - int ret = secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigdata, siglen); + int ret = rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigdata, siglen); if( ret ) { - ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pubdata, publen); + ret = rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, pubdata, publen); if( ret ) { - ret = secp256k1_ecdsa_verify(ctx, &sig, data, &pubkey); + ret = rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, data, &pubkey); } } @@ -70,10 +70,10 @@ SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1ve return ret; } -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1sign +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ecdsa_1sign (JNIEnv* env, jclass 
classObject, jobject byteBufferObject, jlong ctx_l) { - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l; unsigned char* data = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); unsigned char* secKey = (unsigned char*) (data + 32); @@ -81,15 +81,15 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1e jbyteArray sigArray, intsByteArray; unsigned char intsarray[2]; - secp256k1_ecdsa_signature sig[72]; + rustsecp256k1_v0_1_0_ecdsa_signature sig[72]; - int ret = secp256k1_ecdsa_sign(ctx, sig, data, secKey, NULL, NULL); + int ret = rustsecp256k1_v0_1_0_ecdsa_sign(ctx, sig, data, secKey, NULL, NULL); unsigned char outputSer[72]; size_t outputLen = 72; if( ret ) { - int ret2 = secp256k1_ecdsa_signature_serialize_der(ctx,outputSer, &outputLen, sig ); (void)ret2; + int ret2 = rustsecp256k1_v0_1_0_ecdsa_signature_serialize_der(ctx,outputSer, &outputLen, sig ); (void)ret2; } intsarray[0] = outputLen; @@ -112,36 +112,36 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1e return retArray; } -SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1seckey_1verify +SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ec_1seckey_1verify (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l) { - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l; unsigned char* secKey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); (void)classObject; - return secp256k1_ec_seckey_verify(ctx, secKey); + return rustsecp256k1_v0_1_0_ec_seckey_verify(ctx, secKey); } -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1pubkey_1create +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ec_1pubkey_1create (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l) { - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l; const unsigned char* secKey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); - secp256k1_pubkey pubkey; + rustsecp256k1_v0_1_0_pubkey pubkey; jobjectArray retArray; jbyteArray pubkeyArray, intsByteArray; unsigned char intsarray[2]; - int ret = secp256k1_ec_pubkey_create(ctx, &pubkey, secKey); + int ret = rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey, secKey); unsigned char outputSer[65]; size_t outputLen = 65; if( ret ) { - int ret2 = secp256k1_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2; + int ret2 = rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2; } intsarray[0] = outputLen; @@ -165,10 +165,10 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1e } -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1add +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1privkey_1tweak_1add (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l) { - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l; unsigned char* 
privkey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); const unsigned char* tweak = (unsigned char*) (privkey + 32); @@ -178,7 +178,7 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1p int privkeylen = 32; - int ret = secp256k1_ec_privkey_tweak_add(ctx, privkey, tweak); + int ret = rustsecp256k1_v0_1_0_ec_privkey_tweak_add(ctx, privkey, tweak); intsarray[0] = privkeylen; intsarray[1] = ret; @@ -200,10 +200,10 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1p return retArray; } -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1mul +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1privkey_1tweak_1mul (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l) { - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l; unsigned char* privkey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); const unsigned char* tweak = (unsigned char*) (privkey + 32); @@ -213,7 +213,7 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1p int privkeylen = 32; - int ret = secp256k1_ec_privkey_tweak_mul(ctx, privkey, tweak); + int ret = rustsecp256k1_v0_1_0_ec_privkey_tweak_mul(ctx, privkey, tweak); intsarray[0] = privkeylen; intsarray[1] = ret; @@ -235,11 +235,11 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1p return retArray; } -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1add +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1pubkey_1tweak_1add (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen) { - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; -/* secp256k1_pubkey* pubkey = (secp256k1_pubkey*) (*env)->GetDirectBufferAddress(env, byteBufferObject);*/ + rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l; +/* rustsecp256k1_v0_1_0_pubkey* pubkey = (rustsecp256k1_v0_1_0_pubkey*) (*env)->GetDirectBufferAddress(env, byteBufferObject);*/ unsigned char* pkey = (*env)->GetDirectBufferAddress(env, byteBufferObject); const unsigned char* tweak = (unsigned char*) (pkey + publen); @@ -249,15 +249,15 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1p unsigned char outputSer[65]; size_t outputLen = 65; - secp256k1_pubkey pubkey; - int ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pkey, publen); + rustsecp256k1_v0_1_0_pubkey pubkey; + int ret = rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, pkey, publen); if( ret ) { - ret = secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, tweak); + ret = rustsecp256k1_v0_1_0_ec_pubkey_tweak_add(ctx, &pubkey, tweak); } if( ret ) { - int ret2 = secp256k1_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2; + int ret2 = rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2; } intsarray[0] = outputLen; @@ -280,10 +280,10 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1p return retArray; } -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1mul +SECP256K1_API jobjectArray JNICALL 
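The tweak natives in this file forward to the library's in-place key tweaking: for secret keys, tweak_add computes (seckey + tweak) mod n and reports failure when the tweak or the resulting key is out of range. A caller-side sketch of the underlying C call (ctx and tweak32 assumed in scope):

    unsigned char seckey[32];   /* existing valid secret key */
    if (!rustsecp256k1_v0_1_0_ec_privkey_tweak_add(ctx, seckey, tweak32)) {
        /* tweak out of range, or the tweaked key would be invalid:
         * treat seckey as unusable */
    }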
Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1pubkey_1tweak_1mul (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen) { - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l; unsigned char* pkey = (*env)->GetDirectBufferAddress(env, byteBufferObject); const unsigned char* tweak = (unsigned char*) (pkey + publen); @@ -293,15 +293,15 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1p unsigned char outputSer[65]; size_t outputLen = 65; - secp256k1_pubkey pubkey; - int ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pkey, publen); + rustsecp256k1_v0_1_0_pubkey pubkey; + int ret = rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, pkey, publen); if ( ret ) { - ret = secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, tweak); + ret = rustsecp256k1_v0_1_0_ec_pubkey_tweak_mul(ctx, &pubkey, tweak); } if( ret ) { - int ret2 = secp256k1_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2; + int ret2 = rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2; } intsarray[0] = outputLen; @@ -324,7 +324,7 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1p return retArray; } -SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1pubkey_1combine +SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ecdsa_1pubkey_1combine (JNIEnv * env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint numkeys) { (void)classObject;(void)env;(void)byteBufferObject;(void)ctx_l;(void)numkeys; @@ -332,24 +332,24 @@ SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1p return 0; } -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdh +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ecdh (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen) { - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l; const unsigned char* secdata = (*env)->GetDirectBufferAddress(env, byteBufferObject); const unsigned char* pubdata = (const unsigned char*) (secdata + 32); jobjectArray retArray; jbyteArray outArray, intsByteArray; unsigned char intsarray[1]; - secp256k1_pubkey pubkey; + rustsecp256k1_v0_1_0_pubkey pubkey; unsigned char nonce_res[32]; size_t outputLen = 32; - int ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pubdata, publen); + int ret = rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, pubdata, publen); if (ret) { - ret = secp256k1_ecdh( + ret = rustsecp256k1_v0_1_0_ecdh( ctx, nonce_res, &pubkey, diff --git a/secp256k1-sys/depend/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h b/secp256k1-sys/depend/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h index fe613c9..a0893d8 100644 --- a/secp256k1-sys/depend/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h +++ b/secp256k1-sys/depend/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h @@ -10,106 +10,106 @@ extern "C" { #endif /* * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_ctx_clone + * Method: rustsecp256k1_v0_1_0_ctx_clone * Signature: (J)J */ -SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ctx_1clone +SECP256K1_API jlong JNICALL 
Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ctx_1clone (JNIEnv *, jclass, jlong); /* * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_context_randomize + * Method: rustsecp256k1_v0_1_0_context_randomize * Signature: (Ljava/nio/ByteBuffer;J)I */ -SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1context_1randomize +SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1context_1randomize (JNIEnv *, jclass, jobject, jlong); /* * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_privkey_tweak_add + * Method: rustsecp256k1_v0_1_0_privkey_tweak_add * Signature: (Ljava/nio/ByteBuffer;J)[[B */ -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1add +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1privkey_1tweak_1add (JNIEnv *, jclass, jobject, jlong); /* * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_privkey_tweak_mul + * Method: rustsecp256k1_v0_1_0_privkey_tweak_mul * Signature: (Ljava/nio/ByteBuffer;J)[[B */ -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1mul +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1privkey_1tweak_1mul (JNIEnv *, jclass, jobject, jlong); /* * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_pubkey_tweak_add + * Method: rustsecp256k1_v0_1_0_pubkey_tweak_add * Signature: (Ljava/nio/ByteBuffer;JI)[[B */ -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1add +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1pubkey_1tweak_1add (JNIEnv *, jclass, jobject, jlong, jint); /* * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_pubkey_tweak_mul + * Method: rustsecp256k1_v0_1_0_pubkey_tweak_mul * Signature: (Ljava/nio/ByteBuffer;JI)[[B */ -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1mul +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1pubkey_1tweak_1mul (JNIEnv *, jclass, jobject, jlong, jint); /* * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_destroy_context + * Method: rustsecp256k1_v0_1_0_destroy_context * Signature: (J)V */ -SECP256K1_API void JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1destroy_1context +SECP256K1_API void JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1destroy_1context (JNIEnv *, jclass, jlong); /* * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_ecdsa_verify + * Method: rustsecp256k1_v0_1_0_ecdsa_verify * Signature: (Ljava/nio/ByteBuffer;JII)I */ -SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1verify +SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ecdsa_1verify (JNIEnv *, jclass, jobject, jlong, jint, jint); /* * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_ecdsa_sign + * Method: rustsecp256k1_v0_1_0_ecdsa_sign * Signature: (Ljava/nio/ByteBuffer;J)[[B */ -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1sign +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ecdsa_1sign (JNIEnv *, jclass, jobject, jlong); /* * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_ec_seckey_verify + * Method: rustsecp256k1_v0_1_0_ec_seckey_verify * Signature: (Ljava/nio/ByteBuffer;J)I */ -SECP256K1_API jint JNICALL 
Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1seckey_1verify +SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ec_1seckey_1verify (JNIEnv *, jclass, jobject, jlong); /* * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_ec_pubkey_create + * Method: rustsecp256k1_v0_1_0_ec_pubkey_create * Signature: (Ljava/nio/ByteBuffer;J)[[B */ -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1pubkey_1create +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ec_1pubkey_1create (JNIEnv *, jclass, jobject, jlong); /* * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_ec_pubkey_parse + * Method: rustsecp256k1_v0_1_0_ec_pubkey_parse * Signature: (Ljava/nio/ByteBuffer;JI)[[B */ -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1pubkey_1parse +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ec_1pubkey_1parse (JNIEnv *, jclass, jobject, jlong, jint); /* * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_ecdh + * Method: rustsecp256k1_v0_1_0_ecdh * Signature: (Ljava/nio/ByteBuffer;JI)[[B */ -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdh +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ecdh (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen); diff --git a/secp256k1-sys/depend/secp256k1/src/java/org_bitcoin_Secp256k1Context.c b/secp256k1-sys/depend/secp256k1/src/java/org_bitcoin_Secp256k1Context.c index a52939e..c3f88ab 100644 --- a/secp256k1-sys/depend/secp256k1/src/java/org_bitcoin_Secp256k1Context.c +++ b/secp256k1-sys/depend/secp256k1/src/java/org_bitcoin_Secp256k1Context.c @@ -3,10 +3,10 @@ #include "org_bitcoin_Secp256k1Context.h" #include "include/secp256k1.h" -SECP256K1_API jlong JNICALL Java_org_bitcoin_Secp256k1Context_secp256k1_1init_1context +SECP256K1_API jlong JNICALL Java_org_bitcoin_Secp256k1Context_rustsecp256k1_v0_1_0_1init_1context (JNIEnv* env, jclass classObject) { - secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + rustsecp256k1_v0_1_0_context *ctx = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); (void)classObject;(void)env; diff --git a/secp256k1-sys/depend/secp256k1/src/java/org_bitcoin_Secp256k1Context.h b/secp256k1-sys/depend/secp256k1/src/java/org_bitcoin_Secp256k1Context.h index 0d2bc84..9b82b47 100644 --- a/secp256k1-sys/depend/secp256k1/src/java/org_bitcoin_Secp256k1Context.h +++ b/secp256k1-sys/depend/secp256k1/src/java/org_bitcoin_Secp256k1Context.h @@ -10,10 +10,10 @@ extern "C" { #endif /* * Class: org_bitcoin_Secp256k1Context - * Method: secp256k1_init_context + * Method: rustsecp256k1_v0_1_0_init_context * Signature: ()J */ -SECP256K1_API jlong JNICALL Java_org_bitcoin_Secp256k1Context_secp256k1_1init_1context +SECP256K1_API jlong JNICALL Java_org_bitcoin_Secp256k1Context_rustsecp256k1_v0_1_0_1init_1context (JNIEnv *, jclass); #ifdef __cplusplus diff --git a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/Makefile.am.include b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/Makefile.am.include index e3088b4..be73d93 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/Makefile.am.include +++ b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/Makefile.am.include @@ -1,4 +1,4 @@ -include_HEADERS += include/secp256k1_ecdh.h +include_HEADERS += 
include/rustsecp256k1_v0_1_0_ecdh.h noinst_HEADERS += src/modules/ecdh/main_impl.h noinst_HEADERS += src/modules/ecdh/tests_impl.h if USE_BENCHMARK diff --git a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/main_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/main_impl.h index 44cb68e..c14535b 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/main_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/main_impl.h @@ -12,55 +12,55 @@ static int ecdh_hash_function_sha256(unsigned char *output, const unsigned char *x, const unsigned char *y, void *data) { unsigned char version = (y[31] & 0x01) | 0x02; - secp256k1_sha256 sha; + rustsecp256k1_v0_1_0_sha256 sha; (void)data; - secp256k1_sha256_initialize(&sha); - secp256k1_sha256_write(&sha, &version, 1); - secp256k1_sha256_write(&sha, x, 32); - secp256k1_sha256_finalize(&sha, output); + rustsecp256k1_v0_1_0_sha256_initialize(&sha); + rustsecp256k1_v0_1_0_sha256_write(&sha, &version, 1); + rustsecp256k1_v0_1_0_sha256_write(&sha, x, 32); + rustsecp256k1_v0_1_0_sha256_finalize(&sha, output); return 1; } -const secp256k1_ecdh_hash_function secp256k1_ecdh_hash_function_sha256 = ecdh_hash_function_sha256; -const secp256k1_ecdh_hash_function secp256k1_ecdh_hash_function_default = ecdh_hash_function_sha256; +const rustsecp256k1_v0_1_0_ecdh_hash_function rustsecp256k1_v0_1_0_ecdh_hash_function_sha256 = ecdh_hash_function_sha256; +const rustsecp256k1_v0_1_0_ecdh_hash_function rustsecp256k1_v0_1_0_ecdh_hash_function_default = ecdh_hash_function_sha256; -int secp256k1_ecdh(const secp256k1_context* ctx, unsigned char *output, const secp256k1_pubkey *point, const unsigned char *scalar, secp256k1_ecdh_hash_function hashfp, void *data) { +int rustsecp256k1_v0_1_0_ecdh(const rustsecp256k1_v0_1_0_context* ctx, unsigned char *output, const rustsecp256k1_v0_1_0_pubkey *point, const unsigned char *scalar, rustsecp256k1_v0_1_0_ecdh_hash_function hashfp, void *data) { int ret = 0; int overflow = 0; - secp256k1_gej res; - secp256k1_ge pt; - secp256k1_scalar s; + rustsecp256k1_v0_1_0_gej res; + rustsecp256k1_v0_1_0_ge pt; + rustsecp256k1_v0_1_0_scalar s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output != NULL); ARG_CHECK(point != NULL); ARG_CHECK(scalar != NULL); if (hashfp == NULL) { - hashfp = secp256k1_ecdh_hash_function_default; + hashfp = rustsecp256k1_v0_1_0_ecdh_hash_function_default; } - secp256k1_pubkey_load(ctx, &pt, point); - secp256k1_scalar_set_b32(&s, scalar, &overflow); - if (overflow || secp256k1_scalar_is_zero(&s)) { + rustsecp256k1_v0_1_0_pubkey_load(ctx, &pt, point); + rustsecp256k1_v0_1_0_scalar_set_b32(&s, scalar, &overflow); + if (overflow || rustsecp256k1_v0_1_0_scalar_is_zero(&s)) { ret = 0; } else { unsigned char x[32]; unsigned char y[32]; - secp256k1_ecmult_const(&res, &pt, &s, 256); - secp256k1_ge_set_gej(&pt, &res); + rustsecp256k1_v0_1_0_ecmult_const(&res, &pt, &s, 256); + rustsecp256k1_v0_1_0_ge_set_gej(&pt, &res); /* Compute a hash of the point */ - secp256k1_fe_normalize(&pt.x); - secp256k1_fe_normalize(&pt.y); - secp256k1_fe_get_b32(x, &pt.x); - secp256k1_fe_get_b32(y, &pt.y); + rustsecp256k1_v0_1_0_fe_normalize(&pt.x); + rustsecp256k1_v0_1_0_fe_normalize(&pt.y); + rustsecp256k1_v0_1_0_fe_get_b32(x, &pt.x); + rustsecp256k1_v0_1_0_fe_get_b32(y, &pt.y); ret = hashfp(output, x, y, data); } - secp256k1_scalar_clear(&s); + rustsecp256k1_v0_1_0_scalar_clear(&s); return ret; } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/tests_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/tests_impl.h index 
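rustsecp256k1_v0_1_0_ecdh above multiplies the peer's point by the local scalar and, with the default hash, returns SHA-256 of the compressed shared point (note the 0x02/0x03 parity byte derived from y in ecdh_hash_function_sha256). A caller-side sketch (ctx, peer_pubkey, and seckey32 assumed in scope):

    unsigned char secret[32];
    if (!rustsecp256k1_v0_1_0_ecdh(ctx, secret, &peer_pubkey, seckey32, NULL, NULL)) {
        /* scalar was zero or overflowed, or the hash callback failed */
    }
    /* on success, secret == SHA256(compressed shared point) */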
fe26e8f..084811f 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/tests_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/tests_impl.h @@ -26,69 +26,69 @@ int ecdh_hash_function_custom(unsigned char *output, const unsigned char *x, con void test_ecdh_api(void) { /* Setup context that just counts errors */ - secp256k1_context *tctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN); - secp256k1_pubkey point; + rustsecp256k1_v0_1_0_context *tctx = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN); + rustsecp256k1_v0_1_0_pubkey point; unsigned char res[32]; unsigned char s_one[32] = { 0 }; int32_t ecount = 0; s_one[31] = 1; - secp256k1_context_set_error_callback(tctx, counting_illegal_callback_fn, &ecount); - secp256k1_context_set_illegal_callback(tctx, counting_illegal_callback_fn, &ecount); - CHECK(secp256k1_ec_pubkey_create(tctx, &point, s_one) == 1); + rustsecp256k1_v0_1_0_context_set_error_callback(tctx, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_1_0_context_set_illegal_callback(tctx, counting_illegal_callback_fn, &ecount); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(tctx, &point, s_one) == 1); /* Check all NULLs are detected */ - CHECK(secp256k1_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1); CHECK(ecount == 0); - CHECK(secp256k1_ecdh(tctx, NULL, &point, s_one, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdh(tctx, NULL, &point, s_one, NULL, NULL) == 0); CHECK(ecount == 1); - CHECK(secp256k1_ecdh(tctx, res, NULL, s_one, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdh(tctx, res, NULL, s_one, NULL, NULL) == 0); CHECK(ecount == 2); - CHECK(secp256k1_ecdh(tctx, res, &point, NULL, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdh(tctx, res, &point, NULL, NULL, NULL) == 0); CHECK(ecount == 3); - CHECK(secp256k1_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1); CHECK(ecount == 3); /* Cleanup */ - secp256k1_context_destroy(tctx); + rustsecp256k1_v0_1_0_context_destroy(tctx); } void test_ecdh_generator_basepoint(void) { unsigned char s_one[32] = { 0 }; - secp256k1_pubkey point[2]; + rustsecp256k1_v0_1_0_pubkey point[2]; int i; s_one[31] = 1; /* Check against pubkey creation when the basepoint is the generator */ for (i = 0; i < 100; ++i) { - secp256k1_sha256 sha; + rustsecp256k1_v0_1_0_sha256 sha; unsigned char s_b32[32]; unsigned char output_ecdh[65]; unsigned char output_ser[32]; unsigned char point_ser[65]; size_t point_ser_len = sizeof(point_ser); - secp256k1_scalar s; + rustsecp256k1_v0_1_0_scalar s; random_scalar_order(&s); - secp256k1_scalar_get_b32(s_b32, &s); + rustsecp256k1_v0_1_0_scalar_get_b32(s_b32, &s); - CHECK(secp256k1_ec_pubkey_create(ctx, &point[0], s_one) == 1); - CHECK(secp256k1_ec_pubkey_create(ctx, &point[1], s_b32) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &point[0], s_one) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &point[1], s_b32) == 1); /* compute using ECDH function with custom hash function */ - CHECK(secp256k1_ecdh(ctx, output_ecdh, &point[0], s_b32, ecdh_hash_function_custom, NULL) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdh(ctx, output_ecdh, &point[0], s_b32, ecdh_hash_function_custom, NULL) == 1); /* compute "explicitly" */ - CHECK(secp256k1_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_UNCOMPRESSED) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, 
&point[1], SECP256K1_EC_UNCOMPRESSED) == 1); /* compare */ CHECK(memcmp(output_ecdh, point_ser, 65) == 0); /* compute using ECDH function with default hash function */ - CHECK(secp256k1_ecdh(ctx, output_ecdh, &point[0], s_b32, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdh(ctx, output_ecdh, &point[0], s_b32, NULL, NULL) == 1); /* compute "explicitly" */ - CHECK(secp256k1_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_COMPRESSED) == 1); - secp256k1_sha256_initialize(&sha); - secp256k1_sha256_write(&sha, point_ser, point_ser_len); - secp256k1_sha256_finalize(&sha, output_ser); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_COMPRESSED) == 1); + rustsecp256k1_v0_1_0_sha256_initialize(&sha); + rustsecp256k1_v0_1_0_sha256_write(&sha, point_ser, point_ser_len); + rustsecp256k1_v0_1_0_sha256_finalize(&sha, output_ser); /* compare */ CHECK(memcmp(output_ecdh, output_ser, 32) == 0); } @@ -104,23 +104,23 @@ void test_bad_scalar(void) { }; unsigned char s_rand[32] = { 0 }; unsigned char output[32]; - secp256k1_scalar rand; - secp256k1_pubkey point; + rustsecp256k1_v0_1_0_scalar rand; + rustsecp256k1_v0_1_0_pubkey point; /* Create random point */ random_scalar_order(&rand); - secp256k1_scalar_get_b32(s_rand, &rand); - CHECK(secp256k1_ec_pubkey_create(ctx, &point, s_rand) == 1); + rustsecp256k1_v0_1_0_scalar_get_b32(s_rand, &rand); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &point, s_rand) == 1); /* Try to multiply it by bad values */ - CHECK(secp256k1_ecdh(ctx, output, &point, s_zero, NULL, NULL) == 0); - CHECK(secp256k1_ecdh(ctx, output, &point, s_overflow, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdh(ctx, output, &point, s_zero, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdh(ctx, output, &point, s_overflow, NULL, NULL) == 0); /* ...and a good one */ s_overflow[31] -= 1; - CHECK(secp256k1_ecdh(ctx, output, &point, s_overflow, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdh(ctx, output, &point, s_overflow, NULL, NULL) == 1); /* Hash function failure results in ecdh failure */ - CHECK(secp256k1_ecdh(ctx, output, &point, s_overflow, ecdh_hash_function_test_fail, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdh(ctx, output, &point, s_overflow, ecdh_hash_function_test_fail, NULL) == 0); } void run_ecdh_tests(void) { diff --git a/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include b/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include index bf23c26..776f3b5 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include +++ b/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include @@ -1,4 +1,4 @@ -include_HEADERS += include/secp256k1_recovery.h +include_HEADERS += include/rustsecp256k1_v0_1_0_recovery.h noinst_HEADERS += src/modules/recovery/main_impl.h noinst_HEADERS += src/modules/recovery/tests_impl.h if USE_BENCHMARK diff --git a/secp256k1-sys/depend/secp256k1/src/modules/recovery/main_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/recovery/main_impl.h index 2f6691c..ed4e319 100755 --- a/secp256k1-sys/depend/secp256k1/src/modules/recovery/main_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/recovery/main_impl.h @@ -9,34 +9,34 @@ #include "include/secp256k1_recovery.h" -static void secp256k1_ecdsa_recoverable_signature_load(const secp256k1_context* ctx, secp256k1_scalar* r, secp256k1_scalar* s, int* recid, const secp256k1_ecdsa_recoverable_signature* sig) { +static void 
rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_load(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_scalar* r, rustsecp256k1_v0_1_0_scalar* s, int* recid, const rustsecp256k1_v0_1_0_ecdsa_recoverable_signature* sig) { (void)ctx; - if (sizeof(secp256k1_scalar) == 32) { - /* When the secp256k1_scalar type is exactly 32 byte, use its - * representation inside secp256k1_ecdsa_signature, as conversion is very fast. - * Note that secp256k1_ecdsa_signature_save must use the same representation. */ + if (sizeof(rustsecp256k1_v0_1_0_scalar) == 32) { + /* When the rustsecp256k1_v0_1_0_scalar type is exactly 32 byte, use its + * representation inside rustsecp256k1_v0_1_0_ecdsa_signature, as conversion is very fast. + * Note that rustsecp256k1_v0_1_0_ecdsa_signature_save must use the same representation. */ memcpy(r, &sig->data[0], 32); memcpy(s, &sig->data[32], 32); } else { - secp256k1_scalar_set_b32(r, &sig->data[0], NULL); - secp256k1_scalar_set_b32(s, &sig->data[32], NULL); + rustsecp256k1_v0_1_0_scalar_set_b32(r, &sig->data[0], NULL); + rustsecp256k1_v0_1_0_scalar_set_b32(s, &sig->data[32], NULL); } *recid = sig->data[64]; } -static void secp256k1_ecdsa_recoverable_signature_save(secp256k1_ecdsa_recoverable_signature* sig, const secp256k1_scalar* r, const secp256k1_scalar* s, int recid) { - if (sizeof(secp256k1_scalar) == 32) { +static void rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_save(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature* sig, const rustsecp256k1_v0_1_0_scalar* r, const rustsecp256k1_v0_1_0_scalar* s, int recid) { + if (sizeof(rustsecp256k1_v0_1_0_scalar) == 32) { memcpy(&sig->data[0], r, 32); memcpy(&sig->data[32], s, 32); } else { - secp256k1_scalar_get_b32(&sig->data[0], r); - secp256k1_scalar_get_b32(&sig->data[32], s); + rustsecp256k1_v0_1_0_scalar_get_b32(&sig->data[0], r); + rustsecp256k1_v0_1_0_scalar_get_b32(&sig->data[32], s); } sig->data[64] = recid; } -int secp256k1_ecdsa_recoverable_signature_parse_compact(const secp256k1_context* ctx, secp256k1_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid) { - secp256k1_scalar r, s; +int rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid) { + rustsecp256k1_v0_1_0_scalar r, s; int ret = 1; int overflow = 0; @@ -45,144 +45,144 @@ int secp256k1_ecdsa_recoverable_signature_parse_compact(const secp256k1_context* ARG_CHECK(input64 != NULL); ARG_CHECK(recid >= 0 && recid <= 3); - secp256k1_scalar_set_b32(&r, &input64[0], &overflow); + rustsecp256k1_v0_1_0_scalar_set_b32(&r, &input64[0], &overflow); ret &= !overflow; - secp256k1_scalar_set_b32(&s, &input64[32], &overflow); + rustsecp256k1_v0_1_0_scalar_set_b32(&s, &input64[32], &overflow); ret &= !overflow; if (ret) { - secp256k1_ecdsa_recoverable_signature_save(sig, &r, &s, recid); + rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_save(sig, &r, &s, recid); } else { memset(sig, 0, sizeof(*sig)); } return ret; } -int secp256k1_ecdsa_recoverable_signature_serialize_compact(const secp256k1_context* ctx, unsigned char *output64, int *recid, const secp256k1_ecdsa_recoverable_signature* sig) { - secp256k1_scalar r, s; +int rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_serialize_compact(const rustsecp256k1_v0_1_0_context* ctx, unsigned char *output64, int *recid, const rustsecp256k1_v0_1_0_ecdsa_recoverable_signature* sig) { + rustsecp256k1_v0_1_0_scalar r, s; (void)ctx; ARG_CHECK(output64 != NULL); 
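 /* Note on the representation: data[0..63] holds r followed by s (either as
  * raw rustsecp256k1_v0_1_0_scalar bytes or as 32-byte big-endian strings,
  * depending on the sizeof check in
  * rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_save() above), and
  * data[64] holds the recovery id (0..3). Serialization here loads the
  * scalars back out and emits a 64-byte big-endian r||s, returning the
  * recovery id separately through *recid. */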
ARG_CHECK(sig != NULL); ARG_CHECK(recid != NULL); - secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, recid, sig); - secp256k1_scalar_get_b32(&output64[0], &r); - secp256k1_scalar_get_b32(&output64[32], &s); + rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_load(ctx, &r, &s, recid, sig); + rustsecp256k1_v0_1_0_scalar_get_b32(&output64[0], &r); + rustsecp256k1_v0_1_0_scalar_get_b32(&output64[32], &s); return 1; } -int secp256k1_ecdsa_recoverable_signature_convert(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const secp256k1_ecdsa_recoverable_signature* sigin) { - secp256k1_scalar r, s; +int rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_ecdsa_signature* sig, const rustsecp256k1_v0_1_0_ecdsa_recoverable_signature* sigin) { + rustsecp256k1_v0_1_0_scalar r, s; int recid; (void)ctx; ARG_CHECK(sig != NULL); ARG_CHECK(sigin != NULL); - secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, sigin); - secp256k1_ecdsa_signature_save(sig, &r, &s); + rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, sigin); + rustsecp256k1_v0_1_0_ecdsa_signature_save(sig, &r, &s); return 1; } -static int secp256k1_ecdsa_sig_recover(const secp256k1_ecmult_context *ctx, const secp256k1_scalar *sigr, const secp256k1_scalar* sigs, secp256k1_ge *pubkey, const secp256k1_scalar *message, int recid) { +static int rustsecp256k1_v0_1_0_ecdsa_sig_recover(const rustsecp256k1_v0_1_0_ecmult_context *ctx, const rustsecp256k1_v0_1_0_scalar *sigr, const rustsecp256k1_v0_1_0_scalar* sigs, rustsecp256k1_v0_1_0_ge *pubkey, const rustsecp256k1_v0_1_0_scalar *message, int recid) { unsigned char brx[32]; - secp256k1_fe fx; - secp256k1_ge x; - secp256k1_gej xj; - secp256k1_scalar rn, u1, u2; - secp256k1_gej qj; + rustsecp256k1_v0_1_0_fe fx; + rustsecp256k1_v0_1_0_ge x; + rustsecp256k1_v0_1_0_gej xj; + rustsecp256k1_v0_1_0_scalar rn, u1, u2; + rustsecp256k1_v0_1_0_gej qj; int r; - if (secp256k1_scalar_is_zero(sigr) || secp256k1_scalar_is_zero(sigs)) { + if (rustsecp256k1_v0_1_0_scalar_is_zero(sigr) || rustsecp256k1_v0_1_0_scalar_is_zero(sigs)) { return 0; } - secp256k1_scalar_get_b32(brx, sigr); - r = secp256k1_fe_set_b32(&fx, brx); + rustsecp256k1_v0_1_0_scalar_get_b32(brx, sigr); + r = rustsecp256k1_v0_1_0_fe_set_b32(&fx, brx); (void)r; VERIFY_CHECK(r); /* brx comes from a scalar, so is less than the order; certainly less than p */ if (recid & 2) { - if (secp256k1_fe_cmp_var(&fx, &secp256k1_ecdsa_const_p_minus_order) >= 0) { + if (rustsecp256k1_v0_1_0_fe_cmp_var(&fx, &rustsecp256k1_v0_1_0_ecdsa_const_p_minus_order) >= 0) { return 0; } - secp256k1_fe_add(&fx, &secp256k1_ecdsa_const_order_as_fe); + rustsecp256k1_v0_1_0_fe_add(&fx, &rustsecp256k1_v0_1_0_ecdsa_const_order_as_fe); } - if (!secp256k1_ge_set_xo_var(&x, &fx, recid & 1)) { + if (!rustsecp256k1_v0_1_0_ge_set_xo_var(&x, &fx, recid & 1)) { return 0; } - secp256k1_gej_set_ge(&xj, &x); - secp256k1_scalar_inverse_var(&rn, sigr); - secp256k1_scalar_mul(&u1, &rn, message); - secp256k1_scalar_negate(&u1, &u1); - secp256k1_scalar_mul(&u2, &rn, sigs); - secp256k1_ecmult(ctx, &qj, &xj, &u2, &u1); - secp256k1_ge_set_gej_var(pubkey, &qj); - return !secp256k1_gej_is_infinity(&qj); + rustsecp256k1_v0_1_0_gej_set_ge(&xj, &x); + rustsecp256k1_v0_1_0_scalar_inverse_var(&rn, sigr); + rustsecp256k1_v0_1_0_scalar_mul(&u1, &rn, message); + rustsecp256k1_v0_1_0_scalar_negate(&u1, &u1); + rustsecp256k1_v0_1_0_scalar_mul(&u2, &rn, sigs); + rustsecp256k1_v0_1_0_ecmult(ctx, &qj, &xj, 
&u2, &u1); + rustsecp256k1_v0_1_0_ge_set_gej_var(pubkey, &qj); + return !rustsecp256k1_v0_1_0_gej_is_infinity(&qj); } -int secp256k1_ecdsa_sign_recoverable(const secp256k1_context* ctx, secp256k1_ecdsa_recoverable_signature *signature, const unsigned char *msg32, const unsigned char *seckey, secp256k1_nonce_function noncefp, const void* noncedata) { - secp256k1_scalar r, s; - secp256k1_scalar sec, non, msg; +int rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_ecdsa_recoverable_signature *signature, const unsigned char *msg32, const unsigned char *seckey, rustsecp256k1_v0_1_0_nonce_function noncefp, const void* noncedata) { + rustsecp256k1_v0_1_0_scalar r, s; + rustsecp256k1_v0_1_0_scalar sec, non, msg; int recid; int ret = 0; int overflow = 0; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_1_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(msg32 != NULL); ARG_CHECK(signature != NULL); ARG_CHECK(seckey != NULL); if (noncefp == NULL) { - noncefp = secp256k1_nonce_function_default; + noncefp = rustsecp256k1_v0_1_0_nonce_function_default; } - secp256k1_scalar_set_b32(&sec, seckey, &overflow); + rustsecp256k1_v0_1_0_scalar_set_b32(&sec, seckey, &overflow); /* Fail if the secret key is invalid. */ - if (!overflow && !secp256k1_scalar_is_zero(&sec)) { + if (!overflow && !rustsecp256k1_v0_1_0_scalar_is_zero(&sec)) { unsigned char nonce32[32]; unsigned int count = 0; - secp256k1_scalar_set_b32(&msg, msg32, NULL); + rustsecp256k1_v0_1_0_scalar_set_b32(&msg, msg32, NULL); while (1) { ret = noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count); if (!ret) { break; } - secp256k1_scalar_set_b32(&non, nonce32, &overflow); - if (!secp256k1_scalar_is_zero(&non) && !overflow) { - if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, &recid)) { + rustsecp256k1_v0_1_0_scalar_set_b32(&non, nonce32, &overflow); + if (!rustsecp256k1_v0_1_0_scalar_is_zero(&non) && !overflow) { + if (rustsecp256k1_v0_1_0_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, &recid)) { break; } } count++; } memset(nonce32, 0, 32); - secp256k1_scalar_clear(&msg); - secp256k1_scalar_clear(&non); - secp256k1_scalar_clear(&sec); + rustsecp256k1_v0_1_0_scalar_clear(&msg); + rustsecp256k1_v0_1_0_scalar_clear(&non); + rustsecp256k1_v0_1_0_scalar_clear(&sec); } if (ret) { - secp256k1_ecdsa_recoverable_signature_save(signature, &r, &s, recid); + rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_save(signature, &r, &s, recid); } else { memset(signature, 0, sizeof(*signature)); } return ret; } -int secp256k1_ecdsa_recover(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const secp256k1_ecdsa_recoverable_signature *signature, const unsigned char *msg32) { - secp256k1_ge q; - secp256k1_scalar r, s; - secp256k1_scalar m; +int rustsecp256k1_v0_1_0_ecdsa_recover(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_pubkey *pubkey, const rustsecp256k1_v0_1_0_ecdsa_recoverable_signature *signature, const unsigned char *msg32) { + rustsecp256k1_v0_1_0_ge q; + rustsecp256k1_v0_1_0_scalar r, s; + rustsecp256k1_v0_1_0_scalar m; int recid; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); + ARG_CHECK(rustsecp256k1_v0_1_0_ecmult_context_is_built(&ctx->ecmult_ctx)); ARG_CHECK(msg32 != NULL); ARG_CHECK(signature != NULL); ARG_CHECK(pubkey != NULL); - secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, 
&recid, signature); + rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, signature); VERIFY_CHECK(recid >= 0 && recid < 4); /* should have been caught in parse_compact */ - secp256k1_scalar_set_b32(&m, msg32, NULL); - if (secp256k1_ecdsa_sig_recover(&ctx->ecmult_ctx, &r, &s, &q, &m, recid)) { - secp256k1_pubkey_save(pubkey, &q); + rustsecp256k1_v0_1_0_scalar_set_b32(&m, msg32, NULL); + if (rustsecp256k1_v0_1_0_ecdsa_sig_recover(&ctx->ecmult_ctx, &r, &s, &q, &m, recid)) { + rustsecp256k1_v0_1_0_pubkey_save(pubkey, &q); return 1; } else { memset(pubkey, 0, sizeof(*pubkey)); diff --git a/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_impl.h index 5c9bbe8..639c0ac 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_impl.h @@ -25,19 +25,19 @@ static int recovery_test_nonce_function(unsigned char *nonce32, const unsigned c } /* On the next run, return a valid nonce, but flip a coin as to whether or not to fail signing. */ memset(nonce32, 1, 32); - return secp256k1_rand_bits(1); + return rustsecp256k1_v0_1_0_rand_bits(1); } void test_ecdsa_recovery_api(void) { /* Setup contexts that just count errors */ - secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE); - secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN); - secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY); - secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - secp256k1_pubkey pubkey; - secp256k1_pubkey recpubkey; - secp256k1_ecdsa_signature normal_sig; - secp256k1_ecdsa_recoverable_signature recsig; + rustsecp256k1_v0_1_0_context *none = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_NONE); + rustsecp256k1_v0_1_0_context *sign = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN); + rustsecp256k1_v0_1_0_context *vrfy = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_VERIFY); + rustsecp256k1_v0_1_0_context *both = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + rustsecp256k1_v0_1_0_pubkey pubkey; + rustsecp256k1_v0_1_0_pubkey recpubkey; + rustsecp256k1_v0_1_0_ecdsa_signature normal_sig; + rustsecp256k1_v0_1_0_ecdsa_recoverable_signature recsig; unsigned char privkey[32] = { 1 }; unsigned char message[32] = { 2 }; int32_t ecount = 0; @@ -49,159 +49,159 @@ void test_ecdsa_recovery_api(void) { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount); - secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount); - secp256k1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount); - secp256k1_context_set_error_callback(both, counting_illegal_callback_fn, &ecount); - secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); - secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount); - secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); - secp256k1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_1_0_context_set_error_callback(none, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_1_0_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount); + 
rustsecp256k1_v0_1_0_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_1_0_context_set_error_callback(both, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_1_0_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_1_0_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_1_0_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_1_0_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount); /* Construct and verify corresponding public key. */ - CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_seckey_verify(ctx, privkey) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey, privkey) == 1); /* Check bad contexts and NULLs for signing */ ecount = 0; - CHECK(secp256k1_ecdsa_sign_recoverable(none, &recsig, message, privkey, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(none, &recsig, message, privkey, NULL, NULL) == 0); CHECK(ecount == 1); - CHECK(secp256k1_ecdsa_sign_recoverable(sign, &recsig, message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(sign, &recsig, message, privkey, NULL, NULL) == 1); CHECK(ecount == 1); - CHECK(secp256k1_ecdsa_sign_recoverable(vrfy, &recsig, message, privkey, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(vrfy, &recsig, message, privkey, NULL, NULL) == 0); CHECK(ecount == 2); - CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1); CHECK(ecount == 2); - CHECK(secp256k1_ecdsa_sign_recoverable(both, NULL, message, privkey, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(both, NULL, message, privkey, NULL, NULL) == 0); CHECK(ecount == 3); - CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, NULL, privkey, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(both, &recsig, NULL, privkey, NULL, NULL) == 0); CHECK(ecount == 4); - CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, NULL, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(both, &recsig, message, NULL, NULL, NULL) == 0); CHECK(ecount == 5); /* This will fail or succeed randomly, and in either case will not ARG_CHECK failure */ - secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, recovery_test_nonce_function, NULL); + rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, recovery_test_nonce_function, NULL); CHECK(ecount == 5); /* These will all fail, but not in ARG_CHECK way */ - CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, zero_privkey, NULL, NULL) == 0); - CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, over_privkey, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(both, &recsig, message, zero_privkey, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(both, &recsig, message, over_privkey, NULL, NULL) == 0); /* This one will succeed. 
*/ - CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1); CHECK(ecount == 5); /* Check signing with a goofy nonce function */ /* Check bad contexts and NULLs for recovery */ ecount = 0; - CHECK(secp256k1_ecdsa_recover(none, &recpubkey, &recsig, message) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(none, &recpubkey, &recsig, message) == 0); CHECK(ecount == 1); - CHECK(secp256k1_ecdsa_recover(sign, &recpubkey, &recsig, message) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(sign, &recpubkey, &recsig, message) == 0); CHECK(ecount == 2); - CHECK(secp256k1_ecdsa_recover(vrfy, &recpubkey, &recsig, message) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(vrfy, &recpubkey, &recsig, message) == 1); CHECK(ecount == 2); - CHECK(secp256k1_ecdsa_recover(both, &recpubkey, &recsig, message) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(both, &recpubkey, &recsig, message) == 1); CHECK(ecount == 2); - CHECK(secp256k1_ecdsa_recover(both, NULL, &recsig, message) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(both, NULL, &recsig, message) == 0); CHECK(ecount == 3); - CHECK(secp256k1_ecdsa_recover(both, &recpubkey, NULL, message) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(both, &recpubkey, NULL, message) == 0); CHECK(ecount == 4); - CHECK(secp256k1_ecdsa_recover(both, &recpubkey, &recsig, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(both, &recpubkey, &recsig, NULL) == 0); CHECK(ecount == 5); /* Check NULLs for conversion */ - CHECK(secp256k1_ecdsa_sign(both, &normal_sig, message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(both, &normal_sig, message, privkey, NULL, NULL) == 1); ecount = 0; - CHECK(secp256k1_ecdsa_recoverable_signature_convert(both, NULL, &recsig) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert(both, NULL, &recsig) == 0); CHECK(ecount == 1); - CHECK(secp256k1_ecdsa_recoverable_signature_convert(both, &normal_sig, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert(both, &normal_sig, NULL) == 0); CHECK(ecount == 2); - CHECK(secp256k1_ecdsa_recoverable_signature_convert(both, &normal_sig, &recsig) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert(both, &normal_sig, &recsig) == 1); /* Check NULLs for de/serialization */ - CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1); ecount = 0; - CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, NULL, &recid, &recsig) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_serialize_compact(both, NULL, &recid, &recsig) == 0); CHECK(ecount == 1); - CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, sig, NULL, &recsig) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_serialize_compact(both, sig, NULL, &recsig) == 0); CHECK(ecount == 2); - CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, NULL) == 0); CHECK(ecount == 3); - CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, &recsig) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, &recsig) == 1); - 
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, NULL, sig, recid) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(both, NULL, sig, recid) == 0); CHECK(ecount == 4); - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, NULL, recid) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, NULL, recid) == 0); CHECK(ecount == 5); - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, -1) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, -1) == 0); CHECK(ecount == 6); - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, 5) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, 5) == 0); CHECK(ecount == 7); /* overflow in signature will fail but not affect ecount */ memcpy(sig, over_privkey, 32); - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, recid) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, recid) == 0); CHECK(ecount == 7); /* cleanup */ - secp256k1_context_destroy(none); - secp256k1_context_destroy(sign); - secp256k1_context_destroy(vrfy); - secp256k1_context_destroy(both); + rustsecp256k1_v0_1_0_context_destroy(none); + rustsecp256k1_v0_1_0_context_destroy(sign); + rustsecp256k1_v0_1_0_context_destroy(vrfy); + rustsecp256k1_v0_1_0_context_destroy(both); } void test_ecdsa_recovery_end_to_end(void) { unsigned char extra[32] = {0x00}; unsigned char privkey[32]; unsigned char message[32]; - secp256k1_ecdsa_signature signature[5]; - secp256k1_ecdsa_recoverable_signature rsignature[5]; + rustsecp256k1_v0_1_0_ecdsa_signature signature[5]; + rustsecp256k1_v0_1_0_ecdsa_recoverable_signature rsignature[5]; unsigned char sig[74]; - secp256k1_pubkey pubkey; - secp256k1_pubkey recpubkey; + rustsecp256k1_v0_1_0_pubkey pubkey; + rustsecp256k1_v0_1_0_pubkey recpubkey; int recid = 0; /* Generate a random key and message. */ { - secp256k1_scalar msg, key; + rustsecp256k1_v0_1_0_scalar msg, key; random_scalar_order_test(&msg); random_scalar_order_test(&key); - secp256k1_scalar_get_b32(privkey, &key); - secp256k1_scalar_get_b32(message, &msg); + rustsecp256k1_v0_1_0_scalar_get_b32(privkey, &key); + rustsecp256k1_v0_1_0_scalar_get_b32(message, &msg); } /* Construct and verify corresponding public key. */ - CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_seckey_verify(ctx, privkey) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey, privkey) == 1); /* Serialize/parse compact and verify/recover. 
*/ extra[0] = 0; - CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[0], message, privkey, NULL, NULL) == 1); - CHECK(secp256k1_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1); - CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[4], message, privkey, NULL, NULL) == 1); - CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[1], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(ctx, &rsignature[0], message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(ctx, &rsignature[4], message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(ctx, &rsignature[1], message, privkey, NULL, extra) == 1); extra[31] = 1; - CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[2], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(ctx, &rsignature[2], message, privkey, NULL, extra) == 1); extra[31] = 0; extra[0] = 1; - CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[3], message, privkey, NULL, extra) == 1); - CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1); - CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(ctx, &rsignature[3], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1); CHECK(memcmp(&signature[4], &signature[0], 64) == 0); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1); memset(&rsignature[4], 0, sizeof(rsignature[4])); - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1); - CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1); /* Parse compact (with recovery id) and recover. */ - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1); - CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 1); CHECK(memcmp(&pubkey, &recpubkey, sizeof(pubkey)) == 0); /* Serialize/destroy/parse signature and verify again. 
*/ - CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1); - sig[secp256k1_rand_bits(6)] += 1 + secp256k1_rand_int(255); - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1); - CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1); + sig[rustsecp256k1_v0_1_0_rand_bits(6)] += 1 + rustsecp256k1_v0_1_0_rand_int(255); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 0); /* Recover again */ - CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 0 || + CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 0 || memcmp(&pubkey, &recpubkey, sizeof(pubkey)) != 0); } @@ -225,7 +225,7 @@ void test_ecdsa_recovery_edge_cases(void) { 0x7D, 0xD7, 0x3E, 0x38, 0x7E, 0xE4, 0xFC, 0x86, 0x6E, 0x1B, 0xE8, 0xEC, 0xC7, 0xDD, 0x95, 0x57 }; - secp256k1_pubkey pubkey; + rustsecp256k1_v0_1_0_pubkey pubkey; /* signature (r,s) = (4,4), which can be recovered with all 4 recids. */ const unsigned char sigb64[64] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -237,19 +237,19 @@ void test_ecdsa_recovery_edge_cases(void) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, }; - secp256k1_pubkey pubkeyb; - secp256k1_ecdsa_recoverable_signature rsig; - secp256k1_ecdsa_signature sig; + rustsecp256k1_v0_1_0_pubkey pubkeyb; + rustsecp256k1_v0_1_0_ecdsa_recoverable_signature rsig; + rustsecp256k1_v0_1_0_ecdsa_signature sig; int recid; - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 0)); - CHECK(!secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 1)); - CHECK(secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 2)); - CHECK(!secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 3)); - CHECK(!secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 0)); + CHECK(!rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 1)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 2)); + CHECK(!rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 3)); + CHECK(!rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); for (recid = 0; recid < 4; recid++) { int i; @@ -294,40 +294,40 @@ void test_ecdsa_recovery_edge_cases(void) { 0xE6, 0xAF, 0x48, 0xA0, 0x3B, 0xBF, 0xD2, 0x5E, 0x8C, 0xD0, 0x36, 0x41, 0x45, 0x02, 0x01, 0x04 }; - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, 
recid) == 1); - CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 1); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 1); for (recid2 = 0; recid2 < 4; recid2++) { - secp256k1_pubkey pubkey2b; - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid2) == 1); - CHECK(secp256k1_ecdsa_recover(ctx, &pubkey2b, &rsig, msg32) == 1); + rustsecp256k1_v0_1_0_pubkey pubkey2b; + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid2) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &pubkey2b, &rsig, msg32) == 1); /* Verifying with (order + r,4) should always fail. */ - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderlong, sizeof(sigbderlong)) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbderlong, sizeof(sigbderlong)) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); } /* DER parsing tests. */ /* Zero length r/s. */ - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder_zr, sizeof(sigcder_zr)) == 0); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder_zs, sizeof(sigcder_zs)) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigcder_zr, sizeof(sigcder_zr)) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigcder_zs, sizeof(sigcder_zs)) == 0); /* Leading zeros. */ - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt1, sizeof(sigbderalt1)) == 0); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt2, sizeof(sigbderalt2)) == 0); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 0); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt1, sizeof(sigbderalt1)) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt2, sizeof(sigbderalt2)) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 0); sigbderalt3[4] = 1; - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); sigbderalt4[7] = 1; - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); /* Damage signature. 
*/ sigbder[7]++; - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); sigbder[7]--; - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, 6) == 0); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder) - 1) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, 6) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder) - 1) == 0); for(i = 0; i < 8; i++) { int c; unsigned char orig = sigbder[i]; @@ -337,7 +337,7 @@ void test_ecdsa_recovery_edge_cases(void) { continue; } sigbder[i] = c; - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 0 || secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 0 || rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); } sigbder[i] = orig; } @@ -357,25 +357,25 @@ void test_ecdsa_recovery_edge_cases(void) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, }; - secp256k1_pubkey pubkeyc; - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1); - CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyc, &rsig, msg32) == 1); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 1); + rustsecp256k1_v0_1_0_pubkey pubkeyc; + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &pubkeyc, &rsig, msg32) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 1); sigcder[4] = 0; sigc64[31] = 0; - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1); - CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0); sigcder[4] = 1; sigcder[7] = 0; sigc64[31] = 1; sigc64[63] = 0; - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1); - CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0); } } diff 
--git a/secp256k1-sys/depend/secp256k1/src/num.h b/secp256k1-sys/depend/secp256k1/src/num.h index 49f2dd7..896635d 100644 --- a/secp256k1-sys/depend/secp256k1/src/num.h +++ b/secp256k1-sys/depend/secp256k1/src/num.h @@ -20,54 +20,54 @@ #endif /** Copy a number. */ -static void secp256k1_num_copy(secp256k1_num *r, const secp256k1_num *a); +static void rustsecp256k1_v0_1_0_num_copy(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a); /** Convert a number's absolute value to a binary big-endian string. * There must be enough place. */ -static void secp256k1_num_get_bin(unsigned char *r, unsigned int rlen, const secp256k1_num *a); +static void rustsecp256k1_v0_1_0_num_get_bin(unsigned char *r, unsigned int rlen, const rustsecp256k1_v0_1_0_num *a); /** Set a number to the value of a binary big-endian string. */ -static void secp256k1_num_set_bin(secp256k1_num *r, const unsigned char *a, unsigned int alen); +static void rustsecp256k1_v0_1_0_num_set_bin(rustsecp256k1_v0_1_0_num *r, const unsigned char *a, unsigned int alen); /** Compute a modular inverse. The input must be less than the modulus. */ -static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *m); +static void rustsecp256k1_v0_1_0_num_mod_inverse(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *m); /** Compute the jacobi symbol (a|b). b must be positive and odd. */ -static int secp256k1_num_jacobi(const secp256k1_num *a, const secp256k1_num *b); +static int rustsecp256k1_v0_1_0_num_jacobi(const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b); /** Compare the absolute value of two numbers. */ -static int secp256k1_num_cmp(const secp256k1_num *a, const secp256k1_num *b); +static int rustsecp256k1_v0_1_0_num_cmp(const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b); /** Test whether two number are equal (including sign). */ -static int secp256k1_num_eq(const secp256k1_num *a, const secp256k1_num *b); +static int rustsecp256k1_v0_1_0_num_eq(const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b); /** Add two (signed) numbers. */ -static void secp256k1_num_add(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b); +static void rustsecp256k1_v0_1_0_num_add(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b); /** Subtract two (signed) numbers. */ -static void secp256k1_num_sub(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b); +static void rustsecp256k1_v0_1_0_num_sub(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b); /** Multiply two (signed) numbers. */ -static void secp256k1_num_mul(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b); +static void rustsecp256k1_v0_1_0_num_mul(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b); /** Replace a number by its remainder modulo m. M's sign is ignored. The result is a number between 0 and m-1, even if r was negative. */ -static void secp256k1_num_mod(secp256k1_num *r, const secp256k1_num *m); +static void rustsecp256k1_v0_1_0_num_mod(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *m); /** Right-shift the passed number by bits bits. */ -static void secp256k1_num_shift(secp256k1_num *r, int bits); +static void rustsecp256k1_v0_1_0_num_shift(rustsecp256k1_v0_1_0_num *r, int bits); /** Check whether a number is zero. 
*/ -static int secp256k1_num_is_zero(const secp256k1_num *a); +static int rustsecp256k1_v0_1_0_num_is_zero(const rustsecp256k1_v0_1_0_num *a); /** Check whether a number is one. */ -static int secp256k1_num_is_one(const secp256k1_num *a); +static int rustsecp256k1_v0_1_0_num_is_one(const rustsecp256k1_v0_1_0_num *a); /** Check whether a number is strictly negative. */ -static int secp256k1_num_is_neg(const secp256k1_num *a); +static int rustsecp256k1_v0_1_0_num_is_neg(const rustsecp256k1_v0_1_0_num *a); /** Change a number's sign. */ -static void secp256k1_num_negate(secp256k1_num *r); +static void rustsecp256k1_v0_1_0_num_negate(rustsecp256k1_v0_1_0_num *r); #endif diff --git a/secp256k1-sys/depend/secp256k1/src/num_gmp.h b/secp256k1-sys/depend/secp256k1/src/num_gmp.h index 3619844..62f5937 100644 --- a/secp256k1-sys/depend/secp256k1/src/num_gmp.h +++ b/secp256k1-sys/depend/secp256k1/src/num_gmp.h @@ -15,6 +15,6 @@ typedef struct { mp_limb_t data[2*NUM_LIMBS]; int neg; int limbs; -} secp256k1_num; +} rustsecp256k1_v0_1_0_num; #endif /* SECP256K1_NUM_REPR_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/num_gmp_impl.h b/secp256k1-sys/depend/secp256k1/src/num_gmp_impl.h index 0ae2a8b..0623cb4 100644 --- a/secp256k1-sys/depend/secp256k1/src/num_gmp_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/num_gmp_impl.h @@ -15,18 +15,18 @@ #include "num.h" #ifdef VERIFY -static void secp256k1_num_sanity(const secp256k1_num *a) { +static void rustsecp256k1_v0_1_0_num_sanity(const rustsecp256k1_v0_1_0_num *a) { VERIFY_CHECK(a->limbs == 1 || (a->limbs > 1 && a->data[a->limbs-1] != 0)); } #else -#define secp256k1_num_sanity(a) do { } while(0) +#define rustsecp256k1_v0_1_0_num_sanity(a) do { } while(0) #endif -static void secp256k1_num_copy(secp256k1_num *r, const secp256k1_num *a) { +static void rustsecp256k1_v0_1_0_num_copy(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a) { *r = *a; } -static void secp256k1_num_get_bin(unsigned char *r, unsigned int rlen, const secp256k1_num *a) { +static void rustsecp256k1_v0_1_0_num_get_bin(unsigned char *r, unsigned int rlen, const rustsecp256k1_v0_1_0_num *a) { unsigned char tmp[65]; int len = 0; int shift = 0; @@ -42,7 +42,7 @@ static void secp256k1_num_get_bin(unsigned char *r, unsigned int rlen, const sec memset(tmp, 0, sizeof(tmp)); } -static void secp256k1_num_set_bin(secp256k1_num *r, const unsigned char *a, unsigned int alen) { +static void rustsecp256k1_v0_1_0_num_set_bin(rustsecp256k1_v0_1_0_num *r, const unsigned char *a, unsigned int alen) { int len; VERIFY_CHECK(alen > 0); VERIFY_CHECK(alen <= 64); @@ -59,7 +59,7 @@ static void secp256k1_num_set_bin(secp256k1_num *r, const unsigned char *a, unsi } } -static void secp256k1_num_add_abs(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) { +static void rustsecp256k1_v0_1_0_num_add_abs(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b) { mp_limb_t c = mpn_add(r->data, a->data, a->limbs, b->data, b->limbs); r->limbs = a->limbs; if (c != 0) { @@ -68,7 +68,7 @@ static void secp256k1_num_add_abs(secp256k1_num *r, const secp256k1_num *a, cons } } -static void secp256k1_num_sub_abs(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) { +static void rustsecp256k1_v0_1_0_num_sub_abs(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b) { mp_limb_t c = mpn_sub(r->data, a->data, a->limbs, b->data, b->limbs); (void)c; VERIFY_CHECK(c == 0); @@ -78,9 +78,9 @@ static void 
secp256k1_num_sub_abs(secp256k1_num *r, const secp256k1_num *a, cons } } -static void secp256k1_num_mod(secp256k1_num *r, const secp256k1_num *m) { - secp256k1_num_sanity(r); - secp256k1_num_sanity(m); +static void rustsecp256k1_v0_1_0_num_mod(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *m) { + rustsecp256k1_v0_1_0_num_sanity(r); + rustsecp256k1_v0_1_0_num_sanity(m); if (r->limbs >= m->limbs) { mp_limb_t t[2*NUM_LIMBS]; @@ -93,20 +93,20 @@ static void secp256k1_num_mod(secp256k1_num *r, const secp256k1_num *m) { } if (r->neg && (r->limbs > 1 || r->data[0] != 0)) { - secp256k1_num_sub_abs(r, m, r); + rustsecp256k1_v0_1_0_num_sub_abs(r, m, r); r->neg = 0; } } -static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *m) { +static void rustsecp256k1_v0_1_0_num_mod_inverse(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *m) { int i; mp_limb_t g[NUM_LIMBS+1]; mp_limb_t u[NUM_LIMBS+1]; mp_limb_t v[NUM_LIMBS+1]; mp_size_t sn; mp_size_t gn; - secp256k1_num_sanity(a); - secp256k1_num_sanity(m); + rustsecp256k1_v0_1_0_num_sanity(a); + rustsecp256k1_v0_1_0_num_sanity(m); /** mpn_gcdext computes: (G,S) = gcdext(U,V), where * * G = gcd(U,V) @@ -144,11 +144,11 @@ static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a, memset(v, 0, sizeof(v)); } -static int secp256k1_num_jacobi(const secp256k1_num *a, const secp256k1_num *b) { +static int rustsecp256k1_v0_1_0_num_jacobi(const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b) { int ret; mpz_t ga, gb; - secp256k1_num_sanity(a); - secp256k1_num_sanity(b); + rustsecp256k1_v0_1_0_num_sanity(a); + rustsecp256k1_v0_1_0_num_sanity(b); VERIFY_CHECK(!b->neg && (b->limbs > 0) && (b->data[0] & 1)); mpz_inits(ga, gb, NULL); @@ -166,19 +166,19 @@ static int secp256k1_num_jacobi(const secp256k1_num *a, const secp256k1_num *b) return ret; } -static int secp256k1_num_is_one(const secp256k1_num *a) { +static int rustsecp256k1_v0_1_0_num_is_one(const rustsecp256k1_v0_1_0_num *a) { return (a->limbs == 1 && a->data[0] == 1); } -static int secp256k1_num_is_zero(const secp256k1_num *a) { +static int rustsecp256k1_v0_1_0_num_is_zero(const rustsecp256k1_v0_1_0_num *a) { return (a->limbs == 1 && a->data[0] == 0); } -static int secp256k1_num_is_neg(const secp256k1_num *a) { +static int rustsecp256k1_v0_1_0_num_is_neg(const rustsecp256k1_v0_1_0_num *a) { return (a->limbs > 1 || a->data[0] != 0) && a->neg; } -static int secp256k1_num_cmp(const secp256k1_num *a, const secp256k1_num *b) { +static int rustsecp256k1_v0_1_0_num_cmp(const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b) { if (a->limbs > b->limbs) { return 1; } @@ -188,54 +188,54 @@ static int secp256k1_num_cmp(const secp256k1_num *a, const secp256k1_num *b) { return mpn_cmp(a->data, b->data, a->limbs); } -static int secp256k1_num_eq(const secp256k1_num *a, const secp256k1_num *b) { +static int rustsecp256k1_v0_1_0_num_eq(const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b) { if (a->limbs > b->limbs) { return 0; } if (a->limbs < b->limbs) { return 0; } - if ((a->neg && !secp256k1_num_is_zero(a)) != (b->neg && !secp256k1_num_is_zero(b))) { + if ((a->neg && !rustsecp256k1_v0_1_0_num_is_zero(a)) != (b->neg && !rustsecp256k1_v0_1_0_num_is_zero(b))) { return 0; } return mpn_cmp(a->data, b->data, a->limbs) == 0; } -static void secp256k1_num_subadd(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b, int bneg) { +static void 
rustsecp256k1_v0_1_0_num_subadd(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b, int bneg) { if (!(b->neg ^ bneg ^ a->neg)) { /* a and b have the same sign */ r->neg = a->neg; if (a->limbs >= b->limbs) { - secp256k1_num_add_abs(r, a, b); + rustsecp256k1_v0_1_0_num_add_abs(r, a, b); } else { - secp256k1_num_add_abs(r, b, a); + rustsecp256k1_v0_1_0_num_add_abs(r, b, a); } } else { - if (secp256k1_num_cmp(a, b) > 0) { + if (rustsecp256k1_v0_1_0_num_cmp(a, b) > 0) { r->neg = a->neg; - secp256k1_num_sub_abs(r, a, b); + rustsecp256k1_v0_1_0_num_sub_abs(r, a, b); } else { r->neg = b->neg ^ bneg; - secp256k1_num_sub_abs(r, b, a); + rustsecp256k1_v0_1_0_num_sub_abs(r, b, a); } } } -static void secp256k1_num_add(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) { - secp256k1_num_sanity(a); - secp256k1_num_sanity(b); - secp256k1_num_subadd(r, a, b, 0); +static void rustsecp256k1_v0_1_0_num_add(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b) { + rustsecp256k1_v0_1_0_num_sanity(a); + rustsecp256k1_v0_1_0_num_sanity(b); + rustsecp256k1_v0_1_0_num_subadd(r, a, b, 0); } -static void secp256k1_num_sub(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) { - secp256k1_num_sanity(a); - secp256k1_num_sanity(b); - secp256k1_num_subadd(r, a, b, 1); +static void rustsecp256k1_v0_1_0_num_sub(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b) { + rustsecp256k1_v0_1_0_num_sanity(a); + rustsecp256k1_v0_1_0_num_sanity(b); + rustsecp256k1_v0_1_0_num_subadd(r, a, b, 1); } -static void secp256k1_num_mul(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) { +static void rustsecp256k1_v0_1_0_num_mul(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b) { mp_limb_t tmp[2*NUM_LIMBS+1]; - secp256k1_num_sanity(a); - secp256k1_num_sanity(b); + rustsecp256k1_v0_1_0_num_sanity(a); + rustsecp256k1_v0_1_0_num_sanity(b); VERIFY_CHECK(a->limbs + b->limbs <= 2*NUM_LIMBS+1); if ((a->limbs==1 && a->data[0]==0) || (b->limbs==1 && b->data[0]==0)) { @@ -259,7 +259,7 @@ static void secp256k1_num_mul(secp256k1_num *r, const secp256k1_num *a, const se memset(tmp, 0, sizeof(tmp)); } -static void secp256k1_num_shift(secp256k1_num *r, int bits) { +static void rustsecp256k1_v0_1_0_num_shift(rustsecp256k1_v0_1_0_num *r, int bits) { if (bits % GMP_NUMB_BITS) { /* Shift within limbs. */ mpn_rshift(r->data, r->data, r->limbs, bits % GMP_NUMB_BITS); @@ -281,7 +281,7 @@ static void secp256k1_num_shift(secp256k1_num *r, int bits) { } } -static void secp256k1_num_negate(secp256k1_num *r) { +static void rustsecp256k1_v0_1_0_num_negate(rustsecp256k1_v0_1_0_num *r) { r->neg ^= 1; } diff --git a/secp256k1-sys/depend/secp256k1/src/scalar.h b/secp256k1-sys/depend/secp256k1/src/scalar.h index 59304cb..5c4f1c7 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar.h @@ -24,83 +24,83 @@ #endif /** Clear a scalar to prevent the leak of sensitive data. */ -static void secp256k1_scalar_clear(secp256k1_scalar *r); +static void rustsecp256k1_v0_1_0_scalar_clear(rustsecp256k1_v0_1_0_scalar *r); /** Access bits from a scalar. All requested bits must belong to the same 32-bit limb. 
*/ -static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count); +static unsigned int rustsecp256k1_v0_1_0_scalar_get_bits(const rustsecp256k1_v0_1_0_scalar *a, unsigned int offset, unsigned int count); /** Access bits from a scalar. Not constant time. */ -static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count); +static unsigned int rustsecp256k1_v0_1_0_scalar_get_bits_var(const rustsecp256k1_v0_1_0_scalar *a, unsigned int offset, unsigned int count); /** Set a scalar from a big endian byte array. */ -static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *bin, int *overflow); +static void rustsecp256k1_v0_1_0_scalar_set_b32(rustsecp256k1_v0_1_0_scalar *r, const unsigned char *bin, int *overflow); /** Set a scalar to an unsigned integer. */ -static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v); +static void rustsecp256k1_v0_1_0_scalar_set_int(rustsecp256k1_v0_1_0_scalar *r, unsigned int v); /** Convert a scalar to a byte array. */ -static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a); +static void rustsecp256k1_v0_1_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_1_0_scalar* a); /** Add two scalars together (modulo the group order). Returns whether it overflowed. */ -static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b); +static int rustsecp256k1_v0_1_0_scalar_add(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a, const rustsecp256k1_v0_1_0_scalar *b); /** Conditionally add a power of two to a scalar. The result is not allowed to overflow. */ -static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag); +static void rustsecp256k1_v0_1_0_scalar_cadd_bit(rustsecp256k1_v0_1_0_scalar *r, unsigned int bit, int flag); /** Multiply two scalars (modulo the group order). */ -static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b); +static void rustsecp256k1_v0_1_0_scalar_mul(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a, const rustsecp256k1_v0_1_0_scalar *b); /** Shift a scalar right by some amount strictly between 0 and 16, returning * the low bits that were shifted off */ -static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n); +static int rustsecp256k1_v0_1_0_scalar_shr_int(rustsecp256k1_v0_1_0_scalar *r, int n); /** Compute the square of a scalar (modulo the group order). */ -static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a); +static void rustsecp256k1_v0_1_0_scalar_sqr(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a); /** Compute the inverse of a scalar (modulo the group order). */ -static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *a); +static void rustsecp256k1_v0_1_0_scalar_inverse(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a); /** Compute the inverse of a scalar (modulo the group order), without constant-time guarantee. */ -static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *a); +static void rustsecp256k1_v0_1_0_scalar_inverse_var(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a); /** Compute the complement of a scalar (modulo the group order). 
*/ -static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a); +static void rustsecp256k1_v0_1_0_scalar_negate(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a); /** Check whether a scalar equals zero. */ -static int secp256k1_scalar_is_zero(const secp256k1_scalar *a); +static int rustsecp256k1_v0_1_0_scalar_is_zero(const rustsecp256k1_v0_1_0_scalar *a); /** Check whether a scalar equals one. */ -static int secp256k1_scalar_is_one(const secp256k1_scalar *a); +static int rustsecp256k1_v0_1_0_scalar_is_one(const rustsecp256k1_v0_1_0_scalar *a); /** Check whether a scalar, considered as an nonnegative integer, is even. */ -static int secp256k1_scalar_is_even(const secp256k1_scalar *a); +static int rustsecp256k1_v0_1_0_scalar_is_even(const rustsecp256k1_v0_1_0_scalar *a); /** Check whether a scalar is higher than the group order divided by 2. */ -static int secp256k1_scalar_is_high(const secp256k1_scalar *a); +static int rustsecp256k1_v0_1_0_scalar_is_high(const rustsecp256k1_v0_1_0_scalar *a); /** Conditionally negate a number, in constant time. * Returns -1 if the number was negated, 1 otherwise */ -static int secp256k1_scalar_cond_negate(secp256k1_scalar *a, int flag); +static int rustsecp256k1_v0_1_0_scalar_cond_negate(rustsecp256k1_v0_1_0_scalar *a, int flag); #ifndef USE_NUM_NONE /** Convert a scalar to a number. */ -static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a); +static void rustsecp256k1_v0_1_0_scalar_get_num(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_scalar *a); /** Get the order of the group as a number. */ -static void secp256k1_scalar_order_get_num(secp256k1_num *r); +static void rustsecp256k1_v0_1_0_scalar_order_get_num(rustsecp256k1_v0_1_0_num *r); #endif /** Compare two scalars. */ -static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b); +static int rustsecp256k1_v0_1_0_scalar_eq(const rustsecp256k1_v0_1_0_scalar *a, const rustsecp256k1_v0_1_0_scalar *b); #ifdef USE_ENDOMORPHISM /** Find r1 and r2 such that r1+r2*2^128 = a. */ -static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a); -/** Find r1 and r2 such that r1+r2*lambda = a, and r1 and r2 are maximum 128 bits long (see secp256k1_gej_mul_lambda). */ -static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a); +static void rustsecp256k1_v0_1_0_scalar_split_128(rustsecp256k1_v0_1_0_scalar *r1, rustsecp256k1_v0_1_0_scalar *r2, const rustsecp256k1_v0_1_0_scalar *a); +/** Find r1 and r2 such that r1+r2*lambda = a, and r1 and r2 are maximum 128 bits long (see rustsecp256k1_v0_1_0_gej_mul_lambda). */ +static void rustsecp256k1_v0_1_0_scalar_split_lambda(rustsecp256k1_v0_1_0_scalar *r1, rustsecp256k1_v0_1_0_scalar *r2, const rustsecp256k1_v0_1_0_scalar *a); #endif /** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. 
*/ -static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift); +static void rustsecp256k1_v0_1_0_scalar_mul_shift_var(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a, const rustsecp256k1_v0_1_0_scalar *b, unsigned int shift); #endif /* SECP256K1_SCALAR_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_4x64.h b/secp256k1-sys/depend/secp256k1/src/scalar_4x64.h index 19c7495..1af4c7b 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_4x64.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_4x64.h @@ -12,7 +12,7 @@ /** A scalar modulo the group order of the secp256k1 curve. */ typedef struct { uint64_t d[4]; -} secp256k1_scalar; +} rustsecp256k1_v0_1_0_scalar; #define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{((uint64_t)(d1)) << 32 | (d0), ((uint64_t)(d3)) << 32 | (d2), ((uint64_t)(d5)) << 32 | (d4), ((uint64_t)(d7)) << 32 | (d6)}} diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_4x64_impl.h b/secp256k1-sys/depend/secp256k1/src/scalar_4x64_impl.h index d378335..803adb5 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_4x64_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_4x64_impl.h @@ -24,37 +24,37 @@ #define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL) #define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL) -SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_scalar_clear(rustsecp256k1_v0_1_0_scalar *r) { r->d[0] = 0; r->d[1] = 0; r->d[2] = 0; r->d[3] = 0; } -SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_scalar_set_int(rustsecp256k1_v0_1_0_scalar *r, unsigned int v) { r->d[0] = v; r->d[1] = 0; r->d[2] = 0; r->d[3] = 0; } -SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_1_0_scalar_get_bits(const rustsecp256k1_v0_1_0_scalar *a, unsigned int offset, unsigned int count) { VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6); return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1); } -SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_1_0_scalar_get_bits_var(const rustsecp256k1_v0_1_0_scalar *a, unsigned int offset, unsigned int count) { VERIFY_CHECK(count < 32); VERIFY_CHECK(offset + count <= 256); if ((offset + count - 1) >> 6 == offset >> 6) { - return secp256k1_scalar_get_bits(a, offset, count); + return rustsecp256k1_v0_1_0_scalar_get_bits(a, offset, count); } else { VERIFY_CHECK((offset >> 6) + 1 < 4); return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1); } } -SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_scalar_check_overflow(const rustsecp256k1_v0_1_0_scalar *a) { int yes = 0; int no = 0; no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. 
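The 4x64 representation above stores a 256-bit scalar as four uint64 limbs, least significant first, and SECP256K1_SCALAR_CONST packs eight 32-bit words d7..d0 into those limbs two at a time. A standalone toy (PACK and toy_scalar are local stand-ins, not library names) that replicates the packing for the group order n and prints the low limb:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Standalone copy of the 4x64 packing shown above, for illustration. */
    #define PACK(d7, d6, d5, d4, d3, d2, d1, d0) {{ \
        ((uint64_t)(d1)) << 32 | (d0), ((uint64_t)(d3)) << 32 | (d2), \
        ((uint64_t)(d5)) << 32 | (d4), ((uint64_t)(d7)) << 32 | (d6)}}

    typedef struct { uint64_t d[4]; } toy_scalar;

    int main(void) {
        /* The secp256k1 group order n, written as eight 32-bit words d7..d0. */
        toy_scalar n = PACK(0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL,
                            0xBAAEDCE6UL, 0xAF48A03BUL, 0xBFD25E8CUL, 0xD0364141UL);
        printf("low limb d[0] = %016" PRIx64 "\n", n.d[0]);  /* bfd25e8cd0364141 */
        return 0;
    }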
*/ @@ -66,7 +66,7 @@ SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scal return yes; } -SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigned int overflow) { +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_scalar_reduce(rustsecp256k1_v0_1_0_scalar *r, unsigned int overflow) { uint128_t t; VERIFY_CHECK(overflow <= 1); t = (uint128_t)r->d[0] + overflow * SECP256K1_N_C_0; @@ -80,7 +80,7 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigne return overflow; } -static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { +static int rustsecp256k1_v0_1_0_scalar_add(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a, const rustsecp256k1_v0_1_0_scalar *b) { int overflow; uint128_t t = (uint128_t)a->d[0] + b->d[0]; r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; @@ -90,13 +90,13 @@ static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; t += (uint128_t)a->d[3] + b->d[3]; r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - overflow = t + secp256k1_scalar_check_overflow(r); + overflow = t + rustsecp256k1_v0_1_0_scalar_check_overflow(r); VERIFY_CHECK(overflow == 0 || overflow == 1); - secp256k1_scalar_reduce(r, overflow); + rustsecp256k1_v0_1_0_scalar_reduce(r, overflow); return overflow; } -static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) { +static void rustsecp256k1_v0_1_0_scalar_cadd_bit(rustsecp256k1_v0_1_0_scalar *r, unsigned int bit, int flag) { uint128_t t; VERIFY_CHECK(bit < 256); bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */ @@ -110,35 +110,35 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; #ifdef VERIFY VERIFY_CHECK((t >> 64) == 0); - VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0); + VERIFY_CHECK(rustsecp256k1_v0_1_0_scalar_check_overflow(r) == 0); #endif } -static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) { +static void rustsecp256k1_v0_1_0_scalar_set_b32(rustsecp256k1_v0_1_0_scalar *r, const unsigned char *b32, int *overflow) { int over; r->d[0] = (uint64_t)b32[31] | (uint64_t)b32[30] << 8 | (uint64_t)b32[29] << 16 | (uint64_t)b32[28] << 24 | (uint64_t)b32[27] << 32 | (uint64_t)b32[26] << 40 | (uint64_t)b32[25] << 48 | (uint64_t)b32[24] << 56; r->d[1] = (uint64_t)b32[23] | (uint64_t)b32[22] << 8 | (uint64_t)b32[21] << 16 | (uint64_t)b32[20] << 24 | (uint64_t)b32[19] << 32 | (uint64_t)b32[18] << 40 | (uint64_t)b32[17] << 48 | (uint64_t)b32[16] << 56; r->d[2] = (uint64_t)b32[15] | (uint64_t)b32[14] << 8 | (uint64_t)b32[13] << 16 | (uint64_t)b32[12] << 24 | (uint64_t)b32[11] << 32 | (uint64_t)b32[10] << 40 | (uint64_t)b32[9] << 48 | (uint64_t)b32[8] << 56; r->d[3] = (uint64_t)b32[7] | (uint64_t)b32[6] << 8 | (uint64_t)b32[5] << 16 | (uint64_t)b32[4] << 24 | (uint64_t)b32[3] << 32 | (uint64_t)b32[2] << 40 | (uint64_t)b32[1] << 48 | (uint64_t)b32[0] << 56; - over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r)); + over = rustsecp256k1_v0_1_0_scalar_reduce(r, rustsecp256k1_v0_1_0_scalar_check_overflow(r)); if (overflow) { *overflow = over; } } -static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) { +static void rustsecp256k1_v0_1_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_1_0_scalar* a) { bin[0] = a->d[3] >> 56; 
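The `_scalar_add` body above runs a textbook carry chain: a 128-bit accumulator t collects limb + limb + carry, the low 64 bits become the output limb, and the high bits ride into the next pair; the final carry feeds the overflow check and conditional reduction. A standalone two-limb toy of the same pattern (assumes a compiler with unsigned __int128, such as GCC or Clang, which the 64-bit build also assumes):

    #include <stdint.h>
    #include <stdio.h>

    typedef unsigned __int128 uint128_t;  /* same assumption as the 64-bit build */

    int main(void) {
        uint64_t a[2] = { 0xFFFFFFFFFFFFFFFFULL, 1 };
        uint64_t b[2] = { 1, 2 };
        uint64_t r[2];
        uint128_t t = (uint128_t)a[0] + b[0];
        r[0] = (uint64_t)t; t >>= 64;            /* r[0] = 0, carry = 1 */
        t += (uint128_t)a[1] + b[1];
        r[1] = (uint64_t)t; t >>= 64;            /* r[1] = 4, carry = 0 */
        printf("r0=%llu r1=%llu carry=%llu\n",
               (unsigned long long)r[0], (unsigned long long)r[1],
               (unsigned long long)(uint64_t)t);
        return 0;
    }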
bin[1] = a->d[3] >> 48; bin[2] = a->d[3] >> 40; bin[3] = a->d[3] >> 32; bin[4] = a->d[3] >> 24; bin[5] = a->d[3] >> 16; bin[6] = a->d[3] >> 8; bin[7] = a->d[3]; bin[8] = a->d[2] >> 56; bin[9] = a->d[2] >> 48; bin[10] = a->d[2] >> 40; bin[11] = a->d[2] >> 32; bin[12] = a->d[2] >> 24; bin[13] = a->d[2] >> 16; bin[14] = a->d[2] >> 8; bin[15] = a->d[2]; bin[16] = a->d[1] >> 56; bin[17] = a->d[1] >> 48; bin[18] = a->d[1] >> 40; bin[19] = a->d[1] >> 32; bin[20] = a->d[1] >> 24; bin[21] = a->d[1] >> 16; bin[22] = a->d[1] >> 8; bin[23] = a->d[1]; bin[24] = a->d[0] >> 56; bin[25] = a->d[0] >> 48; bin[26] = a->d[0] >> 40; bin[27] = a->d[0] >> 32; bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0]; } -SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_scalar_is_zero(const rustsecp256k1_v0_1_0_scalar *a) { return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0; } -static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) { - uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0); +static void rustsecp256k1_v0_1_0_scalar_negate(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a) { + uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (rustsecp256k1_v0_1_0_scalar_is_zero(a) == 0); uint128_t t = (uint128_t)(~a->d[0]) + SECP256K1_N_0 + 1; r->d[0] = t & nonzero; t >>= 64; t += (uint128_t)(~a->d[1]) + SECP256K1_N_1; @@ -149,11 +149,11 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar r->d[3] = t & nonzero; } -SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_scalar_is_one(const rustsecp256k1_v0_1_0_scalar *a) { return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0; } -static int secp256k1_scalar_is_high(const secp256k1_scalar *a) { +static int rustsecp256k1_v0_1_0_scalar_is_high(const rustsecp256k1_v0_1_0_scalar *a) { int yes = 0; int no = 0; no |= (a->d[3] < SECP256K1_N_H_3); @@ -165,11 +165,11 @@ static int secp256k1_scalar_is_high(const secp256k1_scalar *a) { return yes; } -static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) { +static int rustsecp256k1_v0_1_0_scalar_cond_negate(rustsecp256k1_v0_1_0_scalar *r, int flag) { /* If we are flag = 0, mask = 00...00 and this is a no-op; - * if we are flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate */ + * if we are flag = 1, mask = 11...11 and this is identical to rustsecp256k1_v0_1_0_scalar_negate */ uint64_t mask = !flag - 1; - uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1; + uint64_t nonzero = (rustsecp256k1_v0_1_0_scalar_is_zero(r) != 0) - 1; uint128_t t = (uint128_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask); r->d[0] = t & nonzero; t >>= 64; t += (uint128_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask); @@ -267,7 +267,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) { VERIFY_CHECK(c2 == 0); \ } -static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) { +static void rustsecp256k1_v0_1_0_scalar_reduce_512(rustsecp256k1_v0_1_0_scalar *r, const uint64_t *l) { #ifdef USE_ASM_X86_64 /* Reduce 512 bits into 385. */ uint64_t m0, m1, m2, m3, m4, m5, m6; @@ -573,10 +573,10 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) #endif /* Final reduction of r. 
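The `_scalar_cond_negate` code above avoids branching on secret data: the expression !flag - 1 yields an all-zero mask when flag is 0 (so the arithmetic is a no-op) and an all-one mask when flag is 1 (so the same arithmetic performs the negation), and a second mask zeroes the result when the input is zero. A standalone toy showing just the mask construction:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        int flag;
        for (flag = 0; flag <= 1; flag++) {
            /* flag = 0 -> mask = 0x0000000000000000 (no-op path)
             * flag = 1 -> mask = 0xFFFFFFFFFFFFFFFF (negate path) */
            uint64_t mask = (uint64_t)(!flag) - 1;
            printf("flag=%d -> mask=%016llx\n", flag, (unsigned long long)mask);
        }
        return 0;
    }

Both code paths execute the same instructions regardless of flag, which is what makes the routine constant time.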
*/ - secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r)); + rustsecp256k1_v0_1_0_scalar_reduce(r, c + rustsecp256k1_v0_1_0_scalar_check_overflow(r)); } -static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, const secp256k1_scalar *b) { +static void rustsecp256k1_v0_1_0_scalar_mul_512(uint64_t l[8], const rustsecp256k1_v0_1_0_scalar *a, const rustsecp256k1_v0_1_0_scalar *b) { #ifdef USE_ASM_X86_64 const uint64_t *pb = b->d; __asm__ __volatile__( @@ -743,7 +743,7 @@ static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, c #endif } -static void secp256k1_scalar_sqr_512(uint64_t l[8], const secp256k1_scalar *a) { +static void rustsecp256k1_v0_1_0_scalar_sqr_512(uint64_t l[8], const rustsecp256k1_v0_1_0_scalar *a) { #ifdef USE_ASM_X86_64 __asm__ __volatile__( /* Preload */ @@ -888,13 +888,13 @@ static void secp256k1_scalar_sqr_512(uint64_t l[8], const secp256k1_scalar *a) { #undef extract #undef extract_fast -static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { +static void rustsecp256k1_v0_1_0_scalar_mul(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a, const rustsecp256k1_v0_1_0_scalar *b) { uint64_t l[8]; - secp256k1_scalar_mul_512(l, a, b); - secp256k1_scalar_reduce_512(r, l); + rustsecp256k1_v0_1_0_scalar_mul_512(l, a, b); + rustsecp256k1_v0_1_0_scalar_reduce_512(r, l); } -static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) { +static int rustsecp256k1_v0_1_0_scalar_shr_int(rustsecp256k1_v0_1_0_scalar *r, int n) { int ret; VERIFY_CHECK(n > 0); VERIFY_CHECK(n < 16); @@ -906,14 +906,14 @@ static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) { return ret; } -static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) { +static void rustsecp256k1_v0_1_0_scalar_sqr(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a) { uint64_t l[8]; - secp256k1_scalar_sqr_512(l, a); - secp256k1_scalar_reduce_512(r, l); + rustsecp256k1_v0_1_0_scalar_sqr_512(l, a); + rustsecp256k1_v0_1_0_scalar_reduce_512(r, l); } #ifdef USE_ENDOMORPHISM -static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) { +static void rustsecp256k1_v0_1_0_scalar_split_128(rustsecp256k1_v0_1_0_scalar *r1, rustsecp256k1_v0_1_0_scalar *r2, const rustsecp256k1_v0_1_0_scalar *a) { r1->d[0] = a->d[0]; r1->d[1] = a->d[1]; r1->d[2] = 0; @@ -925,17 +925,17 @@ static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r } #endif -SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) { +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_scalar_eq(const rustsecp256k1_v0_1_0_scalar *a, const rustsecp256k1_v0_1_0_scalar *b) { return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0; } -SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_scalar_mul_shift_var(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a, const rustsecp256k1_v0_1_0_scalar *b, unsigned int shift) { uint64_t l[8]; unsigned int shiftlimbs; unsigned int shiftlow; unsigned int shifthigh; VERIFY_CHECK(shift >= 256); - secp256k1_scalar_mul_512(l, a, b); + rustsecp256k1_v0_1_0_scalar_mul_512(l, a, b); shiftlimbs = shift >> 6; shiftlow = shift & 0x3F; shifthigh = 
64 - shiftlow; @@ -943,7 +943,7 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0; - secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1); + rustsecp256k1_v0_1_0_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1); } #endif /* SECP256K1_SCALAR_REPR_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_8x32.h b/secp256k1-sys/depend/secp256k1/src/scalar_8x32.h index 2c9a348..62d1c84 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_8x32.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_8x32.h @@ -12,7 +12,7 @@ /** A scalar modulo the group order of the secp256k1 curve. */ typedef struct { uint32_t d[8]; -} secp256k1_scalar; +} rustsecp256k1_v0_1_0_scalar; #define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7)}} diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_8x32_impl.h b/secp256k1-sys/depend/secp256k1/src/scalar_8x32_impl.h index 4f9ed61..80e4c14 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_8x32_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_8x32_impl.h @@ -34,7 +34,7 @@ #define SECP256K1_N_H_6 ((uint32_t)0xFFFFFFFFUL) #define SECP256K1_N_H_7 ((uint32_t)0x7FFFFFFFUL) -SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_scalar_clear(rustsecp256k1_v0_1_0_scalar *r) { r->d[0] = 0; r->d[1] = 0; r->d[2] = 0; @@ -45,7 +45,7 @@ SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { r->d[7] = 0; } -SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_scalar_set_int(rustsecp256k1_v0_1_0_scalar *r, unsigned int v) { r->d[0] = v; r->d[1] = 0; r->d[2] = 0; @@ -56,23 +56,23 @@ SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsig r->d[7] = 0; } -SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_1_0_scalar_get_bits(const rustsecp256k1_v0_1_0_scalar *a, unsigned int offset, unsigned int count) { VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5); return (a->d[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1); } -SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_1_0_scalar_get_bits_var(const rustsecp256k1_v0_1_0_scalar *a, unsigned int offset, unsigned int count) { VERIFY_CHECK(count < 32); VERIFY_CHECK(offset + count <= 256); if ((offset + count - 1) >> 5 == offset >> 5) { - return secp256k1_scalar_get_bits(a, offset, count); + return rustsecp256k1_v0_1_0_scalar_get_bits(a, offset, count); } else { VERIFY_CHECK((offset >> 5) + 1 < 8); return ((a->d[offset >> 5] >> (offset & 0x1F)) | (a->d[(offset >> 5) + 1] << (32 - (offset & 0x1F)))) & ((((uint32_t)1) << count) - 1); } } -SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { +SECP256K1_INLINE static int 
rustsecp256k1_v0_1_0_scalar_check_overflow(const rustsecp256k1_v0_1_0_scalar *a) { int yes = 0; int no = 0; no |= (a->d[7] < SECP256K1_N_7); /* No need for a > check. */ @@ -90,7 +90,7 @@ SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scal return yes; } -SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, uint32_t overflow) { +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_scalar_reduce(rustsecp256k1_v0_1_0_scalar *r, uint32_t overflow) { uint64_t t; VERIFY_CHECK(overflow <= 1); t = (uint64_t)r->d[0] + overflow * SECP256K1_N_C_0; @@ -112,7 +112,7 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, uint32_ return overflow; } -static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { +static int rustsecp256k1_v0_1_0_scalar_add(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a, const rustsecp256k1_v0_1_0_scalar *b) { int overflow; uint64_t t = (uint64_t)a->d[0] + b->d[0]; r->d[0] = t & 0xFFFFFFFFULL; t >>= 32; @@ -130,13 +130,13 @@ static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, r->d[6] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)a->d[7] + b->d[7]; r->d[7] = t & 0xFFFFFFFFULL; t >>= 32; - overflow = t + secp256k1_scalar_check_overflow(r); + overflow = t + rustsecp256k1_v0_1_0_scalar_check_overflow(r); VERIFY_CHECK(overflow == 0 || overflow == 1); - secp256k1_scalar_reduce(r, overflow); + rustsecp256k1_v0_1_0_scalar_reduce(r, overflow); return overflow; } -static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) { +static void rustsecp256k1_v0_1_0_scalar_cadd_bit(rustsecp256k1_v0_1_0_scalar *r, unsigned int bit, int flag) { uint64_t t; VERIFY_CHECK(bit < 256); bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 5) > 7 makes this a noop */ @@ -158,11 +158,11 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int r->d[7] = t & 0xFFFFFFFFULL; #ifdef VERIFY VERIFY_CHECK((t >> 32) == 0); - VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0); + VERIFY_CHECK(rustsecp256k1_v0_1_0_scalar_check_overflow(r) == 0); #endif } -static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) { +static void rustsecp256k1_v0_1_0_scalar_set_b32(rustsecp256k1_v0_1_0_scalar *r, const unsigned char *b32, int *overflow) { int over; r->d[0] = (uint32_t)b32[31] | (uint32_t)b32[30] << 8 | (uint32_t)b32[29] << 16 | (uint32_t)b32[28] << 24; r->d[1] = (uint32_t)b32[27] | (uint32_t)b32[26] << 8 | (uint32_t)b32[25] << 16 | (uint32_t)b32[24] << 24; @@ -172,13 +172,13 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b r->d[5] = (uint32_t)b32[11] | (uint32_t)b32[10] << 8 | (uint32_t)b32[9] << 16 | (uint32_t)b32[8] << 24; r->d[6] = (uint32_t)b32[7] | (uint32_t)b32[6] << 8 | (uint32_t)b32[5] << 16 | (uint32_t)b32[4] << 24; r->d[7] = (uint32_t)b32[3] | (uint32_t)b32[2] << 8 | (uint32_t)b32[1] << 16 | (uint32_t)b32[0] << 24; - over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r)); + over = rustsecp256k1_v0_1_0_scalar_reduce(r, rustsecp256k1_v0_1_0_scalar_check_overflow(r)); if (overflow) { *overflow = over; } } -static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) { +static void rustsecp256k1_v0_1_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_1_0_scalar* a) { bin[0] = a->d[7] >> 24; bin[1] = a->d[7] >> 16; bin[2] = a->d[7] >> 8; bin[3] = a->d[7]; bin[4] = a->d[6] 
>> 24; bin[5] = a->d[6] >> 16; bin[6] = a->d[6] >> 8; bin[7] = a->d[6]; bin[8] = a->d[5] >> 24; bin[9] = a->d[5] >> 16; bin[10] = a->d[5] >> 8; bin[11] = a->d[5]; @@ -189,12 +189,12 @@ static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0]; } -SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_scalar_is_zero(const rustsecp256k1_v0_1_0_scalar *a) { return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0; } -static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) { - uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(a) == 0); +static void rustsecp256k1_v0_1_0_scalar_negate(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a) { + uint32_t nonzero = 0xFFFFFFFFUL * (rustsecp256k1_v0_1_0_scalar_is_zero(a) == 0); uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1; r->d[0] = t & nonzero; t >>= 32; t += (uint64_t)(~a->d[1]) + SECP256K1_N_1; @@ -213,11 +213,11 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar r->d[7] = t & nonzero; } -SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_scalar_is_one(const rustsecp256k1_v0_1_0_scalar *a) { return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0; } -static int secp256k1_scalar_is_high(const secp256k1_scalar *a) { +static int rustsecp256k1_v0_1_0_scalar_is_high(const rustsecp256k1_v0_1_0_scalar *a) { int yes = 0; int no = 0; no |= (a->d[7] < SECP256K1_N_H_7); @@ -235,11 +235,11 @@ static int secp256k1_scalar_is_high(const secp256k1_scalar *a) { return yes; } -static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) { +static int rustsecp256k1_v0_1_0_scalar_cond_negate(rustsecp256k1_v0_1_0_scalar *r, int flag) { /* If we are flag = 0, mask = 00...00 and this is a no-op; - * if we are flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate */ + * if we are flag = 1, mask = 11...11 and this is identical to rustsecp256k1_v0_1_0_scalar_negate */ uint32_t mask = !flag - 1; - uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(r) == 0); + uint32_t nonzero = 0xFFFFFFFFUL * (rustsecp256k1_v0_1_0_scalar_is_zero(r) == 0); uint64_t t = (uint64_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask); r->d[0] = t & nonzero; t >>= 32; t += (uint64_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask); @@ -346,7 +346,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) { VERIFY_CHECK(c2 == 0); \ } -static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint32_t *l) { +static void rustsecp256k1_v0_1_0_scalar_reduce_512(rustsecp256k1_v0_1_0_scalar *r, const uint32_t *l) { uint64_t c; uint32_t n0 = l[8], n1 = l[9], n2 = l[10], n3 = l[11], n4 = l[12], n5 = l[13], n6 = l[14], n7 = l[15]; uint32_t m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12; @@ -485,10 +485,10 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint32_t *l) r->d[7] = c & 0xFFFFFFFFUL; c >>= 32; /* Final reduction of r. 
*/ - secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r)); + rustsecp256k1_v0_1_0_scalar_reduce(r, c + rustsecp256k1_v0_1_0_scalar_check_overflow(r)); } -static void secp256k1_scalar_mul_512(uint32_t *l, const secp256k1_scalar *a, const secp256k1_scalar *b) { +static void rustsecp256k1_v0_1_0_scalar_mul_512(uint32_t *l, const rustsecp256k1_v0_1_0_scalar *a, const rustsecp256k1_v0_1_0_scalar *b) { /* 96 bit accumulator. */ uint32_t c0 = 0, c1 = 0, c2 = 0; @@ -576,7 +576,7 @@ static void secp256k1_scalar_mul_512(uint32_t *l, const secp256k1_scalar *a, con l[15] = c0; } -static void secp256k1_scalar_sqr_512(uint32_t *l, const secp256k1_scalar *a) { +static void rustsecp256k1_v0_1_0_scalar_sqr_512(uint32_t *l, const rustsecp256k1_v0_1_0_scalar *a) { /* 96 bit accumulator. */ uint32_t c0 = 0, c1 = 0, c2 = 0; @@ -644,13 +644,13 @@ static void secp256k1_scalar_sqr_512(uint32_t *l, const secp256k1_scalar *a) { #undef extract #undef extract_fast -static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { +static void rustsecp256k1_v0_1_0_scalar_mul(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a, const rustsecp256k1_v0_1_0_scalar *b) { uint32_t l[16]; - secp256k1_scalar_mul_512(l, a, b); - secp256k1_scalar_reduce_512(r, l); + rustsecp256k1_v0_1_0_scalar_mul_512(l, a, b); + rustsecp256k1_v0_1_0_scalar_reduce_512(r, l); } -static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) { +static int rustsecp256k1_v0_1_0_scalar_shr_int(rustsecp256k1_v0_1_0_scalar *r, int n) { int ret; VERIFY_CHECK(n > 0); VERIFY_CHECK(n < 16); @@ -666,14 +666,14 @@ static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) { return ret; } -static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) { +static void rustsecp256k1_v0_1_0_scalar_sqr(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a) { uint32_t l[16]; - secp256k1_scalar_sqr_512(l, a); - secp256k1_scalar_reduce_512(r, l); + rustsecp256k1_v0_1_0_scalar_sqr_512(l, a); + rustsecp256k1_v0_1_0_scalar_reduce_512(r, l); } #ifdef USE_ENDOMORPHISM -static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) { +static void rustsecp256k1_v0_1_0_scalar_split_128(rustsecp256k1_v0_1_0_scalar *r1, rustsecp256k1_v0_1_0_scalar *r2, const rustsecp256k1_v0_1_0_scalar *a) { r1->d[0] = a->d[0]; r1->d[1] = a->d[1]; r1->d[2] = a->d[2]; @@ -693,17 +693,17 @@ static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r } #endif -SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) { +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_scalar_eq(const rustsecp256k1_v0_1_0_scalar *a, const rustsecp256k1_v0_1_0_scalar *b) { return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0; } -SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) { +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_scalar_mul_shift_var(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a, const rustsecp256k1_v0_1_0_scalar *b, unsigned int shift) { uint32_t l[16]; unsigned int shiftlimbs; unsigned int shiftlow; unsigned int shifthigh; VERIFY_CHECK(shift >= 256); - secp256k1_scalar_mul_512(l, a, b); + 
rustsecp256k1_v0_1_0_scalar_mul_512(l, a, b); shiftlimbs = shift >> 5; shiftlow = shift & 0x1F; shifthigh = 32 - shiftlow; @@ -715,7 +715,7 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, r->d[5] = shift < 352 ? (l[5 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[6 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0; - secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1); + rustsecp256k1_v0_1_0_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1); } #endif /* SECP256K1_SCALAR_REPR_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_impl.h b/secp256k1-sys/depend/secp256k1/src/scalar_impl.h index fa79057..d12e80e 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_impl.h @@ -25,14 +25,14 @@ #endif #ifndef USE_NUM_NONE -static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a) { +static void rustsecp256k1_v0_1_0_scalar_get_num(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_scalar *a) { unsigned char c[32]; - secp256k1_scalar_get_b32(c, a); - secp256k1_num_set_bin(r, c, 32); + rustsecp256k1_v0_1_0_scalar_get_b32(c, a); + rustsecp256k1_v0_1_0_num_set_bin(r, c, 32); } -/** secp256k1 curve order, see secp256k1_ecdsa_const_order_as_fe in ecdsa_impl.h */ -static void secp256k1_scalar_order_get_num(secp256k1_num *r) { +/** secp256k1 curve order, see rustsecp256k1_v0_1_0_ecdsa_const_order_as_fe in ecdsa_impl.h */ +static void rustsecp256k1_v0_1_0_scalar_order_get_num(rustsecp256k1_v0_1_0_num *r) { #if defined(EXHAUSTIVE_TEST_ORDER) static const unsigned char order[32] = { 0,0,0,0,0,0,0,0, @@ -48,11 +48,11 @@ static void secp256k1_scalar_order_get_num(secp256k1_num *r) { 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41 }; #endif - secp256k1_num_set_bin(r, order, 32); + rustsecp256k1_v0_1_0_num_set_bin(r, order, 32); } #endif -static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) { +static void rustsecp256k1_v0_1_0_scalar_inverse(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *x) { #if defined(EXHAUSTIVE_TEST_ORDER) int i; *r = 0; @@ -64,180 +64,180 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar VERIFY_CHECK(*r != 0); } #else - secp256k1_scalar *t; + rustsecp256k1_v0_1_0_scalar *t; int i; /* First compute xN as x ^ (2^N - 1) for some values of N, * and uM as x ^ M for some values of M. 
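The inverse routine whose preamble appears above is Fermat inversion: the group order n is prime, so raising a nonzero scalar to the power n - 2 inverts it, and the x2/x3/x6/... values are a fixed addition chain for that exponent. In LaTeX:

    x^{\,n-1} \equiv 1 \pmod{n}
        \;\Longrightarrow\;
    x^{-1} \equiv x^{\,n-2} \pmod{n} \qquad (x \not\equiv 0)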
*/ - secp256k1_scalar x2, x3, x6, x8, x14, x28, x56, x112, x126; - secp256k1_scalar u2, u5, u9, u11, u13; + rustsecp256k1_v0_1_0_scalar x2, x3, x6, x8, x14, x28, x56, x112, x126; + rustsecp256k1_v0_1_0_scalar u2, u5, u9, u11, u13; - secp256k1_scalar_sqr(&u2, x); - secp256k1_scalar_mul(&x2, &u2, x); - secp256k1_scalar_mul(&u5, &u2, &x2); - secp256k1_scalar_mul(&x3, &u5, &u2); - secp256k1_scalar_mul(&u9, &x3, &u2); - secp256k1_scalar_mul(&u11, &u9, &u2); - secp256k1_scalar_mul(&u13, &u11, &u2); + rustsecp256k1_v0_1_0_scalar_sqr(&u2, x); + rustsecp256k1_v0_1_0_scalar_mul(&x2, &u2, x); + rustsecp256k1_v0_1_0_scalar_mul(&u5, &u2, &x2); + rustsecp256k1_v0_1_0_scalar_mul(&x3, &u5, &u2); + rustsecp256k1_v0_1_0_scalar_mul(&u9, &x3, &u2); + rustsecp256k1_v0_1_0_scalar_mul(&u11, &u9, &u2); + rustsecp256k1_v0_1_0_scalar_mul(&u13, &u11, &u2); - secp256k1_scalar_sqr(&x6, &u13); - secp256k1_scalar_sqr(&x6, &x6); - secp256k1_scalar_mul(&x6, &x6, &u11); + rustsecp256k1_v0_1_0_scalar_sqr(&x6, &u13); + rustsecp256k1_v0_1_0_scalar_sqr(&x6, &x6); + rustsecp256k1_v0_1_0_scalar_mul(&x6, &x6, &u11); - secp256k1_scalar_sqr(&x8, &x6); - secp256k1_scalar_sqr(&x8, &x8); - secp256k1_scalar_mul(&x8, &x8, &x2); + rustsecp256k1_v0_1_0_scalar_sqr(&x8, &x6); + rustsecp256k1_v0_1_0_scalar_sqr(&x8, &x8); + rustsecp256k1_v0_1_0_scalar_mul(&x8, &x8, &x2); - secp256k1_scalar_sqr(&x14, &x8); + rustsecp256k1_v0_1_0_scalar_sqr(&x14, &x8); for (i = 0; i < 5; i++) { - secp256k1_scalar_sqr(&x14, &x14); + rustsecp256k1_v0_1_0_scalar_sqr(&x14, &x14); } - secp256k1_scalar_mul(&x14, &x14, &x6); + rustsecp256k1_v0_1_0_scalar_mul(&x14, &x14, &x6); - secp256k1_scalar_sqr(&x28, &x14); + rustsecp256k1_v0_1_0_scalar_sqr(&x28, &x14); for (i = 0; i < 13; i++) { - secp256k1_scalar_sqr(&x28, &x28); + rustsecp256k1_v0_1_0_scalar_sqr(&x28, &x28); } - secp256k1_scalar_mul(&x28, &x28, &x14); + rustsecp256k1_v0_1_0_scalar_mul(&x28, &x28, &x14); - secp256k1_scalar_sqr(&x56, &x28); + rustsecp256k1_v0_1_0_scalar_sqr(&x56, &x28); for (i = 0; i < 27; i++) { - secp256k1_scalar_sqr(&x56, &x56); + rustsecp256k1_v0_1_0_scalar_sqr(&x56, &x56); } - secp256k1_scalar_mul(&x56, &x56, &x28); + rustsecp256k1_v0_1_0_scalar_mul(&x56, &x56, &x28); - secp256k1_scalar_sqr(&x112, &x56); + rustsecp256k1_v0_1_0_scalar_sqr(&x112, &x56); for (i = 0; i < 55; i++) { - secp256k1_scalar_sqr(&x112, &x112); + rustsecp256k1_v0_1_0_scalar_sqr(&x112, &x112); } - secp256k1_scalar_mul(&x112, &x112, &x56); + rustsecp256k1_v0_1_0_scalar_mul(&x112, &x112, &x56); - secp256k1_scalar_sqr(&x126, &x112); + rustsecp256k1_v0_1_0_scalar_sqr(&x126, &x112); for (i = 0; i < 13; i++) { - secp256k1_scalar_sqr(&x126, &x126); + rustsecp256k1_v0_1_0_scalar_sqr(&x126, &x126); } - secp256k1_scalar_mul(&x126, &x126, &x14); + rustsecp256k1_v0_1_0_scalar_mul(&x126, &x126, &x14); /* Then accumulate the final result (t starts at x126). 
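The accumulation that follows interleaves runs of `_scalar_sqr` with `_scalar_mul` by the small precomputed powers u5, u9, u11, u13, x2, x3: each run of k squarings shifts the exponent built so far left by k bits, and each multiplication appends a bit window such as 101 or 1011 (the inline comments name the window being appended). A standalone toy of the same technique for the tiny exponent 87 = 0b1010111, using windows 101 and 111:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        const uint64_t p = 1000003, x = 12345;    /* toy modulus and base */
        uint64_t x2 = x * x % p;                  /* x^2 */
        uint64_t u5 = x2 * x2 % p * x % p;        /* x^5 : window 101 */
        uint64_t u7 = u5 * x2 % p;                /* x^7 : window 111 */
        uint64_t t = u5, check = 1;
        int i;
        for (i = 0; i < 4; i++) t = t * t % p;    /* 4 squarings: x^80 */
        t = t * u7 % p;                           /* append 0111: x^87 */
        for (i = 0; i < 87; i++) check = check * x % p;   /* direct power */
        printf("windowed=%llu direct=%llu\n",
               (unsigned long long)t, (unsigned long long)check);
        return 0;
    }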
*/ t = &x126; for (i = 0; i < 3; i++) { - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &u5); /* 101 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &u5); /* 101 */ for (i = 0; i < 4; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &x3); /* 111 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &x3); /* 111 */ for (i = 0; i < 4; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &u5); /* 101 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &u5); /* 101 */ for (i = 0; i < 5; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &u11); /* 1011 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &u11); /* 1011 */ for (i = 0; i < 4; i++) { - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &u11); /* 1011 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &u11); /* 1011 */ for (i = 0; i < 4; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &x3); /* 111 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &x3); /* 111 */ for (i = 0; i < 5; i++) { /* 00 */ - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &x3); /* 111 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &x3); /* 111 */ for (i = 0; i < 6; i++) { /* 00 */ - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &u13); /* 1101 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &u13); /* 1101 */ for (i = 0; i < 4; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &u5); /* 101 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &u5); /* 101 */ for (i = 0; i < 3; i++) { - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &x3); /* 111 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &x3); /* 111 */ for (i = 0; i < 5; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &u9); /* 1001 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &u9); /* 1001 */ for (i = 0; i < 6; i++) { /* 000 */ - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &u5); /* 101 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &u5); /* 101 */ for (i = 0; i < 10; i++) { /* 0000000 */ - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &x3); /* 111 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &x3); /* 111 */ for (i = 0; i < 4; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &x3); /* 111 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &x3); /* 111 */ for (i = 0; i < 9; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &x8); /* 11111111 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &x8); /* 11111111 */ for (i = 0; i < 5; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &u9); /* 1001 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &u9); /* 1001 */ for (i = 0; i < 6; i++) { /* 00 */ - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &u11); /* 1011 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, 
&u11); /* 1011 */ for (i = 0; i < 4; i++) { - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &u13); /* 1101 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &u13); /* 1101 */ for (i = 0; i < 5; i++) { - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &x2); /* 11 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &x2); /* 11 */ for (i = 0; i < 6; i++) { /* 00 */ - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &u13); /* 1101 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &u13); /* 1101 */ for (i = 0; i < 10; i++) { /* 000000 */ - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &u13); /* 1101 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &u13); /* 1101 */ for (i = 0; i < 4; i++) { - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &u9); /* 1001 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, &u9); /* 1001 */ for (i = 0; i < 6; i++) { /* 00000 */ - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, x); /* 1 */ + rustsecp256k1_v0_1_0_scalar_mul(t, t, x); /* 1 */ for (i = 0; i < 8; i++) { /* 00 */ - secp256k1_scalar_sqr(t, t); + rustsecp256k1_v0_1_0_scalar_sqr(t, t); } - secp256k1_scalar_mul(r, t, &x6); /* 111111 */ + rustsecp256k1_v0_1_0_scalar_mul(r, t, &x6); /* 111111 */ } -SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_scalar_is_even(const rustsecp256k1_v0_1_0_scalar *a) { return !(a->d[0] & 1); } #endif -static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) { +static void rustsecp256k1_v0_1_0_scalar_inverse_var(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *x) { #if defined(USE_SCALAR_INV_BUILTIN) - secp256k1_scalar_inverse(r, x); + rustsecp256k1_v0_1_0_scalar_inverse(r, x); #elif defined(USE_SCALAR_INV_NUM) unsigned char b[32]; - secp256k1_num n, m; - secp256k1_scalar t = *x; - secp256k1_scalar_get_b32(b, &t); - secp256k1_num_set_bin(&n, b, 32); - secp256k1_scalar_order_get_num(&m); - secp256k1_num_mod_inverse(&n, &n, &m); - secp256k1_num_get_bin(b, 32, &n); - secp256k1_scalar_set_b32(r, b, NULL); + rustsecp256k1_v0_1_0_num n, m; + rustsecp256k1_v0_1_0_scalar t = *x; + rustsecp256k1_v0_1_0_scalar_get_b32(b, &t); + rustsecp256k1_v0_1_0_num_set_bin(&n, b, 32); + rustsecp256k1_v0_1_0_scalar_order_get_num(&m); + rustsecp256k1_v0_1_0_num_mod_inverse(&n, &n, &m); + rustsecp256k1_v0_1_0_num_get_bin(b, 32, &n); + rustsecp256k1_v0_1_0_scalar_set_b32(r, b, NULL); /* Verify that the inverse was computed correctly, without GMP code. */ - secp256k1_scalar_mul(&t, &t, r); - CHECK(secp256k1_scalar_is_one(&t)); + rustsecp256k1_v0_1_0_scalar_mul(&t, &t, r); + CHECK(rustsecp256k1_v0_1_0_scalar_is_one(&t)); #else #error "Please select scalar inverse implementation" #endif @@ -251,7 +251,7 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc * nontrivial to get full test coverage for the exhaustive tests. We therefore * (arbitrarily) set k2 = k + 5 and k1 = k - k2 * lambda. 
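For the exhaustive-test build described in the comment above, the split is trivially consistent: with r2 = k + 5 and r1 = k - lambda*r2, the lambda terms cancel in the defining identity. In LaTeX:

    r_2 = k + 5, \qquad r_1 = k - \lambda r_2
        \;\Longrightarrow\;
    r_1 + \lambda r_2 \equiv k \pmod{\mathrm{order}}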
*/ -static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) { +static void rustsecp256k1_v0_1_0_scalar_split_lambda(rustsecp256k1_v0_1_0_scalar *r1, rustsecp256k1_v0_1_0_scalar *r2, const rustsecp256k1_v0_1_0_scalar *a) { *r2 = (*a + 5) % EXHAUSTIVE_TEST_ORDER; *r1 = (*a + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER; } @@ -294,38 +294,38 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar * The function below splits a in r1 and r2, such that r1 + lambda * r2 == a (mod order). */ -static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) { - secp256k1_scalar c1, c2; - static const secp256k1_scalar minus_lambda = SECP256K1_SCALAR_CONST( +static void rustsecp256k1_v0_1_0_scalar_split_lambda(rustsecp256k1_v0_1_0_scalar *r1, rustsecp256k1_v0_1_0_scalar *r2, const rustsecp256k1_v0_1_0_scalar *a) { + rustsecp256k1_v0_1_0_scalar c1, c2; + static const rustsecp256k1_v0_1_0_scalar minus_lambda = SECP256K1_SCALAR_CONST( 0xAC9C52B3UL, 0x3FA3CF1FUL, 0x5AD9E3FDUL, 0x77ED9BA4UL, 0xA880B9FCUL, 0x8EC739C2UL, 0xE0CFC810UL, 0xB51283CFUL ); - static const secp256k1_scalar minus_b1 = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_1_0_scalar minus_b1 = SECP256K1_SCALAR_CONST( 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL, 0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL ); - static const secp256k1_scalar minus_b2 = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_1_0_scalar minus_b2 = SECP256K1_SCALAR_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL ); - static const secp256k1_scalar g1 = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_1_0_scalar g1 = SECP256K1_SCALAR_CONST( 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00003086UL, 0xD221A7D4UL, 0x6BCDE86CUL, 0x90E49284UL, 0xEB153DABUL ); - static const secp256k1_scalar g2 = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_1_0_scalar g2 = SECP256K1_SCALAR_CONST( 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x0000E443UL, 0x7ED6010EUL, 0x88286F54UL, 0x7FA90ABFUL, 0xE4C42212UL ); VERIFY_CHECK(r1 != a); VERIFY_CHECK(r2 != a); /* these _var calls are constant time since the shift amount is constant */ - secp256k1_scalar_mul_shift_var(&c1, a, &g1, 272); - secp256k1_scalar_mul_shift_var(&c2, a, &g2, 272); - secp256k1_scalar_mul(&c1, &c1, &minus_b1); - secp256k1_scalar_mul(&c2, &c2, &minus_b2); - secp256k1_scalar_add(r2, &c1, &c2); - secp256k1_scalar_mul(r1, r2, &minus_lambda); - secp256k1_scalar_add(r1, r1, a); + rustsecp256k1_v0_1_0_scalar_mul_shift_var(&c1, a, &g1, 272); + rustsecp256k1_v0_1_0_scalar_mul_shift_var(&c2, a, &g2, 272); + rustsecp256k1_v0_1_0_scalar_mul(&c1, &c1, &minus_b1); + rustsecp256k1_v0_1_0_scalar_mul(&c2, &c2, &minus_b2); + rustsecp256k1_v0_1_0_scalar_add(r2, &c1, &c2); + rustsecp256k1_v0_1_0_scalar_mul(r1, r2, &minus_lambda); + rustsecp256k1_v0_1_0_scalar_add(r1, r1, a); } #endif #endif diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_low.h b/secp256k1-sys/depend/secp256k1/src/scalar_low.h index 5836feb..68505fa 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_low.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_low.h @@ -10,6 +10,6 @@ #include /** A scalar modulo the group order of the secp256k1 curve. 
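The production split above is the standard GLV lattice decomposition: `_scalar_mul_shift_var(..., 272)` computes rounded fixed-point products with the precomputed g1 and g2, the lattice constants b1 and b2 (stored negated as minus_b1 and minus_b2 so only adds and muls are needed) pull a down to two parts, and per the scalar.h comment each part is at most about 128 bits. Symbolically:

    c_1 = \lfloor a\,g_1 / 2^{272} \rceil, \quad
    c_2 = \lfloor a\,g_2 / 2^{272} \rceil, \quad
    r_2 = -(c_1 b_1 + c_2 b_2) \bmod n, \quad
    r_1 = a - \lambda\,r_2 \bmod n

so r_1 + \lambda r_2 \equiv a \pmod{n} holds by construction, which is exactly what the final `_scalar_mul(r1, r2, &minus_lambda)` followed by `_scalar_add(r1, r1, a)` computes.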
*/ -typedef uint32_t secp256k1_scalar; +typedef uint32_t rustsecp256k1_v0_1_0_scalar; #endif /* SECP256K1_SCALAR_REPR_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_low_impl.h b/secp256k1-sys/depend/secp256k1/src/scalar_low_impl.h index c80e70c..970558b 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_low_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_low_impl.h @@ -11,40 +11,40 @@ #include -SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_scalar_is_even(const rustsecp256k1_v0_1_0_scalar *a) { return !(*a & 1); } -SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { *r = 0; } -SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) { *r = v; } +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_scalar_clear(rustsecp256k1_v0_1_0_scalar *r) { *r = 0; } +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_scalar_set_int(rustsecp256k1_v0_1_0_scalar *r, unsigned int v) { *r = v; } -SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_1_0_scalar_get_bits(const rustsecp256k1_v0_1_0_scalar *a, unsigned int offset, unsigned int count) { if (offset < 32) return ((*a >> offset) & ((((uint32_t)1) << count) - 1)); else return 0; } -SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { - return secp256k1_scalar_get_bits(a, offset, count); +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_1_0_scalar_get_bits_var(const rustsecp256k1_v0_1_0_scalar *a, unsigned int offset, unsigned int count) { + return rustsecp256k1_v0_1_0_scalar_get_bits(a, offset, count); } -SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; } +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_scalar_check_overflow(const rustsecp256k1_v0_1_0_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; } -static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { +static int rustsecp256k1_v0_1_0_scalar_add(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a, const rustsecp256k1_v0_1_0_scalar *b) { *r = (*a + *b) % EXHAUSTIVE_TEST_ORDER; return *r < *b; } -static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) { +static void rustsecp256k1_v0_1_0_scalar_cadd_bit(rustsecp256k1_v0_1_0_scalar *r, unsigned int bit, int flag) { if (flag && bit < 32) *r += (1 << bit); #ifdef VERIFY - VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0); + VERIFY_CHECK(rustsecp256k1_v0_1_0_scalar_check_overflow(r) == 0); #endif } -static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) { +static void rustsecp256k1_v0_1_0_scalar_set_b32(rustsecp256k1_v0_1_0_scalar *r, const unsigned char *b32, int *overflow) { const int base = 0x100 % EXHAUSTIVE_TEST_ORDER; int i; *r = 0; @@ -55,16 +55,16 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b if (overflow) *overflow = 0; } -static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) { +static void rustsecp256k1_v0_1_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_1_0_scalar* a) { memset(bin, 0, 32); bin[28] = *a >> 24; bin[29] = *a >> 16; bin[30] = *a >> 8; bin[31] = *a; 
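In the exhaustive-test representation a scalar is a single uint32 modulo a tiny order, so `_scalar_set_b32` only has to fold 32 big-endian bytes down modulo that order; precomputing base = 0x100 mod order, visible in the prologue above, is the usual first step of Horner-style folding. A standalone toy of that general technique for order 13; the loop body here is an illustration of the pattern, not a quote of the elided hunk:

    #include <stdio.h>

    int main(void) {
        const unsigned order = 13;
        const unsigned base = 0x100 % order;   /* as in the prologue above */
        unsigned char b32[32] = {0};
        unsigned r = 0;
        int i;
        b32[31] = 200;                         /* the value 200, big endian */
        for (i = 0; i < 32; i++) {
            r = (r * base + b32[i] % order) % order;   /* Horner fold */
        }
        printf("200 mod 13 = %u (expect %u)\n", r, 200u % order);
        return 0;
    }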
} -SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_scalar_is_zero(const rustsecp256k1_v0_1_0_scalar *a) { return *a == 0; } -static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) { +static void rustsecp256k1_v0_1_0_scalar_negate(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a) { if (*a == 0) { *r = 0; } else { @@ -72,24 +72,24 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar } } -SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_scalar_is_one(const rustsecp256k1_v0_1_0_scalar *a) { return *a == 1; } -static int secp256k1_scalar_is_high(const secp256k1_scalar *a) { +static int rustsecp256k1_v0_1_0_scalar_is_high(const rustsecp256k1_v0_1_0_scalar *a) { return *a > EXHAUSTIVE_TEST_ORDER / 2; } -static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) { - if (flag) secp256k1_scalar_negate(r, r); +static int rustsecp256k1_v0_1_0_scalar_cond_negate(rustsecp256k1_v0_1_0_scalar *r, int flag) { + if (flag) rustsecp256k1_v0_1_0_scalar_negate(r, r); return flag ? -1 : 1; } -static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { +static void rustsecp256k1_v0_1_0_scalar_mul(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a, const rustsecp256k1_v0_1_0_scalar *b) { *r = (*a * *b) % EXHAUSTIVE_TEST_ORDER; } -static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) { +static int rustsecp256k1_v0_1_0_scalar_shr_int(rustsecp256k1_v0_1_0_scalar *r, int n) { int ret; VERIFY_CHECK(n > 0); VERIFY_CHECK(n < 16); @@ -98,16 +98,16 @@ static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) { return ret; } -static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) { +static void rustsecp256k1_v0_1_0_scalar_sqr(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *a) { *r = (*a * *a) % EXHAUSTIVE_TEST_ORDER; } -static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) { +static void rustsecp256k1_v0_1_0_scalar_split_128(rustsecp256k1_v0_1_0_scalar *r1, rustsecp256k1_v0_1_0_scalar *r2, const rustsecp256k1_v0_1_0_scalar *a) { *r1 = *a; *r2 = 0; } -SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) { +SECP256K1_INLINE static int rustsecp256k1_v0_1_0_scalar_eq(const rustsecp256k1_v0_1_0_scalar *a, const rustsecp256k1_v0_1_0_scalar *b) { return *a == *b; } diff --git a/secp256k1-sys/depend/secp256k1/src/scratch.h b/secp256k1-sys/depend/secp256k1/src/scratch.h index 814f1b1..902563f 100644 --- a/secp256k1-sys/depend/secp256k1/src/scratch.h +++ b/secp256k1-sys/depend/secp256k1/src/scratch.h @@ -9,7 +9,7 @@ /* The typedef is used internally; the struct name is used in the public API * (where it is exposed as a different typedef) */ -typedef struct secp256k1_scratch_space_struct { +typedef struct rustsecp256k1_v0_1_0_scratch_space_struct { /** guard against interpreting this object as other types */ unsigned char magic[8]; /** actual allocated data */ @@ -19,22 +19,24 @@ typedef struct secp256k1_scratch_space_struct { size_t alloc_size; /** maximum size available to allocate */ size_t max_size; -} secp256k1_scratch; +} rustsecp256k1_v0_1_0_scratch; +static rustsecp256k1_v0_1_0_scratch* rustsecp256k1_v0_1_0_scratch_create(const 
rustsecp256k1_v0_1_0_callback* error_callback, size_t max_size); +static void rustsecp256k1_v0_1_0_scratch_destroy(const rustsecp256k1_v0_1_0_callback* error_callback, rustsecp256k1_v0_1_0_scratch* scratch); /** Returns an opaque object used to "checkpoint" a scratch space. Used - * with `secp256k1_scratch_apply_checkpoint` to undo allocations. */ -static size_t secp256k1_scratch_checkpoint(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch); + * with `rustsecp256k1_v0_1_0_scratch_apply_checkpoint` to undo allocations. */ +static size_t rustsecp256k1_v0_1_0_scratch_checkpoint(const rustsecp256k1_v0_1_0_callback* error_callback, const rustsecp256k1_v0_1_0_scratch* scratch); -/** Applies a check point received from `secp256k1_scratch_checkpoint`, +/** Applies a check point received from `rustsecp256k1_v0_1_0_scratch_checkpoint`, * undoing all allocations since that point. */ -static void secp256k1_scratch_apply_checkpoint(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t checkpoint); +static void rustsecp256k1_v0_1_0_scratch_apply_checkpoint(const rustsecp256k1_v0_1_0_callback* error_callback, rustsecp256k1_v0_1_0_scratch* scratch, size_t checkpoint); /** Returns the maximum allocation the scratch space will allow */ -static size_t secp256k1_scratch_max_allocation(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch, size_t n_objects); +static size_t rustsecp256k1_v0_1_0_scratch_max_allocation(const rustsecp256k1_v0_1_0_callback* error_callback, const rustsecp256k1_v0_1_0_scratch* scratch, size_t n_objects); /** Returns a pointer into the most recently allocated frame, or NULL if there is insufficient available space */ -static void *secp256k1_scratch_alloc(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t n); +static void *rustsecp256k1_v0_1_0_scratch_alloc(const rustsecp256k1_v0_1_0_callback* error_callback, rustsecp256k1_v0_1_0_scratch* scratch, size_t n); #endif diff --git a/secp256k1-sys/depend/secp256k1/src/scratch_impl.h b/secp256k1-sys/depend/secp256k1/src/scratch_impl.h index 1c42b07..78005cf 100644 --- a/secp256k1-sys/depend/secp256k1/src/scratch_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/scratch_impl.h @@ -10,29 +10,29 @@ #include "util.h" #include "scratch.h" -static size_t secp256k1_scratch_checkpoint(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch) { +static size_t rustsecp256k1_v0_1_0_scratch_checkpoint(const rustsecp256k1_v0_1_0_callback* error_callback, const rustsecp256k1_v0_1_0_scratch* scratch) { if (memcmp(scratch->magic, "scratch", 8) != 0) { - secp256k1_callback_call(error_callback, "invalid scratch space"); + rustsecp256k1_v0_1_0_callback_call(error_callback, "invalid scratch space"); return 0; } return scratch->alloc_size; } -static void secp256k1_scratch_apply_checkpoint(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t checkpoint) { +static void rustsecp256k1_v0_1_0_scratch_apply_checkpoint(const rustsecp256k1_v0_1_0_callback* error_callback, rustsecp256k1_v0_1_0_scratch* scratch, size_t checkpoint) { if (memcmp(scratch->magic, "scratch", 8) != 0) { - secp256k1_callback_call(error_callback, "invalid scratch space"); + rustsecp256k1_v0_1_0_callback_call(error_callback, "invalid scratch space"); return; } if (checkpoint > scratch->alloc_size) { - secp256k1_callback_call(error_callback, "invalid checkpoint"); + rustsecp256k1_v0_1_0_callback_call(error_callback, "invalid checkpoint"); return; } 
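The checkpoint pair implemented above gives the scratch space a stack discipline: record the current allocation size, allocate freely, then roll every allocation back in one call. A hedged usage sketch, assuming in-library compilation and an already-created scratch object; demo_scratch_stack and the 1024-byte size are hypothetical:

    /* Hypothetical in-library helper, for illustration only. */
    static void demo_scratch_stack(const rustsecp256k1_v0_1_0_callback *cb,
                                   rustsecp256k1_v0_1_0_scratch *scratch) {
        size_t cp = rustsecp256k1_v0_1_0_scratch_checkpoint(cb, scratch);
        void *tmp = rustsecp256k1_v0_1_0_scratch_alloc(cb, scratch, 1024);
        if (tmp != NULL) {
            /* ... use the temporary buffer ... */
        }
        /* Undo every allocation made since the checkpoint. */
        rustsecp256k1_v0_1_0_scratch_apply_checkpoint(cb, scratch, cp);
    }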
scratch->alloc_size = checkpoint; } -static size_t secp256k1_scratch_max_allocation(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch, size_t objects) { +static size_t rustsecp256k1_v0_1_0_scratch_max_allocation(const rustsecp256k1_v0_1_0_callback* error_callback, const rustsecp256k1_v0_1_0_scratch* scratch, size_t objects) { if (memcmp(scratch->magic, "scratch", 8) != 0) { - secp256k1_callback_call(error_callback, "invalid scratch space"); + rustsecp256k1_v0_1_0_callback_call(error_callback, "invalid scratch space"); return 0; } if (scratch->max_size - scratch->alloc_size <= objects * (ALIGNMENT - 1)) { @@ -41,12 +41,12 @@ static size_t secp256k1_scratch_max_allocation(const secp256k1_callback* error_c return scratch->max_size - scratch->alloc_size - objects * (ALIGNMENT - 1); } -static void *secp256k1_scratch_alloc(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t size) { +static void *rustsecp256k1_v0_1_0_scratch_alloc(const rustsecp256k1_v0_1_0_callback* error_callback, rustsecp256k1_v0_1_0_scratch* scratch, size_t size) { void *ret; size = ROUND_TO_ALIGN(size); if (memcmp(scratch->magic, "scratch", 8) != 0) { - secp256k1_callback_call(error_callback, "invalid scratch space"); + rustsecp256k1_v0_1_0_callback_call(error_callback, "invalid scratch space"); return NULL; } diff --git a/secp256k1-sys/depend/secp256k1/src/secp256k1.c b/secp256k1-sys/depend/secp256k1/src/secp256k1.c index 44e7961..2f252bc 100644 --- a/secp256k1-sys/depend/secp256k1/src/secp256k1.c +++ b/secp256k1-sys/depend/secp256k1/src/secp256k1.c @@ -22,65 +22,65 @@ #define ARG_CHECK(cond) do { \ if (EXPECT(!(cond), 0)) { \ - secp256k1_callback_call(&ctx->illegal_callback, #cond); \ + rustsecp256k1_v0_1_0_callback_call(&ctx->illegal_callback, #cond); \ return 0; \ } \ } while(0) #define ARG_CHECK_NO_RETURN(cond) do { \ if (EXPECT(!(cond), 0)) { \ - secp256k1_callback_call(&ctx->illegal_callback, #cond); \ + rustsecp256k1_v0_1_0_callback_call(&ctx->illegal_callback, #cond); \ } \ } while(0) #ifndef USE_EXTERNAL_DEFAULT_CALLBACKS #include #include -static void secp256k1_default_illegal_callback_fn(const char* str, void* data) { +static void rustsecp256k1_v0_1_0_default_illegal_callback_fn(const char* str, void* data) { (void)data; fprintf(stderr, "[libsecp256k1] illegal argument: %s\n", str); abort(); } -static void secp256k1_default_error_callback_fn(const char* str, void* data) { +static void rustsecp256k1_v0_1_0_default_error_callback_fn(const char* str, void* data) { (void)data; fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str); abort(); } #else -void secp256k1_default_illegal_callback_fn(const char* str, void* data); -void secp256k1_default_error_callback_fn(const char* str, void* data); +void rustsecp256k1_v0_1_0_default_illegal_callback_fn(const char* str, void* data); +void rustsecp256k1_v0_1_0_default_error_callback_fn(const char* str, void* data); #endif -static const secp256k1_callback default_illegal_callback = { - secp256k1_default_illegal_callback_fn, +static const rustsecp256k1_v0_1_0_callback default_illegal_callback = { + rustsecp256k1_v0_1_0_default_illegal_callback_fn, NULL }; -static const secp256k1_callback default_error_callback = { - secp256k1_default_error_callback_fn, +static const rustsecp256k1_v0_1_0_callback default_error_callback = { + rustsecp256k1_v0_1_0_default_error_callback_fn, NULL }; -struct secp256k1_context_struct { - secp256k1_ecmult_context ecmult_ctx; - secp256k1_ecmult_gen_context ecmult_gen_ctx; - 
secp256k1_callback illegal_callback; - secp256k1_callback error_callback; +struct rustsecp256k1_v0_1_0_context_struct { + rustsecp256k1_v0_1_0_ecmult_context ecmult_ctx; + rustsecp256k1_v0_1_0_ecmult_gen_context ecmult_gen_ctx; + rustsecp256k1_v0_1_0_callback illegal_callback; + rustsecp256k1_v0_1_0_callback error_callback; }; -static const secp256k1_context secp256k1_context_no_precomp_ = { +static const rustsecp256k1_v0_1_0_context rustsecp256k1_v0_1_0_context_no_precomp_ = { { 0 }, { 0 }, - { secp256k1_default_illegal_callback_fn, 0 }, - { secp256k1_default_error_callback_fn, 0 } + { rustsecp256k1_v0_1_0_default_illegal_callback_fn, 0 }, + { rustsecp256k1_v0_1_0_default_error_callback_fn, 0 } }; -const secp256k1_context *secp256k1_context_no_precomp = &secp256k1_context_no_precomp_; +const rustsecp256k1_v0_1_0_context *rustsecp256k1_v0_1_0_context_no_precomp = &rustsecp256k1_v0_1_0_context_no_precomp_; -size_t secp256k1_context_preallocated_size(unsigned int flags) { - size_t ret = ROUND_TO_ALIGN(sizeof(secp256k1_context)); +size_t rustsecp256k1_v0_1_0_context_preallocated_size(unsigned int flags) { + size_t ret = ROUND_TO_ALIGN(sizeof(rustsecp256k1_v0_1_0_context)); if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) { - secp256k1_callback_call(&default_illegal_callback, + rustsecp256k1_v0_1_0_callback_call(&default_illegal_callback, "Invalid flags"); return 0; } @@ -94,138 +94,138 @@ size_t secp256k1_context_preallocated_size(unsigned int flags) { return ret; } -size_t secp256k1_context_preallocated_clone_size(const secp256k1_context* ctx) { - size_t ret = ROUND_TO_ALIGN(sizeof(secp256k1_context)); +size_t rustsecp256k1_v0_1_0_context_preallocated_clone_size(const rustsecp256k1_v0_1_0_context* ctx) { + size_t ret = ROUND_TO_ALIGN(sizeof(rustsecp256k1_v0_1_0_context)); VERIFY_CHECK(ctx != NULL); - if (secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { + if (rustsecp256k1_v0_1_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { ret += SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE; } - if (secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)) { + if (rustsecp256k1_v0_1_0_ecmult_context_is_built(&ctx->ecmult_ctx)) { ret += SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE; } return ret; } -secp256k1_context* secp256k1_context_preallocated_create(void* prealloc, unsigned int flags) { +rustsecp256k1_v0_1_0_context* rustsecp256k1_v0_1_0_context_preallocated_create(void* prealloc, unsigned int flags) { void* const base = prealloc; size_t prealloc_size; - secp256k1_context* ret; + rustsecp256k1_v0_1_0_context* ret; VERIFY_CHECK(prealloc != NULL); - prealloc_size = secp256k1_context_preallocated_size(flags); - ret = (secp256k1_context*)manual_alloc(&prealloc, sizeof(secp256k1_context), base, prealloc_size); + prealloc_size = rustsecp256k1_v0_1_0_context_preallocated_size(flags); + ret = (rustsecp256k1_v0_1_0_context*)manual_alloc(&prealloc, sizeof(rustsecp256k1_v0_1_0_context), base, prealloc_size); ret->illegal_callback = default_illegal_callback; ret->error_callback = default_error_callback; if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) { - secp256k1_callback_call(&ret->illegal_callback, + rustsecp256k1_v0_1_0_callback_call(&ret->illegal_callback, "Invalid flags"); return NULL; } - secp256k1_ecmult_context_init(&ret->ecmult_ctx); - secp256k1_ecmult_gen_context_init(&ret->ecmult_gen_ctx); + rustsecp256k1_v0_1_0_ecmult_context_init(&ret->ecmult_ctx); + 
rustsecp256k1_v0_1_0_ecmult_gen_context_init(&ret->ecmult_gen_ctx); if (flags & SECP256K1_FLAGS_BIT_CONTEXT_SIGN) { - secp256k1_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &prealloc); + rustsecp256k1_v0_1_0_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &prealloc); } if (flags & SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) { - secp256k1_ecmult_context_build(&ret->ecmult_ctx, &prealloc); + rustsecp256k1_v0_1_0_ecmult_context_build(&ret->ecmult_ctx, &prealloc); } - return (secp256k1_context*) ret; + return (rustsecp256k1_v0_1_0_context*) ret; } -secp256k1_context* secp256k1_context_preallocated_clone(const secp256k1_context* ctx, void* prealloc) { +rustsecp256k1_v0_1_0_context* rustsecp256k1_v0_1_0_context_preallocated_clone(const rustsecp256k1_v0_1_0_context* ctx, void* prealloc) { size_t prealloc_size; - secp256k1_context* ret; + rustsecp256k1_v0_1_0_context* ret; VERIFY_CHECK(ctx != NULL); ARG_CHECK(prealloc != NULL); - prealloc_size = secp256k1_context_preallocated_clone_size(ctx); - ret = (secp256k1_context*)prealloc; + prealloc_size = rustsecp256k1_v0_1_0_context_preallocated_clone_size(ctx); + ret = (rustsecp256k1_v0_1_0_context*)prealloc; memcpy(ret, ctx, prealloc_size); - secp256k1_ecmult_gen_context_finalize_memcpy(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx); - secp256k1_ecmult_context_finalize_memcpy(&ret->ecmult_ctx, &ctx->ecmult_ctx); + rustsecp256k1_v0_1_0_ecmult_gen_context_finalize_memcpy(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx); + rustsecp256k1_v0_1_0_ecmult_context_finalize_memcpy(&ret->ecmult_ctx, &ctx->ecmult_ctx); return ret; } -void secp256k1_context_preallocated_destroy(secp256k1_context* ctx) { - ARG_CHECK_NO_RETURN(ctx != secp256k1_context_no_precomp); +void rustsecp256k1_v0_1_0_context_preallocated_destroy(rustsecp256k1_v0_1_0_context* ctx) { + ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_1_0_context_no_precomp); if (ctx != NULL) { - secp256k1_ecmult_context_clear(&ctx->ecmult_ctx); - secp256k1_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx); + rustsecp256k1_v0_1_0_ecmult_context_clear(&ctx->ecmult_ctx); + rustsecp256k1_v0_1_0_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx); } } -void secp256k1_context_set_illegal_callback(secp256k1_context* ctx, void (*fun)(const char* message, void* data), const void* data) { - ARG_CHECK_NO_RETURN(ctx != secp256k1_context_no_precomp); +void rustsecp256k1_v0_1_0_context_set_illegal_callback(rustsecp256k1_v0_1_0_context* ctx, void (*fun)(const char* message, void* data), const void* data) { + ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_1_0_context_no_precomp); if (fun == NULL) { - fun = secp256k1_default_illegal_callback_fn; + fun = rustsecp256k1_v0_1_0_default_illegal_callback_fn; } ctx->illegal_callback.fn = fun; ctx->illegal_callback.data = data; } -void secp256k1_context_set_error_callback(secp256k1_context* ctx, void (*fun)(const char* message, void* data), const void* data) { - ARG_CHECK_NO_RETURN(ctx != secp256k1_context_no_precomp); +void rustsecp256k1_v0_1_0_context_set_error_callback(rustsecp256k1_v0_1_0_context* ctx, void (*fun)(const char* message, void* data), const void* data) { + ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_1_0_context_no_precomp); if (fun == NULL) { - fun = secp256k1_default_error_callback_fn; + fun = rustsecp256k1_v0_1_0_default_error_callback_fn; } ctx->error_callback.fn = fun; ctx->error_callback.data = data; } -static int secp256k1_pubkey_load(const secp256k1_context* ctx, secp256k1_ge* ge, const secp256k1_pubkey* pubkey) { - if (sizeof(secp256k1_ge_storage) == 64) { - /* When the secp256k1_ge_storage 
type is exactly 64 byte, use its - * representation inside secp256k1_pubkey, as conversion is very fast. - * Note that secp256k1_pubkey_save must use the same representation. */ - secp256k1_ge_storage s; +static int rustsecp256k1_v0_1_0_pubkey_load(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_ge* ge, const rustsecp256k1_v0_1_0_pubkey* pubkey) { + if (sizeof(rustsecp256k1_v0_1_0_ge_storage) == 64) { + /* When the rustsecp256k1_v0_1_0_ge_storage type is exactly 64 byte, use its + * representation inside rustsecp256k1_v0_1_0_pubkey, as conversion is very fast. + * Note that rustsecp256k1_v0_1_0_pubkey_save must use the same representation. */ + rustsecp256k1_v0_1_0_ge_storage s; memcpy(&s, &pubkey->data[0], sizeof(s)); - secp256k1_ge_from_storage(ge, &s); + rustsecp256k1_v0_1_0_ge_from_storage(ge, &s); } else { /* Otherwise, fall back to 32-byte big endian for X and Y. */ - secp256k1_fe x, y; - secp256k1_fe_set_b32(&x, pubkey->data); - secp256k1_fe_set_b32(&y, pubkey->data + 32); - secp256k1_ge_set_xy(ge, &x, &y); + rustsecp256k1_v0_1_0_fe x, y; + rustsecp256k1_v0_1_0_fe_set_b32(&x, pubkey->data); + rustsecp256k1_v0_1_0_fe_set_b32(&y, pubkey->data + 32); + rustsecp256k1_v0_1_0_ge_set_xy(ge, &x, &y); } - ARG_CHECK(!secp256k1_fe_is_zero(&ge->x)); + ARG_CHECK(!rustsecp256k1_v0_1_0_fe_is_zero(&ge->x)); return 1; } -static void secp256k1_pubkey_save(secp256k1_pubkey* pubkey, secp256k1_ge* ge) { - if (sizeof(secp256k1_ge_storage) == 64) { - secp256k1_ge_storage s; - secp256k1_ge_to_storage(&s, ge); +static void rustsecp256k1_v0_1_0_pubkey_save(rustsecp256k1_v0_1_0_pubkey* pubkey, rustsecp256k1_v0_1_0_ge* ge) { + if (sizeof(rustsecp256k1_v0_1_0_ge_storage) == 64) { + rustsecp256k1_v0_1_0_ge_storage s; + rustsecp256k1_v0_1_0_ge_to_storage(&s, ge); memcpy(&pubkey->data[0], &s, sizeof(s)); } else { - VERIFY_CHECK(!secp256k1_ge_is_infinity(ge)); - secp256k1_fe_normalize_var(&ge->x); - secp256k1_fe_normalize_var(&ge->y); - secp256k1_fe_get_b32(pubkey->data, &ge->x); - secp256k1_fe_get_b32(pubkey->data + 32, &ge->y); + VERIFY_CHECK(!rustsecp256k1_v0_1_0_ge_is_infinity(ge)); + rustsecp256k1_v0_1_0_fe_normalize_var(&ge->x); + rustsecp256k1_v0_1_0_fe_normalize_var(&ge->y); + rustsecp256k1_v0_1_0_fe_get_b32(pubkey->data, &ge->x); + rustsecp256k1_v0_1_0_fe_get_b32(pubkey->data + 32, &ge->y); } } -int secp256k1_ec_pubkey_parse(const secp256k1_context* ctx, secp256k1_pubkey* pubkey, const unsigned char *input, size_t inputlen) { - secp256k1_ge Q; +int rustsecp256k1_v0_1_0_ec_pubkey_parse(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_pubkey* pubkey, const unsigned char *input, size_t inputlen) { + rustsecp256k1_v0_1_0_ge Q; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); memset(pubkey, 0, sizeof(*pubkey)); ARG_CHECK(input != NULL); - if (!secp256k1_eckey_pubkey_parse(&Q, input, inputlen)) { + if (!rustsecp256k1_v0_1_0_eckey_pubkey_parse(&Q, input, inputlen)) { return 0; } - secp256k1_pubkey_save(pubkey, &Q); - secp256k1_ge_clear(&Q); + rustsecp256k1_v0_1_0_pubkey_save(pubkey, &Q); + rustsecp256k1_v0_1_0_ge_clear(&Q); return 1; } -int secp256k1_ec_pubkey_serialize(const secp256k1_context* ctx, unsigned char *output, size_t *outputlen, const secp256k1_pubkey* pubkey, unsigned int flags) { - secp256k1_ge Q; +int rustsecp256k1_v0_1_0_ec_pubkey_serialize(const rustsecp256k1_v0_1_0_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_1_0_pubkey* pubkey, unsigned int flags) { + rustsecp256k1_v0_1_0_ge Q; size_t len; int ret = 0; @@ -238,8 +238,8 @@ int 
secp256k1_ec_pubkey_serialize(const secp256k1_context* ctx, unsigned char *o memset(output, 0, len); ARG_CHECK(pubkey != NULL); ARG_CHECK((flags & SECP256K1_FLAGS_TYPE_MASK) == SECP256K1_FLAGS_TYPE_COMPRESSION); - if (secp256k1_pubkey_load(ctx, &Q, pubkey)) { - ret = secp256k1_eckey_pubkey_serialize(&Q, output, &len, flags & SECP256K1_FLAGS_BIT_COMPRESSION); + if (rustsecp256k1_v0_1_0_pubkey_load(ctx, &Q, pubkey)) { + ret = rustsecp256k1_v0_1_0_eckey_pubkey_serialize(&Q, output, &len, flags & SECP256K1_FLAGS_BIT_COMPRESSION); if (ret) { *outputlen = len; } @@ -247,39 +247,39 @@ int secp256k1_ec_pubkey_serialize(const secp256k1_context* ctx, unsigned char *o return ret; } -static void secp256k1_ecdsa_signature_load(const secp256k1_context* ctx, secp256k1_scalar* r, secp256k1_scalar* s, const secp256k1_ecdsa_signature* sig) { +static void rustsecp256k1_v0_1_0_ecdsa_signature_load(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_scalar* r, rustsecp256k1_v0_1_0_scalar* s, const rustsecp256k1_v0_1_0_ecdsa_signature* sig) { (void)ctx; - if (sizeof(secp256k1_scalar) == 32) { - /* When the secp256k1_scalar type is exactly 32 byte, use its - * representation inside secp256k1_ecdsa_signature, as conversion is very fast. - * Note that secp256k1_ecdsa_signature_save must use the same representation. */ + if (sizeof(rustsecp256k1_v0_1_0_scalar) == 32) { + /* When the rustsecp256k1_v0_1_0_scalar type is exactly 32 byte, use its + * representation inside rustsecp256k1_v0_1_0_ecdsa_signature, as conversion is very fast. + * Note that rustsecp256k1_v0_1_0_ecdsa_signature_save must use the same representation. */ memcpy(r, &sig->data[0], 32); memcpy(s, &sig->data[32], 32); } else { - secp256k1_scalar_set_b32(r, &sig->data[0], NULL); - secp256k1_scalar_set_b32(s, &sig->data[32], NULL); + rustsecp256k1_v0_1_0_scalar_set_b32(r, &sig->data[0], NULL); + rustsecp256k1_v0_1_0_scalar_set_b32(s, &sig->data[32], NULL); } } -static void secp256k1_ecdsa_signature_save(secp256k1_ecdsa_signature* sig, const secp256k1_scalar* r, const secp256k1_scalar* s) { - if (sizeof(secp256k1_scalar) == 32) { +static void rustsecp256k1_v0_1_0_ecdsa_signature_save(rustsecp256k1_v0_1_0_ecdsa_signature* sig, const rustsecp256k1_v0_1_0_scalar* r, const rustsecp256k1_v0_1_0_scalar* s) { + if (sizeof(rustsecp256k1_v0_1_0_scalar) == 32) { memcpy(&sig->data[0], r, 32); memcpy(&sig->data[32], s, 32); } else { - secp256k1_scalar_get_b32(&sig->data[0], r); - secp256k1_scalar_get_b32(&sig->data[32], s); + rustsecp256k1_v0_1_0_scalar_get_b32(&sig->data[0], r); + rustsecp256k1_v0_1_0_scalar_get_b32(&sig->data[32], s); } } -int secp256k1_ecdsa_signature_parse_der(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { - secp256k1_scalar r, s; +int rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { + rustsecp256k1_v0_1_0_scalar r, s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(sig != NULL); ARG_CHECK(input != NULL); - if (secp256k1_ecdsa_sig_parse(&r, &s, input, inputlen)) { - secp256k1_ecdsa_signature_save(sig, &r, &s); + if (rustsecp256k1_v0_1_0_ecdsa_sig_parse(&r, &s, input, inputlen)) { + rustsecp256k1_v0_1_0_ecdsa_signature_save(sig, &r, &s); return 1; } else { memset(sig, 0, sizeof(*sig)); @@ -287,8 +287,8 @@ int secp256k1_ecdsa_signature_parse_der(const secp256k1_context* ctx, secp256k1_ } } -int secp256k1_ecdsa_signature_parse_compact(const secp256k1_context* 
ctx, secp256k1_ecdsa_signature* sig, const unsigned char *input64) { - secp256k1_scalar r, s; +int rustsecp256k1_v0_1_0_ecdsa_signature_parse_compact(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_ecdsa_signature* sig, const unsigned char *input64) { + rustsecp256k1_v0_1_0_scalar r, s; int ret = 1; int overflow = 0; @@ -296,77 +296,77 @@ int secp256k1_ecdsa_signature_parse_compact(const secp256k1_context* ctx, secp25 ARG_CHECK(sig != NULL); ARG_CHECK(input64 != NULL); - secp256k1_scalar_set_b32(&r, &input64[0], &overflow); + rustsecp256k1_v0_1_0_scalar_set_b32(&r, &input64[0], &overflow); ret &= !overflow; - secp256k1_scalar_set_b32(&s, &input64[32], &overflow); + rustsecp256k1_v0_1_0_scalar_set_b32(&s, &input64[32], &overflow); ret &= !overflow; if (ret) { - secp256k1_ecdsa_signature_save(sig, &r, &s); + rustsecp256k1_v0_1_0_ecdsa_signature_save(sig, &r, &s); } else { memset(sig, 0, sizeof(*sig)); } return ret; } -int secp256k1_ecdsa_signature_serialize_der(const secp256k1_context* ctx, unsigned char *output, size_t *outputlen, const secp256k1_ecdsa_signature* sig) { - secp256k1_scalar r, s; +int rustsecp256k1_v0_1_0_ecdsa_signature_serialize_der(const rustsecp256k1_v0_1_0_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_1_0_ecdsa_signature* sig) { + rustsecp256k1_v0_1_0_scalar r, s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output != NULL); ARG_CHECK(outputlen != NULL); ARG_CHECK(sig != NULL); - secp256k1_ecdsa_signature_load(ctx, &r, &s, sig); - return secp256k1_ecdsa_sig_serialize(output, outputlen, &r, &s); + rustsecp256k1_v0_1_0_ecdsa_signature_load(ctx, &r, &s, sig); + return rustsecp256k1_v0_1_0_ecdsa_sig_serialize(output, outputlen, &r, &s); } -int secp256k1_ecdsa_signature_serialize_compact(const secp256k1_context* ctx, unsigned char *output64, const secp256k1_ecdsa_signature* sig) { - secp256k1_scalar r, s; +int rustsecp256k1_v0_1_0_ecdsa_signature_serialize_compact(const rustsecp256k1_v0_1_0_context* ctx, unsigned char *output64, const rustsecp256k1_v0_1_0_ecdsa_signature* sig) { + rustsecp256k1_v0_1_0_scalar r, s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output64 != NULL); ARG_CHECK(sig != NULL); - secp256k1_ecdsa_signature_load(ctx, &r, &s, sig); - secp256k1_scalar_get_b32(&output64[0], &r); - secp256k1_scalar_get_b32(&output64[32], &s); + rustsecp256k1_v0_1_0_ecdsa_signature_load(ctx, &r, &s, sig); + rustsecp256k1_v0_1_0_scalar_get_b32(&output64[0], &r); + rustsecp256k1_v0_1_0_scalar_get_b32(&output64[32], &s); return 1; } -int secp256k1_ecdsa_signature_normalize(const secp256k1_context* ctx, secp256k1_ecdsa_signature *sigout, const secp256k1_ecdsa_signature *sigin) { - secp256k1_scalar r, s; +int rustsecp256k1_v0_1_0_ecdsa_signature_normalize(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_ecdsa_signature *sigout, const rustsecp256k1_v0_1_0_ecdsa_signature *sigin) { + rustsecp256k1_v0_1_0_scalar r, s; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(sigin != NULL); - secp256k1_ecdsa_signature_load(ctx, &r, &s, sigin); - ret = secp256k1_scalar_is_high(&s); + rustsecp256k1_v0_1_0_ecdsa_signature_load(ctx, &r, &s, sigin); + ret = rustsecp256k1_v0_1_0_scalar_is_high(&s); if (sigout != NULL) { if (ret) { - secp256k1_scalar_negate(&s, &s); + rustsecp256k1_v0_1_0_scalar_negate(&s, &s); } - secp256k1_ecdsa_signature_save(sigout, &r, &s); + rustsecp256k1_v0_1_0_ecdsa_signature_save(sigout, &r, &s); } return ret; } -int secp256k1_ecdsa_verify(const secp256k1_context* ctx, const secp256k1_ecdsa_signature *sig, const unsigned char 
*msg32, const secp256k1_pubkey *pubkey) { - secp256k1_ge q; - secp256k1_scalar r, s; - secp256k1_scalar m; +int rustsecp256k1_v0_1_0_ecdsa_verify(const rustsecp256k1_v0_1_0_context* ctx, const rustsecp256k1_v0_1_0_ecdsa_signature *sig, const unsigned char *msg32, const rustsecp256k1_v0_1_0_pubkey *pubkey) { + rustsecp256k1_v0_1_0_ge q; + rustsecp256k1_v0_1_0_scalar r, s; + rustsecp256k1_v0_1_0_scalar m; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); + ARG_CHECK(rustsecp256k1_v0_1_0_ecmult_context_is_built(&ctx->ecmult_ctx)); ARG_CHECK(msg32 != NULL); ARG_CHECK(sig != NULL); ARG_CHECK(pubkey != NULL); - secp256k1_scalar_set_b32(&m, msg32, NULL); - secp256k1_ecdsa_signature_load(ctx, &r, &s, sig); - return (!secp256k1_scalar_is_high(&s) && - secp256k1_pubkey_load(ctx, &q, pubkey) && - secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &r, &s, &q, &m)); + rustsecp256k1_v0_1_0_scalar_set_b32(&m, msg32, NULL); + rustsecp256k1_v0_1_0_ecdsa_signature_load(ctx, &r, &s, sig); + return (!rustsecp256k1_v0_1_0_scalar_is_high(&s) && + rustsecp256k1_v0_1_0_pubkey_load(ctx, &q, pubkey) && + rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &r, &s, &q, &m)); } static SECP256K1_INLINE void buffer_append(unsigned char *buf, unsigned int *offset, const void *data, unsigned int len) { @@ -377,7 +377,7 @@ static SECP256K1_INLINE void buffer_append(unsigned char *buf, unsigned int *off static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) { unsigned char keydata[112]; unsigned int offset = 0; - secp256k1_rfc6979_hmac_sha256 rng; + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 rng; unsigned int i; /* We feed a byte array to the PRNG as input, consisting of: * - the private key (32 bytes) and message (32 bytes), see RFC 6979 3.2d. 
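Illustrative sketch, not part of the patch: the comment ending the hunk above describes the byte array that nonce_function_rfc6979 feeds to the renamed HMAC-SHA256 RNG. The sketch below renders that layout as a self-contained helper; the helper name is invented, and the optional 32-byte extra-data field is assumed from the keydata[112] sizing and the unchanged context this hunk elides, while the 16-byte algo16 handling is visible in the next hunk.

#include <string.h> /* memcpy */

/* Hypothetical helper mirroring the buffer_append() calls; keydata must hold 112 bytes. */
static unsigned int demo_rfc6979_keydata(unsigned char *keydata,
                                         const unsigned char *key32,
                                         const unsigned char *msg32,
                                         const unsigned char *data32,   /* optional, may be NULL */
                                         const unsigned char *algo16) { /* optional, may be NULL */
    unsigned int offset = 0;
    memcpy(keydata + offset, key32, 32); offset += 32; /* private key, RFC 6979 3.2d */
    memcpy(keydata + offset, msg32, 32); offset += 32; /* message hash, RFC 6979 3.2d */
    if (data32 != NULL) { memcpy(keydata + offset, data32, 32); offset += 32; } /* extra entropy */
    if (algo16 != NULL) { memcpy(keydata + offset, algo16, 16); offset += 16; } /* algorithm tag */
    return offset; /* 64 to 112 bytes, then fed to rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_initialize */
}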
@@ -395,167 +395,167 @@ static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *m if (algo16 != NULL) { buffer_append(keydata, &offset, algo16, 16); } - secp256k1_rfc6979_hmac_sha256_initialize(&rng, keydata, offset); + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_initialize(&rng, keydata, offset); memset(keydata, 0, sizeof(keydata)); for (i = 0; i <= counter; i++) { - secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); } - secp256k1_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_finalize(&rng); return 1; } -const secp256k1_nonce_function secp256k1_nonce_function_rfc6979 = nonce_function_rfc6979; -const secp256k1_nonce_function secp256k1_nonce_function_default = nonce_function_rfc6979; +const rustsecp256k1_v0_1_0_nonce_function rustsecp256k1_v0_1_0_nonce_function_rfc6979 = nonce_function_rfc6979; +const rustsecp256k1_v0_1_0_nonce_function rustsecp256k1_v0_1_0_nonce_function_default = nonce_function_rfc6979; -int secp256k1_ecdsa_sign(const secp256k1_context* ctx, secp256k1_ecdsa_signature *signature, const unsigned char *msg32, const unsigned char *seckey, secp256k1_nonce_function noncefp, const void* noncedata) { - secp256k1_scalar r, s; - secp256k1_scalar sec, non, msg; +int rustsecp256k1_v0_1_0_ecdsa_sign(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_ecdsa_signature *signature, const unsigned char *msg32, const unsigned char *seckey, rustsecp256k1_v0_1_0_nonce_function noncefp, const void* noncedata) { + rustsecp256k1_v0_1_0_scalar r, s; + rustsecp256k1_v0_1_0_scalar sec, non, msg; int ret = 0; int overflow = 0; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_1_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(msg32 != NULL); ARG_CHECK(signature != NULL); ARG_CHECK(seckey != NULL); if (noncefp == NULL) { - noncefp = secp256k1_nonce_function_default; + noncefp = rustsecp256k1_v0_1_0_nonce_function_default; } - secp256k1_scalar_set_b32(&sec, seckey, &overflow); + rustsecp256k1_v0_1_0_scalar_set_b32(&sec, seckey, &overflow); /* Fail if the secret key is invalid. 
*/ - if (!overflow && !secp256k1_scalar_is_zero(&sec)) { + if (!overflow && !rustsecp256k1_v0_1_0_scalar_is_zero(&sec)) { unsigned char nonce32[32]; unsigned int count = 0; - secp256k1_scalar_set_b32(&msg, msg32, NULL); + rustsecp256k1_v0_1_0_scalar_set_b32(&msg, msg32, NULL); while (1) { ret = noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count); if (!ret) { break; } - secp256k1_scalar_set_b32(&non, nonce32, &overflow); - if (!overflow && !secp256k1_scalar_is_zero(&non)) { - if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, NULL)) { + rustsecp256k1_v0_1_0_scalar_set_b32(&non, nonce32, &overflow); + if (!overflow && !rustsecp256k1_v0_1_0_scalar_is_zero(&non)) { + if (rustsecp256k1_v0_1_0_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, NULL)) { break; } } count++; } memset(nonce32, 0, 32); - secp256k1_scalar_clear(&msg); - secp256k1_scalar_clear(&non); - secp256k1_scalar_clear(&sec); + rustsecp256k1_v0_1_0_scalar_clear(&msg); + rustsecp256k1_v0_1_0_scalar_clear(&non); + rustsecp256k1_v0_1_0_scalar_clear(&sec); } if (ret) { - secp256k1_ecdsa_signature_save(signature, &r, &s); + rustsecp256k1_v0_1_0_ecdsa_signature_save(signature, &r, &s); } else { memset(signature, 0, sizeof(*signature)); } return ret; } -int secp256k1_ec_seckey_verify(const secp256k1_context* ctx, const unsigned char *seckey) { - secp256k1_scalar sec; +int rustsecp256k1_v0_1_0_ec_seckey_verify(const rustsecp256k1_v0_1_0_context* ctx, const unsigned char *seckey) { + rustsecp256k1_v0_1_0_scalar sec; int ret; int overflow; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); - secp256k1_scalar_set_b32(&sec, seckey, &overflow); - ret = !overflow && !secp256k1_scalar_is_zero(&sec); - secp256k1_scalar_clear(&sec); + rustsecp256k1_v0_1_0_scalar_set_b32(&sec, seckey, &overflow); + ret = !overflow && !rustsecp256k1_v0_1_0_scalar_is_zero(&sec); + rustsecp256k1_v0_1_0_scalar_clear(&sec); return ret; } -int secp256k1_ec_pubkey_create(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *seckey) { - secp256k1_gej pj; - secp256k1_ge p; - secp256k1_scalar sec; +int rustsecp256k1_v0_1_0_ec_pubkey_create(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_pubkey *pubkey, const unsigned char *seckey) { + rustsecp256k1_v0_1_0_gej pj; + rustsecp256k1_v0_1_0_ge p; + rustsecp256k1_v0_1_0_scalar sec; int overflow; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); memset(pubkey, 0, sizeof(*pubkey)); - ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_1_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(seckey != NULL); - secp256k1_scalar_set_b32(&sec, seckey, &overflow); - ret = (!overflow) & (!secp256k1_scalar_is_zero(&sec)); + rustsecp256k1_v0_1_0_scalar_set_b32(&sec, seckey, &overflow); + ret = (!overflow) & (!rustsecp256k1_v0_1_0_scalar_is_zero(&sec)); if (ret) { - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pj, &sec); - secp256k1_ge_set_gej(&p, &pj); - secp256k1_pubkey_save(pubkey, &p); + rustsecp256k1_v0_1_0_ecmult_gen(&ctx->ecmult_gen_ctx, &pj, &sec); + rustsecp256k1_v0_1_0_ge_set_gej(&p, &pj); + rustsecp256k1_v0_1_0_pubkey_save(pubkey, &p); } - secp256k1_scalar_clear(&sec); + rustsecp256k1_v0_1_0_scalar_clear(&sec); return ret; } -int secp256k1_ec_privkey_negate(const secp256k1_context* ctx, unsigned char *seckey) { - secp256k1_scalar sec; +int rustsecp256k1_v0_1_0_ec_privkey_negate(const rustsecp256k1_v0_1_0_context* ctx, unsigned char *seckey) { + 
rustsecp256k1_v0_1_0_scalar sec; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); - secp256k1_scalar_set_b32(&sec, seckey, NULL); - secp256k1_scalar_negate(&sec, &sec); - secp256k1_scalar_get_b32(seckey, &sec); + rustsecp256k1_v0_1_0_scalar_set_b32(&sec, seckey, NULL); + rustsecp256k1_v0_1_0_scalar_negate(&sec, &sec); + rustsecp256k1_v0_1_0_scalar_get_b32(seckey, &sec); - secp256k1_scalar_clear(&sec); + rustsecp256k1_v0_1_0_scalar_clear(&sec); return 1; } -int secp256k1_ec_pubkey_negate(const secp256k1_context* ctx, secp256k1_pubkey *pubkey) { +int rustsecp256k1_v0_1_0_ec_pubkey_negate(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_pubkey *pubkey) { int ret = 0; - secp256k1_ge p; + rustsecp256k1_v0_1_0_ge p; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); - ret = secp256k1_pubkey_load(ctx, &p, pubkey); + ret = rustsecp256k1_v0_1_0_pubkey_load(ctx, &p, pubkey); memset(pubkey, 0, sizeof(*pubkey)); if (ret) { - secp256k1_ge_neg(&p, &p); - secp256k1_pubkey_save(pubkey, &p); + rustsecp256k1_v0_1_0_ge_neg(&p, &p); + rustsecp256k1_v0_1_0_pubkey_save(pubkey, &p); } return ret; } -int secp256k1_ec_privkey_tweak_add(const secp256k1_context* ctx, unsigned char *seckey, const unsigned char *tweak) { - secp256k1_scalar term; - secp256k1_scalar sec; +int rustsecp256k1_v0_1_0_ec_privkey_tweak_add(const rustsecp256k1_v0_1_0_context* ctx, unsigned char *seckey, const unsigned char *tweak) { + rustsecp256k1_v0_1_0_scalar term; + rustsecp256k1_v0_1_0_scalar sec; int ret = 0; int overflow = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); ARG_CHECK(tweak != NULL); - secp256k1_scalar_set_b32(&term, tweak, &overflow); - secp256k1_scalar_set_b32(&sec, seckey, NULL); + rustsecp256k1_v0_1_0_scalar_set_b32(&term, tweak, &overflow); + rustsecp256k1_v0_1_0_scalar_set_b32(&sec, seckey, NULL); - ret = !overflow && secp256k1_eckey_privkey_tweak_add(&sec, &term); + ret = !overflow && rustsecp256k1_v0_1_0_eckey_privkey_tweak_add(&sec, &term); memset(seckey, 0, 32); if (ret) { - secp256k1_scalar_get_b32(seckey, &sec); + rustsecp256k1_v0_1_0_scalar_get_b32(seckey, &sec); } - secp256k1_scalar_clear(&sec); - secp256k1_scalar_clear(&term); + rustsecp256k1_v0_1_0_scalar_clear(&sec); + rustsecp256k1_v0_1_0_scalar_clear(&term); return ret; } -int secp256k1_ec_pubkey_tweak_add(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *tweak) { - secp256k1_ge p; - secp256k1_scalar term; +int rustsecp256k1_v0_1_0_ec_pubkey_tweak_add(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_pubkey *pubkey, const unsigned char *tweak) { + rustsecp256k1_v0_1_0_ge p; + rustsecp256k1_v0_1_0_scalar term; int ret = 0; int overflow = 0; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); + ARG_CHECK(rustsecp256k1_v0_1_0_ecmult_context_is_built(&ctx->ecmult_ctx)); ARG_CHECK(pubkey != NULL); ARG_CHECK(tweak != NULL); - secp256k1_scalar_set_b32(&term, tweak, &overflow); - ret = !overflow && secp256k1_pubkey_load(ctx, &p, pubkey); + rustsecp256k1_v0_1_0_scalar_set_b32(&term, tweak, &overflow); + ret = !overflow && rustsecp256k1_v0_1_0_pubkey_load(ctx, &p, pubkey); memset(pubkey, 0, sizeof(*pubkey)); if (ret) { - if (secp256k1_eckey_pubkey_tweak_add(&ctx->ecmult_ctx, &p, &term)) { - secp256k1_pubkey_save(pubkey, &p); + if (rustsecp256k1_v0_1_0_eckey_pubkey_tweak_add(&ctx->ecmult_ctx, &p, &term)) { + rustsecp256k1_v0_1_0_pubkey_save(pubkey, &p); } else { ret = 0; } @@ -564,44 +564,44 @@ int secp256k1_ec_pubkey_tweak_add(const secp256k1_context* ctx, 
secp256k1_pubkey return ret; } -int secp256k1_ec_privkey_tweak_mul(const secp256k1_context* ctx, unsigned char *seckey, const unsigned char *tweak) { - secp256k1_scalar factor; - secp256k1_scalar sec; +int rustsecp256k1_v0_1_0_ec_privkey_tweak_mul(const rustsecp256k1_v0_1_0_context* ctx, unsigned char *seckey, const unsigned char *tweak) { + rustsecp256k1_v0_1_0_scalar factor; + rustsecp256k1_v0_1_0_scalar sec; int ret = 0; int overflow = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); ARG_CHECK(tweak != NULL); - secp256k1_scalar_set_b32(&factor, tweak, &overflow); - secp256k1_scalar_set_b32(&sec, seckey, NULL); - ret = !overflow && secp256k1_eckey_privkey_tweak_mul(&sec, &factor); + rustsecp256k1_v0_1_0_scalar_set_b32(&factor, tweak, &overflow); + rustsecp256k1_v0_1_0_scalar_set_b32(&sec, seckey, NULL); + ret = !overflow && rustsecp256k1_v0_1_0_eckey_privkey_tweak_mul(&sec, &factor); memset(seckey, 0, 32); if (ret) { - secp256k1_scalar_get_b32(seckey, &sec); + rustsecp256k1_v0_1_0_scalar_get_b32(seckey, &sec); } - secp256k1_scalar_clear(&sec); - secp256k1_scalar_clear(&factor); + rustsecp256k1_v0_1_0_scalar_clear(&sec); + rustsecp256k1_v0_1_0_scalar_clear(&factor); return ret; } -int secp256k1_ec_pubkey_tweak_mul(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *tweak) { - secp256k1_ge p; - secp256k1_scalar factor; +int rustsecp256k1_v0_1_0_ec_pubkey_tweak_mul(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_pubkey *pubkey, const unsigned char *tweak) { + rustsecp256k1_v0_1_0_ge p; + rustsecp256k1_v0_1_0_scalar factor; int ret = 0; int overflow = 0; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); + ARG_CHECK(rustsecp256k1_v0_1_0_ecmult_context_is_built(&ctx->ecmult_ctx)); ARG_CHECK(pubkey != NULL); ARG_CHECK(tweak != NULL); - secp256k1_scalar_set_b32(&factor, tweak, &overflow); - ret = !overflow && secp256k1_pubkey_load(ctx, &p, pubkey); + rustsecp256k1_v0_1_0_scalar_set_b32(&factor, tweak, &overflow); + ret = !overflow && rustsecp256k1_v0_1_0_pubkey_load(ctx, &p, pubkey); memset(pubkey, 0, sizeof(*pubkey)); if (ret) { - if (secp256k1_eckey_pubkey_tweak_mul(&ctx->ecmult_ctx, &p, &factor)) { - secp256k1_pubkey_save(pubkey, &p); + if (rustsecp256k1_v0_1_0_eckey_pubkey_tweak_mul(&ctx->ecmult_ctx, &p, &factor)) { + rustsecp256k1_v0_1_0_pubkey_save(pubkey, &p); } else { ret = 0; } @@ -610,35 +610,35 @@ int secp256k1_ec_pubkey_tweak_mul(const secp256k1_context* ctx, secp256k1_pubkey return ret; } -int secp256k1_context_randomize(secp256k1_context* ctx, const unsigned char *seed32) { +int rustsecp256k1_v0_1_0_context_randomize(rustsecp256k1_v0_1_0_context* ctx, const unsigned char *seed32) { VERIFY_CHECK(ctx != NULL); - if (secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { - secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); + if (rustsecp256k1_v0_1_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { + rustsecp256k1_v0_1_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); } return 1; } -int secp256k1_ec_pubkey_combine(const secp256k1_context* ctx, secp256k1_pubkey *pubnonce, const secp256k1_pubkey * const *pubnonces, size_t n) { +int rustsecp256k1_v0_1_0_ec_pubkey_combine(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_pubkey *pubnonce, const rustsecp256k1_v0_1_0_pubkey * const *pubnonces, size_t n) { size_t i; - secp256k1_gej Qj; - secp256k1_ge Q; + rustsecp256k1_v0_1_0_gej Qj; + rustsecp256k1_v0_1_0_ge Q; ARG_CHECK(pubnonce != NULL); memset(pubnonce, 0, 
sizeof(*pubnonce)); ARG_CHECK(n >= 1); ARG_CHECK(pubnonces != NULL); - secp256k1_gej_set_infinity(&Qj); + rustsecp256k1_v0_1_0_gej_set_infinity(&Qj); for (i = 0; i < n; i++) { - secp256k1_pubkey_load(ctx, &Q, pubnonces[i]); - secp256k1_gej_add_ge(&Qj, &Qj, &Q); + rustsecp256k1_v0_1_0_pubkey_load(ctx, &Q, pubnonces[i]); + rustsecp256k1_v0_1_0_gej_add_ge(&Qj, &Qj, &Q); } - if (secp256k1_gej_is_infinity(&Qj)) { + if (rustsecp256k1_v0_1_0_gej_is_infinity(&Qj)) { return 0; } - secp256k1_ge_set_gej(&Q, &Qj); - secp256k1_pubkey_save(pubnonce, &Q); + rustsecp256k1_v0_1_0_ge_set_gej(&Q, &Qj); + rustsecp256k1_v0_1_0_pubkey_save(pubnonce, &Q); return 1; } diff --git a/secp256k1-sys/depend/secp256k1/src/testrand.h b/secp256k1-sys/depend/secp256k1/src/testrand.h index f1f9be0..f4a96a6 100644 --- a/secp256k1-sys/depend/secp256k1/src/testrand.h +++ b/secp256k1-sys/depend/secp256k1/src/testrand.h @@ -14,25 +14,25 @@ /* A non-cryptographic RNG used only for test infrastructure. */ /** Seed the pseudorandom number generator for testing. */ -SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16); +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_rand_seed(const unsigned char *seed16); /** Generate a pseudorandom number in the range [0..2**32-1]. */ -static uint32_t secp256k1_rand32(void); +static uint32_t rustsecp256k1_v0_1_0_rand32(void); /** Generate a pseudorandom number in the range [0..2**bits-1]. Bits must be 1 or * more. */ -static uint32_t secp256k1_rand_bits(int bits); +static uint32_t rustsecp256k1_v0_1_0_rand_bits(int bits); /** Generate a pseudorandom number in the range [0..range-1]. */ -static uint32_t secp256k1_rand_int(uint32_t range); +static uint32_t rustsecp256k1_v0_1_0_rand_int(uint32_t range); /** Generate a pseudorandom 32-byte array. */ -static void secp256k1_rand256(unsigned char *b32); +static void rustsecp256k1_v0_1_0_rand256(unsigned char *b32); /** Generate a pseudorandom 32-byte array with long sequences of zero and one bits. */ -static void secp256k1_rand256_test(unsigned char *b32); +static void rustsecp256k1_v0_1_0_rand256_test(unsigned char *b32); /** Generate pseudorandom bytes with long sequences of zero and one bits. 
*/ -static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len); +static void rustsecp256k1_v0_1_0_rand_bytes_test(unsigned char *bytes, size_t len); #endif /* SECP256K1_TESTRAND_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/testrand_impl.h b/secp256k1-sys/depend/secp256k1/src/testrand_impl.h index 30a91e5..2adf750 100644 --- a/secp256k1-sys/depend/secp256k1/src/testrand_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/testrand_impl.h @@ -13,38 +13,38 @@ #include "testrand.h" #include "hash.h" -static secp256k1_rfc6979_hmac_sha256 secp256k1_test_rng; -static uint32_t secp256k1_test_rng_precomputed[8]; -static int secp256k1_test_rng_precomputed_used = 8; -static uint64_t secp256k1_test_rng_integer; -static int secp256k1_test_rng_integer_bits_left = 0; +static rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 rustsecp256k1_v0_1_0_test_rng; +static uint32_t rustsecp256k1_v0_1_0_test_rng_precomputed[8]; +static int rustsecp256k1_v0_1_0_test_rng_precomputed_used = 8; +static uint64_t rustsecp256k1_v0_1_0_test_rng_integer; +static int rustsecp256k1_v0_1_0_test_rng_integer_bits_left = 0; -SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16) { - secp256k1_rfc6979_hmac_sha256_initialize(&secp256k1_test_rng, seed16, 16); +SECP256K1_INLINE static void rustsecp256k1_v0_1_0_rand_seed(const unsigned char *seed16) { + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_initialize(&rustsecp256k1_v0_1_0_test_rng, seed16, 16); } -SECP256K1_INLINE static uint32_t secp256k1_rand32(void) { - if (secp256k1_test_rng_precomputed_used == 8) { - secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, (unsigned char*)(&secp256k1_test_rng_precomputed[0]), sizeof(secp256k1_test_rng_precomputed)); - secp256k1_test_rng_precomputed_used = 0; +SECP256K1_INLINE static uint32_t rustsecp256k1_v0_1_0_rand32(void) { + if (rustsecp256k1_v0_1_0_test_rng_precomputed_used == 8) { + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_generate(&rustsecp256k1_v0_1_0_test_rng, (unsigned char*)(&rustsecp256k1_v0_1_0_test_rng_precomputed[0]), sizeof(rustsecp256k1_v0_1_0_test_rng_precomputed)); + rustsecp256k1_v0_1_0_test_rng_precomputed_used = 0; } - return secp256k1_test_rng_precomputed[secp256k1_test_rng_precomputed_used++]; + return rustsecp256k1_v0_1_0_test_rng_precomputed[rustsecp256k1_v0_1_0_test_rng_precomputed_used++]; } -static uint32_t secp256k1_rand_bits(int bits) { +static uint32_t rustsecp256k1_v0_1_0_rand_bits(int bits) { uint32_t ret; - if (secp256k1_test_rng_integer_bits_left < bits) { - secp256k1_test_rng_integer |= (((uint64_t)secp256k1_rand32()) << secp256k1_test_rng_integer_bits_left); - secp256k1_test_rng_integer_bits_left += 32; + if (rustsecp256k1_v0_1_0_test_rng_integer_bits_left < bits) { + rustsecp256k1_v0_1_0_test_rng_integer |= (((uint64_t)rustsecp256k1_v0_1_0_rand32()) << rustsecp256k1_v0_1_0_test_rng_integer_bits_left); + rustsecp256k1_v0_1_0_test_rng_integer_bits_left += 32; } - ret = secp256k1_test_rng_integer; - secp256k1_test_rng_integer >>= bits; - secp256k1_test_rng_integer_bits_left -= bits; + ret = rustsecp256k1_v0_1_0_test_rng_integer; + rustsecp256k1_v0_1_0_test_rng_integer >>= bits; + rustsecp256k1_v0_1_0_test_rng_integer_bits_left -= bits; ret &= ((~((uint32_t)0)) >> (32 - bits)); return ret; } -static uint32_t secp256k1_rand_int(uint32_t range) { +static uint32_t rustsecp256k1_v0_1_0_rand_int(uint32_t range) { /* We want a uniform integer between 0 and range-1, inclusive. * B is the smallest number such that range <= 2**B. 
* two mechanisms implemented here: @@ -76,25 +76,25 @@ static uint32_t secp256k1_rand_int(uint32_t range) { mult = 1; } while(1) { - uint32_t x = secp256k1_rand_bits(bits); + uint32_t x = rustsecp256k1_v0_1_0_rand_bits(bits); if (x < trange) { return (mult == 1) ? x : (x % range); } } } -static void secp256k1_rand256(unsigned char *b32) { - secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, b32, 32); +static void rustsecp256k1_v0_1_0_rand256(unsigned char *b32) { + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_generate(&rustsecp256k1_v0_1_0_test_rng, b32, 32); } -static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len) { +static void rustsecp256k1_v0_1_0_rand_bytes_test(unsigned char *bytes, size_t len) { size_t bits = 0; memset(bytes, 0, len); while (bits < len * 8) { int now; uint32_t val; - now = 1 + (secp256k1_rand_bits(6) * secp256k1_rand_bits(5) + 16) / 31; - val = secp256k1_rand_bits(1); + now = 1 + (rustsecp256k1_v0_1_0_rand_bits(6) * rustsecp256k1_v0_1_0_rand_bits(5) + 16) / 31; + val = rustsecp256k1_v0_1_0_rand_bits(1); while (now > 0 && bits < len * 8) { bytes[bits / 8] |= val << (bits % 8); now--; @@ -103,8 +103,8 @@ static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len) { } } -static void secp256k1_rand256_test(unsigned char *b32) { - secp256k1_rand_bytes_test(b32, 32); +static void rustsecp256k1_v0_1_0_rand256_test(unsigned char *b32) { + rustsecp256k1_v0_1_0_rand_bytes_test(b32, 32); } #endif /* SECP256K1_TESTRAND_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/tests.c b/secp256k1-sys/depend/secp256k1/src/tests.c index 1d813f1..49df37d 100644 --- a/secp256k1-sys/depend/secp256k1/src/tests.c +++ b/secp256k1-sys/depend/secp256k1/src/tests.c @@ -44,7 +44,7 @@ void ECDSA_SIG_get0(const ECDSA_SIG *sig, const BIGNUM **pr, const BIGNUM **ps) #endif static int count = 64; -static secp256k1_context *ctx = NULL; +static rustsecp256k1_v0_1_0_context *ctx = NULL; static void counting_illegal_callback_fn(const char* str, void* data) { /* Dummy callback function that just counts. 
*/ @@ -62,76 +62,76 @@ static void uncounting_illegal_callback_fn(const char* str, void* data) { (*p)--; } -void random_field_element_test(secp256k1_fe *fe) { +void random_field_element_test(rustsecp256k1_v0_1_0_fe *fe) { do { unsigned char b32[32]; - secp256k1_rand256_test(b32); - if (secp256k1_fe_set_b32(fe, b32)) { + rustsecp256k1_v0_1_0_rand256_test(b32); + if (rustsecp256k1_v0_1_0_fe_set_b32(fe, b32)) { break; } } while(1); } -void random_field_element_magnitude(secp256k1_fe *fe) { - secp256k1_fe zero; - int n = secp256k1_rand_int(9); - secp256k1_fe_normalize(fe); +void random_field_element_magnitude(rustsecp256k1_v0_1_0_fe *fe) { + rustsecp256k1_v0_1_0_fe zero; + int n = rustsecp256k1_v0_1_0_rand_int(9); + rustsecp256k1_v0_1_0_fe_normalize(fe); if (n == 0) { return; } - secp256k1_fe_clear(&zero); - secp256k1_fe_negate(&zero, &zero, 0); - secp256k1_fe_mul_int(&zero, n - 1); - secp256k1_fe_add(fe, &zero); + rustsecp256k1_v0_1_0_fe_clear(&zero); + rustsecp256k1_v0_1_0_fe_negate(&zero, &zero, 0); + rustsecp256k1_v0_1_0_fe_mul_int(&zero, n - 1); + rustsecp256k1_v0_1_0_fe_add(fe, &zero); VERIFY_CHECK(fe->magnitude == n); } -void random_group_element_test(secp256k1_ge *ge) { - secp256k1_fe fe; +void random_group_element_test(rustsecp256k1_v0_1_0_ge *ge) { + rustsecp256k1_v0_1_0_fe fe; do { random_field_element_test(&fe); - if (secp256k1_ge_set_xo_var(ge, &fe, secp256k1_rand_bits(1))) { - secp256k1_fe_normalize(&ge->y); + if (rustsecp256k1_v0_1_0_ge_set_xo_var(ge, &fe, rustsecp256k1_v0_1_0_rand_bits(1))) { + rustsecp256k1_v0_1_0_fe_normalize(&ge->y); break; } } while(1); } -void random_group_element_jacobian_test(secp256k1_gej *gej, const secp256k1_ge *ge) { - secp256k1_fe z2, z3; +void random_group_element_jacobian_test(rustsecp256k1_v0_1_0_gej *gej, const rustsecp256k1_v0_1_0_ge *ge) { + rustsecp256k1_v0_1_0_fe z2, z3; do { random_field_element_test(&gej->z); - if (!secp256k1_fe_is_zero(&gej->z)) { + if (!rustsecp256k1_v0_1_0_fe_is_zero(&gej->z)) { break; } } while(1); - secp256k1_fe_sqr(&z2, &gej->z); - secp256k1_fe_mul(&z3, &z2, &gej->z); - secp256k1_fe_mul(&gej->x, &ge->x, &z2); - secp256k1_fe_mul(&gej->y, &ge->y, &z3); + rustsecp256k1_v0_1_0_fe_sqr(&z2, &gej->z); + rustsecp256k1_v0_1_0_fe_mul(&z3, &z2, &gej->z); + rustsecp256k1_v0_1_0_fe_mul(&gej->x, &ge->x, &z2); + rustsecp256k1_v0_1_0_fe_mul(&gej->y, &ge->y, &z3); gej->infinity = ge->infinity; } -void random_scalar_order_test(secp256k1_scalar *num) { +void random_scalar_order_test(rustsecp256k1_v0_1_0_scalar *num) { do { unsigned char b32[32]; int overflow = 0; - secp256k1_rand256_test(b32); - secp256k1_scalar_set_b32(num, b32, &overflow); - if (overflow || secp256k1_scalar_is_zero(num)) { + rustsecp256k1_v0_1_0_rand256_test(b32); + rustsecp256k1_v0_1_0_scalar_set_b32(num, b32, &overflow); + if (overflow || rustsecp256k1_v0_1_0_scalar_is_zero(num)) { continue; } break; } while(1); } -void random_scalar_order(secp256k1_scalar *num) { +void random_scalar_order(rustsecp256k1_v0_1_0_scalar *num) { do { unsigned char b32[32]; int overflow = 0; - secp256k1_rand256(b32); - secp256k1_scalar_set_b32(num, b32, &overflow); - if (overflow || secp256k1_scalar_is_zero(num)) { + rustsecp256k1_v0_1_0_rand256(b32); + rustsecp256k1_v0_1_0_scalar_set_b32(num, b32, &overflow); + if (overflow || rustsecp256k1_v0_1_0_scalar_is_zero(num)) { continue; } break; @@ -139,104 +139,104 @@ void random_scalar_order(secp256k1_scalar *num) { } void run_context_tests(int use_prealloc) { - secp256k1_pubkey pubkey; - secp256k1_pubkey zero_pubkey; - 
secp256k1_ecdsa_signature sig; + rustsecp256k1_v0_1_0_pubkey pubkey; + rustsecp256k1_v0_1_0_pubkey zero_pubkey; + rustsecp256k1_v0_1_0_ecdsa_signature sig; unsigned char ctmp[32]; int32_t ecount; int32_t ecount2; - secp256k1_context *none; - secp256k1_context *sign; - secp256k1_context *vrfy; - secp256k1_context *both; + rustsecp256k1_v0_1_0_context *none; + rustsecp256k1_v0_1_0_context *sign; + rustsecp256k1_v0_1_0_context *vrfy; + rustsecp256k1_v0_1_0_context *both; void *none_prealloc = NULL; void *sign_prealloc = NULL; void *vrfy_prealloc = NULL; void *both_prealloc = NULL; - secp256k1_gej pubj; - secp256k1_ge pub; - secp256k1_scalar msg, key, nonce; - secp256k1_scalar sigr, sigs; + rustsecp256k1_v0_1_0_gej pubj; + rustsecp256k1_v0_1_0_ge pub; + rustsecp256k1_v0_1_0_scalar msg, key, nonce; + rustsecp256k1_v0_1_0_scalar sigr, sigs; if (use_prealloc) { - none_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE)); - sign_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); - vrfy_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); - both_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); + none_prealloc = malloc(rustsecp256k1_v0_1_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); + sign_prealloc = malloc(rustsecp256k1_v0_1_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); + vrfy_prealloc = malloc(rustsecp256k1_v0_1_0_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); + both_prealloc = malloc(rustsecp256k1_v0_1_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(none_prealloc != NULL); CHECK(sign_prealloc != NULL); CHECK(vrfy_prealloc != NULL); CHECK(both_prealloc != NULL); - none = secp256k1_context_preallocated_create(none_prealloc, SECP256K1_CONTEXT_NONE); - sign = secp256k1_context_preallocated_create(sign_prealloc, SECP256K1_CONTEXT_SIGN); - vrfy = secp256k1_context_preallocated_create(vrfy_prealloc, SECP256K1_CONTEXT_VERIFY); - both = secp256k1_context_preallocated_create(both_prealloc, SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + none = rustsecp256k1_v0_1_0_context_preallocated_create(none_prealloc, SECP256K1_CONTEXT_NONE); + sign = rustsecp256k1_v0_1_0_context_preallocated_create(sign_prealloc, SECP256K1_CONTEXT_SIGN); + vrfy = rustsecp256k1_v0_1_0_context_preallocated_create(vrfy_prealloc, SECP256K1_CONTEXT_VERIFY); + both = rustsecp256k1_v0_1_0_context_preallocated_create(both_prealloc, SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); } else { - none = secp256k1_context_create(SECP256K1_CONTEXT_NONE); - sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN); - vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY); - both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + none = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_NONE); + sign = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN); + vrfy = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_VERIFY); + both = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); } memset(&zero_pubkey, 0, sizeof(zero_pubkey)); ecount = 0; ecount2 = 10; - secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); - secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount2); - secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, NULL); + rustsecp256k1_v0_1_0_context_set_illegal_callback(vrfy, 
counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_1_0_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount2); + rustsecp256k1_v0_1_0_context_set_error_callback(sign, counting_illegal_callback_fn, NULL); CHECK(vrfy->error_callback.fn != sign->error_callback.fn); /* check if sizes for cloning are consistent */ - CHECK(secp256k1_context_preallocated_clone_size(none) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE)); - CHECK(secp256k1_context_preallocated_clone_size(sign) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); - CHECK(secp256k1_context_preallocated_clone_size(vrfy) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); - CHECK(secp256k1_context_preallocated_clone_size(both) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); + CHECK(rustsecp256k1_v0_1_0_context_preallocated_clone_size(none) == rustsecp256k1_v0_1_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); + CHECK(rustsecp256k1_v0_1_0_context_preallocated_clone_size(sign) == rustsecp256k1_v0_1_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); + CHECK(rustsecp256k1_v0_1_0_context_preallocated_clone_size(vrfy) == rustsecp256k1_v0_1_0_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); + CHECK(rustsecp256k1_v0_1_0_context_preallocated_clone_size(both) == rustsecp256k1_v0_1_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); /*** clone and destroy all of them to make sure cloning was complete ***/ { - secp256k1_context *ctx_tmp; + rustsecp256k1_v0_1_0_context *ctx_tmp; if (use_prealloc) { /* clone into a non-preallocated context and then again into a new preallocated one. */ - ctx_tmp = none; none = secp256k1_context_clone(none); secp256k1_context_preallocated_destroy(ctx_tmp); - free(none_prealloc); none_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(none_prealloc != NULL); - ctx_tmp = none; none = secp256k1_context_preallocated_clone(none, none_prealloc); secp256k1_context_destroy(ctx_tmp); + ctx_tmp = none; none = rustsecp256k1_v0_1_0_context_clone(none); rustsecp256k1_v0_1_0_context_preallocated_destroy(ctx_tmp); + free(none_prealloc); none_prealloc = malloc(rustsecp256k1_v0_1_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(none_prealloc != NULL); + ctx_tmp = none; none = rustsecp256k1_v0_1_0_context_preallocated_clone(none, none_prealloc); rustsecp256k1_v0_1_0_context_destroy(ctx_tmp); - ctx_tmp = sign; sign = secp256k1_context_clone(sign); secp256k1_context_preallocated_destroy(ctx_tmp); - free(sign_prealloc); sign_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); CHECK(sign_prealloc != NULL); - ctx_tmp = sign; sign = secp256k1_context_preallocated_clone(sign, sign_prealloc); secp256k1_context_destroy(ctx_tmp); + ctx_tmp = sign; sign = rustsecp256k1_v0_1_0_context_clone(sign); rustsecp256k1_v0_1_0_context_preallocated_destroy(ctx_tmp); + free(sign_prealloc); sign_prealloc = malloc(rustsecp256k1_v0_1_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); CHECK(sign_prealloc != NULL); + ctx_tmp = sign; sign = rustsecp256k1_v0_1_0_context_preallocated_clone(sign, sign_prealloc); rustsecp256k1_v0_1_0_context_destroy(ctx_tmp); - ctx_tmp = vrfy; vrfy = secp256k1_context_clone(vrfy); secp256k1_context_preallocated_destroy(ctx_tmp); - free(vrfy_prealloc); vrfy_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); CHECK(vrfy_prealloc != NULL); - ctx_tmp = vrfy; vrfy = 
secp256k1_context_preallocated_clone(vrfy, vrfy_prealloc); secp256k1_context_destroy(ctx_tmp); + ctx_tmp = vrfy; vrfy = rustsecp256k1_v0_1_0_context_clone(vrfy); rustsecp256k1_v0_1_0_context_preallocated_destroy(ctx_tmp); + free(vrfy_prealloc); vrfy_prealloc = malloc(rustsecp256k1_v0_1_0_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); CHECK(vrfy_prealloc != NULL); + ctx_tmp = vrfy; vrfy = rustsecp256k1_v0_1_0_context_preallocated_clone(vrfy, vrfy_prealloc); rustsecp256k1_v0_1_0_context_destroy(ctx_tmp); - ctx_tmp = both; both = secp256k1_context_clone(both); secp256k1_context_preallocated_destroy(ctx_tmp); - free(both_prealloc); both_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(both_prealloc != NULL); - ctx_tmp = both; both = secp256k1_context_preallocated_clone(both, both_prealloc); secp256k1_context_destroy(ctx_tmp); + ctx_tmp = both; both = rustsecp256k1_v0_1_0_context_clone(both); rustsecp256k1_v0_1_0_context_preallocated_destroy(ctx_tmp); + free(both_prealloc); both_prealloc = malloc(rustsecp256k1_v0_1_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(both_prealloc != NULL); + ctx_tmp = both; both = rustsecp256k1_v0_1_0_context_preallocated_clone(both, both_prealloc); rustsecp256k1_v0_1_0_context_destroy(ctx_tmp); } else { /* clone into a preallocated context and then again into a new non-preallocated one. */ void *prealloc_tmp; - prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(prealloc_tmp != NULL); - ctx_tmp = none; none = secp256k1_context_preallocated_clone(none, prealloc_tmp); secp256k1_context_destroy(ctx_tmp); - ctx_tmp = none; none = secp256k1_context_clone(none); secp256k1_context_preallocated_destroy(ctx_tmp); + prealloc_tmp = malloc(rustsecp256k1_v0_1_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(prealloc_tmp != NULL); + ctx_tmp = none; none = rustsecp256k1_v0_1_0_context_preallocated_clone(none, prealloc_tmp); rustsecp256k1_v0_1_0_context_destroy(ctx_tmp); + ctx_tmp = none; none = rustsecp256k1_v0_1_0_context_clone(none); rustsecp256k1_v0_1_0_context_preallocated_destroy(ctx_tmp); free(prealloc_tmp); - prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); CHECK(prealloc_tmp != NULL); - ctx_tmp = sign; sign = secp256k1_context_preallocated_clone(sign, prealloc_tmp); secp256k1_context_destroy(ctx_tmp); - ctx_tmp = sign; sign = secp256k1_context_clone(sign); secp256k1_context_preallocated_destroy(ctx_tmp); + prealloc_tmp = malloc(rustsecp256k1_v0_1_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); CHECK(prealloc_tmp != NULL); + ctx_tmp = sign; sign = rustsecp256k1_v0_1_0_context_preallocated_clone(sign, prealloc_tmp); rustsecp256k1_v0_1_0_context_destroy(ctx_tmp); + ctx_tmp = sign; sign = rustsecp256k1_v0_1_0_context_clone(sign); rustsecp256k1_v0_1_0_context_preallocated_destroy(ctx_tmp); free(prealloc_tmp); - prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); CHECK(prealloc_tmp != NULL); - ctx_tmp = vrfy; vrfy = secp256k1_context_preallocated_clone(vrfy, prealloc_tmp); secp256k1_context_destroy(ctx_tmp); - ctx_tmp = vrfy; vrfy = secp256k1_context_clone(vrfy); secp256k1_context_preallocated_destroy(ctx_tmp); + prealloc_tmp = malloc(rustsecp256k1_v0_1_0_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); CHECK(prealloc_tmp != NULL); + ctx_tmp = vrfy; vrfy = rustsecp256k1_v0_1_0_context_preallocated_clone(vrfy, prealloc_tmp); 
rustsecp256k1_v0_1_0_context_destroy(ctx_tmp); + ctx_tmp = vrfy; vrfy = rustsecp256k1_v0_1_0_context_clone(vrfy); rustsecp256k1_v0_1_0_context_preallocated_destroy(ctx_tmp); free(prealloc_tmp); - prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(prealloc_tmp != NULL); - ctx_tmp = both; both = secp256k1_context_preallocated_clone(both, prealloc_tmp); secp256k1_context_destroy(ctx_tmp); - ctx_tmp = both; both = secp256k1_context_clone(both); secp256k1_context_preallocated_destroy(ctx_tmp); + prealloc_tmp = malloc(rustsecp256k1_v0_1_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(prealloc_tmp != NULL); + ctx_tmp = both; both = rustsecp256k1_v0_1_0_context_preallocated_clone(both, prealloc_tmp); rustsecp256k1_v0_1_0_context_destroy(ctx_tmp); + ctx_tmp = both; both = rustsecp256k1_v0_1_0_context_clone(both); rustsecp256k1_v0_1_0_context_preallocated_destroy(ctx_tmp); free(prealloc_tmp); } } @@ -244,91 +244,91 @@ void run_context_tests(int use_prealloc) { /* Verify that the error callback makes it across the clone. */ CHECK(vrfy->error_callback.fn != sign->error_callback.fn); /* And that it resets back to default. */ - secp256k1_context_set_error_callback(sign, NULL, NULL); + rustsecp256k1_v0_1_0_context_set_error_callback(sign, NULL, NULL); CHECK(vrfy->error_callback.fn == sign->error_callback.fn); /*** attempt to use them ***/ random_scalar_order_test(&msg); random_scalar_order_test(&key); - secp256k1_ecmult_gen(&both->ecmult_gen_ctx, &pubj, &key); - secp256k1_ge_set_gej(&pub, &pubj); + rustsecp256k1_v0_1_0_ecmult_gen(&both->ecmult_gen_ctx, &pubj, &key); + rustsecp256k1_v0_1_0_ge_set_gej(&pub, &pubj); /* Verify context-type checking illegal-argument errors. 
*/ memset(ctmp, 1, 32); - CHECK(secp256k1_ec_pubkey_create(vrfy, &pubkey, ctmp) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(vrfy, &pubkey, ctmp) == 0); CHECK(ecount == 1); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_create(sign, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(sign, &pubkey, ctmp) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ecdsa_sign(vrfy, &sig, ctmp, ctmp, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(vrfy, &sig, ctmp, ctmp, NULL, NULL) == 0); CHECK(ecount == 2); VG_UNDEF(&sig, sizeof(sig)); - CHECK(secp256k1_ecdsa_sign(sign, &sig, ctmp, ctmp, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(sign, &sig, ctmp, ctmp, NULL, NULL) == 1); VG_CHECK(&sig, sizeof(sig)); CHECK(ecount2 == 10); - CHECK(secp256k1_ecdsa_verify(sign, &sig, ctmp, &pubkey) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(sign, &sig, ctmp, &pubkey) == 0); CHECK(ecount2 == 11); - CHECK(secp256k1_ecdsa_verify(vrfy, &sig, ctmp, &pubkey) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(vrfy, &sig, ctmp, &pubkey) == 1); CHECK(ecount == 2); - CHECK(secp256k1_ec_pubkey_tweak_add(sign, &pubkey, ctmp) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_tweak_add(sign, &pubkey, ctmp) == 0); CHECK(ecount2 == 12); - CHECK(secp256k1_ec_pubkey_tweak_add(vrfy, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_tweak_add(vrfy, &pubkey, ctmp) == 1); CHECK(ecount == 2); - CHECK(secp256k1_ec_pubkey_tweak_mul(sign, &pubkey, ctmp) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_tweak_mul(sign, &pubkey, ctmp) == 0); CHECK(ecount2 == 13); - CHECK(secp256k1_ec_pubkey_negate(vrfy, &pubkey) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_negate(vrfy, &pubkey) == 1); CHECK(ecount == 2); - CHECK(secp256k1_ec_pubkey_negate(sign, &pubkey) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_negate(sign, &pubkey) == 1); CHECK(ecount == 2); - CHECK(secp256k1_ec_pubkey_negate(sign, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_negate(sign, NULL) == 0); CHECK(ecount2 == 14); - CHECK(secp256k1_ec_pubkey_negate(vrfy, &zero_pubkey) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_negate(vrfy, &zero_pubkey) == 0); CHECK(ecount == 3); - CHECK(secp256k1_ec_pubkey_tweak_mul(vrfy, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_tweak_mul(vrfy, &pubkey, ctmp) == 1); CHECK(ecount == 3); - CHECK(secp256k1_context_randomize(vrfy, ctmp) == 1); + CHECK(rustsecp256k1_v0_1_0_context_randomize(vrfy, ctmp) == 1); CHECK(ecount == 3); - CHECK(secp256k1_context_randomize(vrfy, NULL) == 1); + CHECK(rustsecp256k1_v0_1_0_context_randomize(vrfy, NULL) == 1); CHECK(ecount == 3); - CHECK(secp256k1_context_randomize(sign, ctmp) == 1); + CHECK(rustsecp256k1_v0_1_0_context_randomize(sign, ctmp) == 1); CHECK(ecount2 == 14); - CHECK(secp256k1_context_randomize(sign, NULL) == 1); + CHECK(rustsecp256k1_v0_1_0_context_randomize(sign, NULL) == 1); CHECK(ecount2 == 14); - secp256k1_context_set_illegal_callback(vrfy, NULL, NULL); - secp256k1_context_set_illegal_callback(sign, NULL, NULL); + rustsecp256k1_v0_1_0_context_set_illegal_callback(vrfy, NULL, NULL); + rustsecp256k1_v0_1_0_context_set_illegal_callback(sign, NULL, NULL); /* obtain a working nonce */ do { random_scalar_order_test(&nonce); - } while(!secp256k1_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); + } while(!rustsecp256k1_v0_1_0_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); /* try signing */ - CHECK(secp256k1_ecdsa_sig_sign(&sign->ecmult_gen_ctx, 
&sigr, &sigs, &key, &msg, &nonce, NULL)); - CHECK(secp256k1_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_sign(&sign->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); /* try verifying */ - CHECK(secp256k1_ecdsa_sig_verify(&vrfy->ecmult_ctx, &sigr, &sigs, &pub, &msg)); - CHECK(secp256k1_ecdsa_sig_verify(&both->ecmult_ctx, &sigr, &sigs, &pub, &msg)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&vrfy->ecmult_ctx, &sigr, &sigs, &pub, &msg)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&both->ecmult_ctx, &sigr, &sigs, &pub, &msg)); /* cleanup */ if (use_prealloc) { - secp256k1_context_preallocated_destroy(none); - secp256k1_context_preallocated_destroy(sign); - secp256k1_context_preallocated_destroy(vrfy); - secp256k1_context_preallocated_destroy(both); + rustsecp256k1_v0_1_0_context_preallocated_destroy(none); + rustsecp256k1_v0_1_0_context_preallocated_destroy(sign); + rustsecp256k1_v0_1_0_context_preallocated_destroy(vrfy); + rustsecp256k1_v0_1_0_context_preallocated_destroy(both); free(none_prealloc); free(sign_prealloc); free(vrfy_prealloc); free(both_prealloc); } else { - secp256k1_context_destroy(none); - secp256k1_context_destroy(sign); - secp256k1_context_destroy(vrfy); - secp256k1_context_destroy(both); + rustsecp256k1_v0_1_0_context_destroy(none); + rustsecp256k1_v0_1_0_context_destroy(sign); + rustsecp256k1_v0_1_0_context_destroy(vrfy); + rustsecp256k1_v0_1_0_context_destroy(both); } /* Defined as no-op. */ - secp256k1_context_destroy(NULL); - secp256k1_context_preallocated_destroy(NULL); + rustsecp256k1_v0_1_0_context_destroy(NULL); + rustsecp256k1_v0_1_0_context_preallocated_destroy(NULL); } @@ -338,69 +338,69 @@ void run_scratch_tests(void) { int32_t ecount = 0; size_t checkpoint; size_t checkpoint_2; - secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE); - secp256k1_scratch_space *scratch; - secp256k1_scratch_space local_scratch; + rustsecp256k1_v0_1_0_context *none = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_NONE); + rustsecp256k1_v0_1_0_scratch_space *scratch; + rustsecp256k1_v0_1_0_scratch_space local_scratch; /* Test public API */ - secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); - secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_1_0_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_1_0_context_set_error_callback(none, counting_illegal_callback_fn, &ecount); - scratch = secp256k1_scratch_space_create(none, 1000); + scratch = rustsecp256k1_v0_1_0_scratch_space_create(none, 1000); CHECK(scratch != NULL); CHECK(ecount == 0); /* Test internal API */ - CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000); - CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - (ALIGNMENT - 1)); + CHECK(rustsecp256k1_v0_1_0_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000); + CHECK(rustsecp256k1_v0_1_0_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - (ALIGNMENT - 1)); CHECK(scratch->alloc_size == 0); CHECK(scratch->alloc_size % ALIGNMENT == 0); /* Allocating 500 bytes succeeds */ - checkpoint = secp256k1_scratch_checkpoint(&none->error_callback, scratch); - CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) != NULL); - 
CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc); - CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1)); + checkpoint = rustsecp256k1_v0_1_0_scratch_checkpoint(&none->error_callback, scratch); + CHECK(rustsecp256k1_v0_1_0_scratch_alloc(&none->error_callback, scratch, 500) != NULL); + CHECK(rustsecp256k1_v0_1_0_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc); + CHECK(rustsecp256k1_v0_1_0_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1)); CHECK(scratch->alloc_size != 0); CHECK(scratch->alloc_size % ALIGNMENT == 0); /* Allocating another 500 bytes fails */ - CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) == NULL); - CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc); - CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1)); + CHECK(rustsecp256k1_v0_1_0_scratch_alloc(&none->error_callback, scratch, 500) == NULL); + CHECK(rustsecp256k1_v0_1_0_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc); + CHECK(rustsecp256k1_v0_1_0_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1)); CHECK(scratch->alloc_size != 0); CHECK(scratch->alloc_size % ALIGNMENT == 0); /* ...but it succeeds once we apply the checkpoint to undo it */ - secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint); + rustsecp256k1_v0_1_0_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint); CHECK(scratch->alloc_size == 0); - CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000); - CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) != NULL); + CHECK(rustsecp256k1_v0_1_0_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000); + CHECK(rustsecp256k1_v0_1_0_scratch_alloc(&none->error_callback, scratch, 500) != NULL); CHECK(scratch->alloc_size != 0); /* try to apply a bad checkpoint */ - checkpoint_2 = secp256k1_scratch_checkpoint(&none->error_callback, scratch); - secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint); + checkpoint_2 = rustsecp256k1_v0_1_0_scratch_checkpoint(&none->error_callback, scratch); + rustsecp256k1_v0_1_0_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint); CHECK(ecount == 0); - secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint_2); /* checkpoint_2 is after checkpoint */ + rustsecp256k1_v0_1_0_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint_2); /* checkpoint_2 is after checkpoint */ CHECK(ecount == 1); - secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, (size_t) -1); /* this is just wildly invalid */ + rustsecp256k1_v0_1_0_scratch_apply_checkpoint(&none->error_callback, scratch, (size_t) -1); /* this is just wildly invalid */ CHECK(ecount == 2); /* try to use badly initialized scratch space */ - secp256k1_scratch_space_destroy(none, scratch); + rustsecp256k1_v0_1_0_scratch_space_destroy(none, scratch); memset(&local_scratch, 0, sizeof(local_scratch)); scratch = &local_scratch; - CHECK(!secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0)); + CHECK(!rustsecp256k1_v0_1_0_scratch_max_allocation(&none->error_callback, scratch, 0)); CHECK(ecount == 3); - CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) == NULL); + 
CHECK(rustsecp256k1_v0_1_0_scratch_alloc(&none->error_callback, scratch, 500) == NULL); CHECK(ecount == 4); - secp256k1_scratch_space_destroy(none, scratch); + rustsecp256k1_v0_1_0_scratch_space_destroy(none, scratch); CHECK(ecount == 5); /* cleanup */ - secp256k1_scratch_space_destroy(none, NULL); /* no-op */ - secp256k1_context_destroy(none); + rustsecp256k1_v0_1_0_scratch_space_destroy(none, NULL); /* no-op */ + rustsecp256k1_v0_1_0_context_destroy(none); } /***** HASH TESTS *****/ @@ -425,17 +425,17 @@ void run_sha256_tests(void) { int i; for (i = 0; i < 8; i++) { unsigned char out[32]; - secp256k1_sha256 hasher; - secp256k1_sha256_initialize(&hasher); - secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); - secp256k1_sha256_finalize(&hasher, out); + rustsecp256k1_v0_1_0_sha256 hasher; + rustsecp256k1_v0_1_0_sha256_initialize(&hasher); + rustsecp256k1_v0_1_0_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); + rustsecp256k1_v0_1_0_sha256_finalize(&hasher, out); CHECK(memcmp(out, outputs[i], 32) == 0); if (strlen(inputs[i]) > 0) { - int split = secp256k1_rand_int(strlen(inputs[i])); - secp256k1_sha256_initialize(&hasher); - secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); - secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); - secp256k1_sha256_finalize(&hasher, out); + int split = rustsecp256k1_v0_1_0_rand_int(strlen(inputs[i])); + rustsecp256k1_v0_1_0_sha256_initialize(&hasher); + rustsecp256k1_v0_1_0_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); + rustsecp256k1_v0_1_0_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); + rustsecp256k1_v0_1_0_sha256_finalize(&hasher, out); CHECK(memcmp(out, outputs[i], 32) == 0); } } @@ -468,18 +468,18 @@ void run_hmac_sha256_tests(void) { }; int i; for (i = 0; i < 6; i++) { - secp256k1_hmac_sha256 hasher; + rustsecp256k1_v0_1_0_hmac_sha256 hasher; unsigned char out[32]; - secp256k1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); - secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); - secp256k1_hmac_sha256_finalize(&hasher, out); + rustsecp256k1_v0_1_0_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); + rustsecp256k1_v0_1_0_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); + rustsecp256k1_v0_1_0_hmac_sha256_finalize(&hasher, out); CHECK(memcmp(out, outputs[i], 32) == 0); if (strlen(inputs[i]) > 0) { - int split = secp256k1_rand_int(strlen(inputs[i])); - secp256k1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); - secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); - secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); - secp256k1_hmac_sha256_finalize(&hasher, out); + int split = rustsecp256k1_v0_1_0_rand_int(strlen(inputs[i])); + rustsecp256k1_v0_1_0_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); + rustsecp256k1_v0_1_0_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); + rustsecp256k1_v0_1_0_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); + rustsecp256k1_v0_1_0_hmac_sha256_finalize(&hasher, out); CHECK(memcmp(out, outputs[i], 32) == 0); } } @@ -500,30 +500,30 @@ void run_rfc6979_hmac_sha256_tests(void) { 
{0x75, 0x97, 0x88, 0x7c, 0xbd, 0x76, 0x32, 0x1f, 0x32, 0xe3, 0x04, 0x40, 0x67, 0x9a, 0x22, 0xcf, 0x7f, 0x8d, 0x9d, 0x2e, 0xac, 0x39, 0x0e, 0x58, 0x1f, 0xea, 0x09, 0x1c, 0xe2, 0x02, 0xba, 0x94} }; - secp256k1_rfc6979_hmac_sha256 rng; + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 rng; unsigned char out[32]; int i; - secp256k1_rfc6979_hmac_sha256_initialize(&rng, key1, 64); + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_initialize(&rng, key1, 64); for (i = 0; i < 3; i++) { - secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32); + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_generate(&rng, out, 32); CHECK(memcmp(out, out1[i], 32) == 0); } - secp256k1_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_finalize(&rng); - secp256k1_rfc6979_hmac_sha256_initialize(&rng, key1, 65); + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_initialize(&rng, key1, 65); for (i = 0; i < 3; i++) { - secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32); + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_generate(&rng, out, 32); CHECK(memcmp(out, out1[i], 32) != 0); } - secp256k1_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_finalize(&rng); - secp256k1_rfc6979_hmac_sha256_initialize(&rng, key2, 64); + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_initialize(&rng, key2, 64); for (i = 0; i < 3; i++) { - secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32); + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_generate(&rng, out, 32); CHECK(memcmp(out, out2[i], 32) == 0); } - secp256k1_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_finalize(&rng); } /***** RANDOM TESTS *****/ @@ -545,7 +545,7 @@ void test_rand_bits(int rand32, int bits) { /* Multiply the output of all rand calls with the odd number m, which should not change the uniformity of its distribution. */ for (i = 0; i < rounds[usebits]; i++) { - uint32_t r = (rand32 ? secp256k1_rand32() : secp256k1_rand_bits(bits)); + uint32_t r = (rand32 ? 
rustsecp256k1_v0_1_0_rand32() : rustsecp256k1_v0_1_0_rand_bits(bits)); CHECK((((uint64_t)r) >> bits) == 0); for (m = 0; m < sizeof(mults) / sizeof(mults[0]); m++) { uint32_t rm = r * mults[m]; @@ -570,7 +570,7 @@ void test_rand_int(uint32_t range, uint32_t subrange) { uint64_t x = 0; CHECK((range % subrange) == 0); for (i = 0; i < rounds; i++) { - uint32_t r = secp256k1_rand_int(range); + uint32_t r = rustsecp256k1_v0_1_0_rand_int(range); CHECK(r < range); r = r % subrange; x |= (((uint64_t)1) << r); @@ -601,176 +601,176 @@ void run_rand_int(void) { /***** NUM TESTS *****/ #ifndef USE_NUM_NONE -void random_num_negate(secp256k1_num *num) { - if (secp256k1_rand_bits(1)) { - secp256k1_num_negate(num); +void random_num_negate(rustsecp256k1_v0_1_0_num *num) { + if (rustsecp256k1_v0_1_0_rand_bits(1)) { + rustsecp256k1_v0_1_0_num_negate(num); } } -void random_num_order_test(secp256k1_num *num) { - secp256k1_scalar sc; +void random_num_order_test(rustsecp256k1_v0_1_0_num *num) { + rustsecp256k1_v0_1_0_scalar sc; random_scalar_order_test(&sc); - secp256k1_scalar_get_num(num, &sc); + rustsecp256k1_v0_1_0_scalar_get_num(num, &sc); } -void random_num_order(secp256k1_num *num) { - secp256k1_scalar sc; +void random_num_order(rustsecp256k1_v0_1_0_num *num) { + rustsecp256k1_v0_1_0_scalar sc; random_scalar_order(&sc); - secp256k1_scalar_get_num(num, &sc); + rustsecp256k1_v0_1_0_scalar_get_num(num, &sc); } void test_num_negate(void) { - secp256k1_num n1; - secp256k1_num n2; + rustsecp256k1_v0_1_0_num n1; + rustsecp256k1_v0_1_0_num n2; random_num_order_test(&n1); /* n1 = R */ random_num_negate(&n1); - secp256k1_num_copy(&n2, &n1); /* n2 = R */ - secp256k1_num_sub(&n1, &n2, &n1); /* n1 = n2-n1 = 0 */ - CHECK(secp256k1_num_is_zero(&n1)); - secp256k1_num_copy(&n1, &n2); /* n1 = R */ - secp256k1_num_negate(&n1); /* n1 = -R */ - CHECK(!secp256k1_num_is_zero(&n1)); - secp256k1_num_add(&n1, &n2, &n1); /* n1 = n2+n1 = 0 */ - CHECK(secp256k1_num_is_zero(&n1)); - secp256k1_num_copy(&n1, &n2); /* n1 = R */ - secp256k1_num_negate(&n1); /* n1 = -R */ - CHECK(secp256k1_num_is_neg(&n1) != secp256k1_num_is_neg(&n2)); - secp256k1_num_negate(&n1); /* n1 = R */ - CHECK(secp256k1_num_eq(&n1, &n2)); + rustsecp256k1_v0_1_0_num_copy(&n2, &n1); /* n2 = R */ + rustsecp256k1_v0_1_0_num_sub(&n1, &n2, &n1); /* n1 = n2-n1 = 0 */ + CHECK(rustsecp256k1_v0_1_0_num_is_zero(&n1)); + rustsecp256k1_v0_1_0_num_copy(&n1, &n2); /* n1 = R */ + rustsecp256k1_v0_1_0_num_negate(&n1); /* n1 = -R */ + CHECK(!rustsecp256k1_v0_1_0_num_is_zero(&n1)); + rustsecp256k1_v0_1_0_num_add(&n1, &n2, &n1); /* n1 = n2+n1 = 0 */ + CHECK(rustsecp256k1_v0_1_0_num_is_zero(&n1)); + rustsecp256k1_v0_1_0_num_copy(&n1, &n2); /* n1 = R */ + rustsecp256k1_v0_1_0_num_negate(&n1); /* n1 = -R */ + CHECK(rustsecp256k1_v0_1_0_num_is_neg(&n1) != rustsecp256k1_v0_1_0_num_is_neg(&n2)); + rustsecp256k1_v0_1_0_num_negate(&n1); /* n1 = R */ + CHECK(rustsecp256k1_v0_1_0_num_eq(&n1, &n2)); } void test_num_add_sub(void) { int i; - secp256k1_scalar s; - secp256k1_num n1; - secp256k1_num n2; - secp256k1_num n1p2, n2p1, n1m2, n2m1; + rustsecp256k1_v0_1_0_scalar s; + rustsecp256k1_v0_1_0_num n1; + rustsecp256k1_v0_1_0_num n2; + rustsecp256k1_v0_1_0_num n1p2, n2p1, n1m2, n2m1; random_num_order_test(&n1); /* n1 = R1 */ - if (secp256k1_rand_bits(1)) { + if (rustsecp256k1_v0_1_0_rand_bits(1)) { random_num_negate(&n1); } random_num_order_test(&n2); /* n2 = R2 */ - if (secp256k1_rand_bits(1)) { + if (rustsecp256k1_v0_1_0_rand_bits(1)) { random_num_negate(&n2); } - secp256k1_num_add(&n1p2, &n1, 
&n2); /* n1p2 = R1 + R2 */ - secp256k1_num_add(&n2p1, &n2, &n1); /* n2p1 = R2 + R1 */ - secp256k1_num_sub(&n1m2, &n1, &n2); /* n1m2 = R1 - R2 */ - secp256k1_num_sub(&n2m1, &n2, &n1); /* n2m1 = R2 - R1 */ - CHECK(secp256k1_num_eq(&n1p2, &n2p1)); - CHECK(!secp256k1_num_eq(&n1p2, &n1m2)); - secp256k1_num_negate(&n2m1); /* n2m1 = -R2 + R1 */ - CHECK(secp256k1_num_eq(&n2m1, &n1m2)); - CHECK(!secp256k1_num_eq(&n2m1, &n1)); - secp256k1_num_add(&n2m1, &n2m1, &n2); /* n2m1 = -R2 + R1 + R2 = R1 */ - CHECK(secp256k1_num_eq(&n2m1, &n1)); - CHECK(!secp256k1_num_eq(&n2p1, &n1)); - secp256k1_num_sub(&n2p1, &n2p1, &n2); /* n2p1 = R2 + R1 - R2 = R1 */ - CHECK(secp256k1_num_eq(&n2p1, &n1)); + rustsecp256k1_v0_1_0_num_add(&n1p2, &n1, &n2); /* n1p2 = R1 + R2 */ + rustsecp256k1_v0_1_0_num_add(&n2p1, &n2, &n1); /* n2p1 = R2 + R1 */ + rustsecp256k1_v0_1_0_num_sub(&n1m2, &n1, &n2); /* n1m2 = R1 - R2 */ + rustsecp256k1_v0_1_0_num_sub(&n2m1, &n2, &n1); /* n2m1 = R2 - R1 */ + CHECK(rustsecp256k1_v0_1_0_num_eq(&n1p2, &n2p1)); + CHECK(!rustsecp256k1_v0_1_0_num_eq(&n1p2, &n1m2)); + rustsecp256k1_v0_1_0_num_negate(&n2m1); /* n2m1 = -R2 + R1 */ + CHECK(rustsecp256k1_v0_1_0_num_eq(&n2m1, &n1m2)); + CHECK(!rustsecp256k1_v0_1_0_num_eq(&n2m1, &n1)); + rustsecp256k1_v0_1_0_num_add(&n2m1, &n2m1, &n2); /* n2m1 = -R2 + R1 + R2 = R1 */ + CHECK(rustsecp256k1_v0_1_0_num_eq(&n2m1, &n1)); + CHECK(!rustsecp256k1_v0_1_0_num_eq(&n2p1, &n1)); + rustsecp256k1_v0_1_0_num_sub(&n2p1, &n2p1, &n2); /* n2p1 = R2 + R1 - R2 = R1 */ + CHECK(rustsecp256k1_v0_1_0_num_eq(&n2p1, &n1)); /* check is_one */ - secp256k1_scalar_set_int(&s, 1); - secp256k1_scalar_get_num(&n1, &s); - CHECK(secp256k1_num_is_one(&n1)); + rustsecp256k1_v0_1_0_scalar_set_int(&s, 1); + rustsecp256k1_v0_1_0_scalar_get_num(&n1, &s); + CHECK(rustsecp256k1_v0_1_0_num_is_one(&n1)); /* check that 2^n + 1 is never 1 */ - secp256k1_scalar_get_num(&n2, &s); + rustsecp256k1_v0_1_0_scalar_get_num(&n2, &s); for (i = 0; i < 250; ++i) { - secp256k1_num_add(&n1, &n1, &n1); /* n1 *= 2 */ - secp256k1_num_add(&n1p2, &n1, &n2); /* n1p2 = n1 + 1 */ - CHECK(!secp256k1_num_is_one(&n1p2)); + rustsecp256k1_v0_1_0_num_add(&n1, &n1, &n1); /* n1 *= 2 */ + rustsecp256k1_v0_1_0_num_add(&n1p2, &n1, &n2); /* n1p2 = n1 + 1 */ + CHECK(!rustsecp256k1_v0_1_0_num_is_one(&n1p2)); } } void test_num_mod(void) { int i; - secp256k1_scalar s; - secp256k1_num order, n; + rustsecp256k1_v0_1_0_scalar s; + rustsecp256k1_v0_1_0_num order, n; /* check that 0 mod anything is 0 */ random_scalar_order_test(&s); - secp256k1_scalar_get_num(&order, &s); - secp256k1_scalar_set_int(&s, 0); - secp256k1_scalar_get_num(&n, &s); - secp256k1_num_mod(&n, &order); - CHECK(secp256k1_num_is_zero(&n)); + rustsecp256k1_v0_1_0_scalar_get_num(&order, &s); + rustsecp256k1_v0_1_0_scalar_set_int(&s, 0); + rustsecp256k1_v0_1_0_scalar_get_num(&n, &s); + rustsecp256k1_v0_1_0_num_mod(&n, &order); + CHECK(rustsecp256k1_v0_1_0_num_is_zero(&n)); /* check that anything mod 1 is 0 */ - secp256k1_scalar_set_int(&s, 1); - secp256k1_scalar_get_num(&order, &s); - secp256k1_scalar_get_num(&n, &s); - secp256k1_num_mod(&n, &order); - CHECK(secp256k1_num_is_zero(&n)); + rustsecp256k1_v0_1_0_scalar_set_int(&s, 1); + rustsecp256k1_v0_1_0_scalar_get_num(&order, &s); + rustsecp256k1_v0_1_0_scalar_get_num(&n, &s); + rustsecp256k1_v0_1_0_num_mod(&n, &order); + CHECK(rustsecp256k1_v0_1_0_num_is_zero(&n)); /* check that increasing the number past 2^256 does not break this */ random_scalar_order_test(&s); - secp256k1_scalar_get_num(&n, &s); + 
rustsecp256k1_v0_1_0_scalar_get_num(&n, &s); /* multiply by 2^8, which'll test this case with high probability */ for (i = 0; i < 8; ++i) { - secp256k1_num_add(&n, &n, &n); + rustsecp256k1_v0_1_0_num_add(&n, &n, &n); } - secp256k1_num_mod(&n, &order); - CHECK(secp256k1_num_is_zero(&n)); + rustsecp256k1_v0_1_0_num_mod(&n, &order); + CHECK(rustsecp256k1_v0_1_0_num_is_zero(&n)); } void test_num_jacobi(void) { - secp256k1_scalar sqr; - secp256k1_scalar small; - secp256k1_scalar five; /* five is not a quadratic residue */ - secp256k1_num order, n; + rustsecp256k1_v0_1_0_scalar sqr; + rustsecp256k1_v0_1_0_scalar small; + rustsecp256k1_v0_1_0_scalar five; /* five is not a quadratic residue */ + rustsecp256k1_v0_1_0_num order, n; int i; /* squares mod 5 are 1, 4 */ const int jacobi5[10] = { 0, 1, -1, -1, 1, 0, 1, -1, -1, 1 }; /* check some small values with 5 as the order */ - secp256k1_scalar_set_int(&five, 5); - secp256k1_scalar_get_num(&order, &five); + rustsecp256k1_v0_1_0_scalar_set_int(&five, 5); + rustsecp256k1_v0_1_0_scalar_get_num(&order, &five); for (i = 0; i < 10; ++i) { - secp256k1_scalar_set_int(&small, i); - secp256k1_scalar_get_num(&n, &small); - CHECK(secp256k1_num_jacobi(&n, &order) == jacobi5[i]); + rustsecp256k1_v0_1_0_scalar_set_int(&small, i); + rustsecp256k1_v0_1_0_scalar_get_num(&n, &small); + CHECK(rustsecp256k1_v0_1_0_num_jacobi(&n, &order) == jacobi5[i]); } /** test large values with 5 as group order */ - secp256k1_scalar_get_num(&order, &five); + rustsecp256k1_v0_1_0_scalar_get_num(&order, &five); /* we first need a scalar which is not a multiple of 5 */ do { - secp256k1_num fiven; + rustsecp256k1_v0_1_0_num fiven; random_scalar_order_test(&sqr); - secp256k1_scalar_get_num(&fiven, &five); - secp256k1_scalar_get_num(&n, &sqr); - secp256k1_num_mod(&n, &fiven); - } while (secp256k1_num_is_zero(&n)); + rustsecp256k1_v0_1_0_scalar_get_num(&fiven, &five); + rustsecp256k1_v0_1_0_scalar_get_num(&n, &sqr); + rustsecp256k1_v0_1_0_num_mod(&n, &fiven); + } while (rustsecp256k1_v0_1_0_num_is_zero(&n)); /* next force it to be a residue. 2 is a nonresidue mod 5 so we can * just multiply by two, i.e. 
add the number to itself */ - if (secp256k1_num_jacobi(&n, &order) == -1) { - secp256k1_num_add(&n, &n, &n); + if (rustsecp256k1_v0_1_0_num_jacobi(&n, &order) == -1) { + rustsecp256k1_v0_1_0_num_add(&n, &n, &n); } /* test residue */ - CHECK(secp256k1_num_jacobi(&n, &order) == 1); + CHECK(rustsecp256k1_v0_1_0_num_jacobi(&n, &order) == 1); /* test nonresidue */ - secp256k1_num_add(&n, &n, &n); - CHECK(secp256k1_num_jacobi(&n, &order) == -1); + rustsecp256k1_v0_1_0_num_add(&n, &n, &n); + CHECK(rustsecp256k1_v0_1_0_num_jacobi(&n, &order) == -1); /** test with secp group order as order */ - secp256k1_scalar_order_get_num(&order); + rustsecp256k1_v0_1_0_scalar_order_get_num(&order); random_scalar_order_test(&sqr); - secp256k1_scalar_sqr(&sqr, &sqr); + rustsecp256k1_v0_1_0_scalar_sqr(&sqr, &sqr); /* test residue */ - secp256k1_scalar_get_num(&n, &sqr); - CHECK(secp256k1_num_jacobi(&n, &order) == 1); + rustsecp256k1_v0_1_0_scalar_get_num(&n, &sqr); + CHECK(rustsecp256k1_v0_1_0_num_jacobi(&n, &order) == 1); /* test nonresidue */ - secp256k1_scalar_mul(&sqr, &sqr, &five); - secp256k1_scalar_get_num(&n, &sqr); - CHECK(secp256k1_num_jacobi(&n, &order) == -1); + rustsecp256k1_v0_1_0_scalar_mul(&sqr, &sqr, &five); + rustsecp256k1_v0_1_0_scalar_get_num(&n, &sqr); + CHECK(rustsecp256k1_v0_1_0_num_jacobi(&n, &order) == -1); /* test multiple of the order*/ - CHECK(secp256k1_num_jacobi(&order, &order) == 0); + CHECK(rustsecp256k1_v0_1_0_num_jacobi(&order, &order) == 0); /* check one less than the order */ - secp256k1_scalar_set_int(&small, 1); - secp256k1_scalar_get_num(&n, &small); - secp256k1_num_sub(&n, &order, &n); - CHECK(secp256k1_num_jacobi(&n, &order) == 1); /* sage confirms this is 1 */ + rustsecp256k1_v0_1_0_scalar_set_int(&small, 1); + rustsecp256k1_v0_1_0_scalar_get_num(&n, &small); + rustsecp256k1_v0_1_0_num_sub(&n, &order, &n); + CHECK(rustsecp256k1_v0_1_0_num_jacobi(&n, &order) == 1); /* sage confirms this is 1 */ } void run_num_smalltests(void) { @@ -787,12 +787,12 @@ void run_num_smalltests(void) { /***** SCALAR TESTS *****/ void scalar_test(void) { - secp256k1_scalar s; - secp256k1_scalar s1; - secp256k1_scalar s2; + rustsecp256k1_v0_1_0_scalar s; + rustsecp256k1_v0_1_0_scalar s1; + rustsecp256k1_v0_1_0_scalar s2; #ifndef USE_NUM_NONE - secp256k1_num snum, s1num, s2num; - secp256k1_num order, half_order; + rustsecp256k1_v0_1_0_num snum, s1num, s2num; + rustsecp256k1_v0_1_0_num order, half_order; #endif unsigned char c[32]; @@ -804,141 +804,141 @@ void scalar_test(void) { /* Set 's2' to a random scalar, with value 'snum2', and byte array representation 'c'. */ random_scalar_order_test(&s2); - secp256k1_scalar_get_b32(c, &s2); + rustsecp256k1_v0_1_0_scalar_get_b32(c, &s2); #ifndef USE_NUM_NONE - secp256k1_scalar_get_num(&snum, &s); - secp256k1_scalar_get_num(&s1num, &s1); - secp256k1_scalar_get_num(&s2num, &s2); + rustsecp256k1_v0_1_0_scalar_get_num(&snum, &s); + rustsecp256k1_v0_1_0_scalar_get_num(&s1num, &s1); + rustsecp256k1_v0_1_0_scalar_get_num(&s2num, &s2); - secp256k1_scalar_order_get_num(&order); + rustsecp256k1_v0_1_0_scalar_order_get_num(&order); half_order = order; - secp256k1_num_shift(&half_order, 1); + rustsecp256k1_v0_1_0_num_shift(&half_order, 1); #endif { int i; /* Test that fetching groups of 4 bits from a scalar and recursing n(i)=16*n(i-1)+p(i) reconstructs it. 
*/ - secp256k1_scalar n; - secp256k1_scalar_set_int(&n, 0); + rustsecp256k1_v0_1_0_scalar n; + rustsecp256k1_v0_1_0_scalar_set_int(&n, 0); for (i = 0; i < 256; i += 4) { - secp256k1_scalar t; + rustsecp256k1_v0_1_0_scalar t; int j; - secp256k1_scalar_set_int(&t, secp256k1_scalar_get_bits(&s, 256 - 4 - i, 4)); + rustsecp256k1_v0_1_0_scalar_set_int(&t, rustsecp256k1_v0_1_0_scalar_get_bits(&s, 256 - 4 - i, 4)); for (j = 0; j < 4; j++) { - secp256k1_scalar_add(&n, &n, &n); + rustsecp256k1_v0_1_0_scalar_add(&n, &n, &n); } - secp256k1_scalar_add(&n, &n, &t); + rustsecp256k1_v0_1_0_scalar_add(&n, &n, &t); } - CHECK(secp256k1_scalar_eq(&n, &s)); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&n, &s)); } { /* Test that fetching groups of randomly-sized bits from a scalar and recursing n(i)=b*n(i-1)+p(i) reconstructs it. */ - secp256k1_scalar n; + rustsecp256k1_v0_1_0_scalar n; int i = 0; - secp256k1_scalar_set_int(&n, 0); + rustsecp256k1_v0_1_0_scalar_set_int(&n, 0); while (i < 256) { - secp256k1_scalar t; + rustsecp256k1_v0_1_0_scalar t; int j; - int now = secp256k1_rand_int(15) + 1; + int now = rustsecp256k1_v0_1_0_rand_int(15) + 1; if (now + i > 256) { now = 256 - i; } - secp256k1_scalar_set_int(&t, secp256k1_scalar_get_bits_var(&s, 256 - now - i, now)); + rustsecp256k1_v0_1_0_scalar_set_int(&t, rustsecp256k1_v0_1_0_scalar_get_bits_var(&s, 256 - now - i, now)); for (j = 0; j < now; j++) { - secp256k1_scalar_add(&n, &n, &n); + rustsecp256k1_v0_1_0_scalar_add(&n, &n, &n); } - secp256k1_scalar_add(&n, &n, &t); + rustsecp256k1_v0_1_0_scalar_add(&n, &n, &t); i += now; } - CHECK(secp256k1_scalar_eq(&n, &s)); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&n, &s)); } #ifndef USE_NUM_NONE { /* Test that adding the scalars together is equal to adding their numbers together modulo the order. */ - secp256k1_num rnum; - secp256k1_num r2num; - secp256k1_scalar r; - secp256k1_num_add(&rnum, &snum, &s2num); - secp256k1_num_mod(&rnum, &order); - secp256k1_scalar_add(&r, &s, &s2); - secp256k1_scalar_get_num(&r2num, &r); - CHECK(secp256k1_num_eq(&rnum, &r2num)); + rustsecp256k1_v0_1_0_num rnum; + rustsecp256k1_v0_1_0_num r2num; + rustsecp256k1_v0_1_0_scalar r; + rustsecp256k1_v0_1_0_num_add(&rnum, &snum, &s2num); + rustsecp256k1_v0_1_0_num_mod(&rnum, &order); + rustsecp256k1_v0_1_0_scalar_add(&r, &s, &s2); + rustsecp256k1_v0_1_0_scalar_get_num(&r2num, &r); + CHECK(rustsecp256k1_v0_1_0_num_eq(&rnum, &r2num)); } { /* Test that multiplying the scalars is equal to multiplying their numbers modulo the order. */ - secp256k1_scalar r; - secp256k1_num r2num; - secp256k1_num rnum; - secp256k1_num_mul(&rnum, &snum, &s2num); - secp256k1_num_mod(&rnum, &order); - secp256k1_scalar_mul(&r, &s, &s2); - secp256k1_scalar_get_num(&r2num, &r); - CHECK(secp256k1_num_eq(&rnum, &r2num)); + rustsecp256k1_v0_1_0_scalar r; + rustsecp256k1_v0_1_0_num r2num; + rustsecp256k1_v0_1_0_num rnum; + rustsecp256k1_v0_1_0_num_mul(&rnum, &snum, &s2num); + rustsecp256k1_v0_1_0_num_mod(&rnum, &order); + rustsecp256k1_v0_1_0_scalar_mul(&r, &s, &s2); + rustsecp256k1_v0_1_0_scalar_get_num(&r2num, &r); + CHECK(rustsecp256k1_v0_1_0_num_eq(&rnum, &r2num)); /* The result can only be zero if at least one of the factors was zero. 
*/ - CHECK(secp256k1_scalar_is_zero(&r) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_zero(&s2))); + CHECK(rustsecp256k1_v0_1_0_scalar_is_zero(&r) == (rustsecp256k1_v0_1_0_scalar_is_zero(&s) || rustsecp256k1_v0_1_0_scalar_is_zero(&s2))); /* The results can only be equal to one of the factors if that factor was zero, or the other factor was one. */ - CHECK(secp256k1_num_eq(&rnum, &snum) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_one(&s2))); - CHECK(secp256k1_num_eq(&rnum, &s2num) == (secp256k1_scalar_is_zero(&s2) || secp256k1_scalar_is_one(&s))); + CHECK(rustsecp256k1_v0_1_0_num_eq(&rnum, &snum) == (rustsecp256k1_v0_1_0_scalar_is_zero(&s) || rustsecp256k1_v0_1_0_scalar_is_one(&s2))); + CHECK(rustsecp256k1_v0_1_0_num_eq(&rnum, &s2num) == (rustsecp256k1_v0_1_0_scalar_is_zero(&s2) || rustsecp256k1_v0_1_0_scalar_is_one(&s))); } { - secp256k1_scalar neg; - secp256k1_num negnum; - secp256k1_num negnum2; + rustsecp256k1_v0_1_0_scalar neg; + rustsecp256k1_v0_1_0_num negnum; + rustsecp256k1_v0_1_0_num negnum2; /* Check that comparison with zero matches comparison with zero on the number. */ - CHECK(secp256k1_num_is_zero(&snum) == secp256k1_scalar_is_zero(&s)); + CHECK(rustsecp256k1_v0_1_0_num_is_zero(&snum) == rustsecp256k1_v0_1_0_scalar_is_zero(&s)); /* Check that comparison with the half order is equal to testing for high scalar. */ - CHECK(secp256k1_scalar_is_high(&s) == (secp256k1_num_cmp(&snum, &half_order) > 0)); - secp256k1_scalar_negate(&neg, &s); - secp256k1_num_sub(&negnum, &order, &snum); - secp256k1_num_mod(&negnum, &order); + CHECK(rustsecp256k1_v0_1_0_scalar_is_high(&s) == (rustsecp256k1_v0_1_0_num_cmp(&snum, &half_order) > 0)); + rustsecp256k1_v0_1_0_scalar_negate(&neg, &s); + rustsecp256k1_v0_1_0_num_sub(&negnum, &order, &snum); + rustsecp256k1_v0_1_0_num_mod(&negnum, &order); /* Check that comparison with the half order is equal to testing for high scalar after negation. */ - CHECK(secp256k1_scalar_is_high(&neg) == (secp256k1_num_cmp(&negnum, &half_order) > 0)); + CHECK(rustsecp256k1_v0_1_0_scalar_is_high(&neg) == (rustsecp256k1_v0_1_0_num_cmp(&negnum, &half_order) > 0)); /* Negating should change the high property, unless the value was already zero. */ - CHECK((secp256k1_scalar_is_high(&s) == secp256k1_scalar_is_high(&neg)) == secp256k1_scalar_is_zero(&s)); - secp256k1_scalar_get_num(&negnum2, &neg); + CHECK((rustsecp256k1_v0_1_0_scalar_is_high(&s) == rustsecp256k1_v0_1_0_scalar_is_high(&neg)) == rustsecp256k1_v0_1_0_scalar_is_zero(&s)); + rustsecp256k1_v0_1_0_scalar_get_num(&negnum2, &neg); /* Negating a scalar should be equal to (order - n) mod order on the number. */ - CHECK(secp256k1_num_eq(&negnum, &negnum2)); - secp256k1_scalar_add(&neg, &neg, &s); + CHECK(rustsecp256k1_v0_1_0_num_eq(&negnum, &negnum2)); + rustsecp256k1_v0_1_0_scalar_add(&neg, &neg, &s); /* Adding a number to its negation should result in zero. */ - CHECK(secp256k1_scalar_is_zero(&neg)); - secp256k1_scalar_negate(&neg, &neg); + CHECK(rustsecp256k1_v0_1_0_scalar_is_zero(&neg)); + rustsecp256k1_v0_1_0_scalar_negate(&neg, &neg); /* Negating zero should still result in zero. */ - CHECK(secp256k1_scalar_is_zero(&neg)); + CHECK(rustsecp256k1_v0_1_0_scalar_is_zero(&neg)); } { - /* Test secp256k1_scalar_mul_shift_var. */ - secp256k1_scalar r; - secp256k1_num one; - secp256k1_num rnum; - secp256k1_num rnum2; + /* Test rustsecp256k1_v0_1_0_scalar_mul_shift_var. 
*/ + rustsecp256k1_v0_1_0_scalar r; + rustsecp256k1_v0_1_0_num one; + rustsecp256k1_v0_1_0_num rnum; + rustsecp256k1_v0_1_0_num rnum2; unsigned char cone[1] = {0x01}; - unsigned int shift = 256 + secp256k1_rand_int(257); - secp256k1_scalar_mul_shift_var(&r, &s1, &s2, shift); - secp256k1_num_mul(&rnum, &s1num, &s2num); - secp256k1_num_shift(&rnum, shift - 1); - secp256k1_num_set_bin(&one, cone, 1); - secp256k1_num_add(&rnum, &rnum, &one); - secp256k1_num_shift(&rnum, 1); - secp256k1_scalar_get_num(&rnum2, &r); - CHECK(secp256k1_num_eq(&rnum, &rnum2)); + unsigned int shift = 256 + rustsecp256k1_v0_1_0_rand_int(257); + rustsecp256k1_v0_1_0_scalar_mul_shift_var(&r, &s1, &s2, shift); + rustsecp256k1_v0_1_0_num_mul(&rnum, &s1num, &s2num); + rustsecp256k1_v0_1_0_num_shift(&rnum, shift - 1); + rustsecp256k1_v0_1_0_num_set_bin(&one, cone, 1); + rustsecp256k1_v0_1_0_num_add(&rnum, &rnum, &one); + rustsecp256k1_v0_1_0_num_shift(&rnum, 1); + rustsecp256k1_v0_1_0_scalar_get_num(&rnum2, &r); + CHECK(rustsecp256k1_v0_1_0_num_eq(&rnum, &rnum2)); } { - /* test secp256k1_scalar_shr_int */ - secp256k1_scalar r; + /* test rustsecp256k1_v0_1_0_scalar_shr_int */ + rustsecp256k1_v0_1_0_scalar r; int i; random_scalar_order_test(&r); for (i = 0; i < 100; ++i) { int low; - int shift = 1 + secp256k1_rand_int(15); + int shift = 1 + rustsecp256k1_v0_1_0_rand_int(15); int expected = r.d[0] % (1 << shift); - low = secp256k1_scalar_shr_int(&r, shift); + low = rustsecp256k1_v0_1_0_scalar_shr_int(&r, shift); CHECK(expected == low); } } @@ -946,131 +946,131 @@ void scalar_test(void) { { /* Test that scalar inverses are equal to the inverse of their number modulo the order. */ - if (!secp256k1_scalar_is_zero(&s)) { - secp256k1_scalar inv; + if (!rustsecp256k1_v0_1_0_scalar_is_zero(&s)) { + rustsecp256k1_v0_1_0_scalar inv; #ifndef USE_NUM_NONE - secp256k1_num invnum; - secp256k1_num invnum2; + rustsecp256k1_v0_1_0_num invnum; + rustsecp256k1_v0_1_0_num invnum2; #endif - secp256k1_scalar_inverse(&inv, &s); + rustsecp256k1_v0_1_0_scalar_inverse(&inv, &s); #ifndef USE_NUM_NONE - secp256k1_num_mod_inverse(&invnum, &snum, &order); - secp256k1_scalar_get_num(&invnum2, &inv); - CHECK(secp256k1_num_eq(&invnum, &invnum2)); + rustsecp256k1_v0_1_0_num_mod_inverse(&invnum, &snum, &order); + rustsecp256k1_v0_1_0_scalar_get_num(&invnum2, &inv); + CHECK(rustsecp256k1_v0_1_0_num_eq(&invnum, &invnum2)); #endif - secp256k1_scalar_mul(&inv, &inv, &s); + rustsecp256k1_v0_1_0_scalar_mul(&inv, &inv, &s); /* Multiplying a scalar with its inverse must result in one. */ - CHECK(secp256k1_scalar_is_one(&inv)); - secp256k1_scalar_inverse(&inv, &inv); + CHECK(rustsecp256k1_v0_1_0_scalar_is_one(&inv)); + rustsecp256k1_v0_1_0_scalar_inverse(&inv, &inv); /* Inverting one must result in one. */ - CHECK(secp256k1_scalar_is_one(&inv)); + CHECK(rustsecp256k1_v0_1_0_scalar_is_one(&inv)); #ifndef USE_NUM_NONE - secp256k1_scalar_get_num(&invnum, &inv); - CHECK(secp256k1_num_is_one(&invnum)); + rustsecp256k1_v0_1_0_scalar_get_num(&invnum, &inv); + CHECK(rustsecp256k1_v0_1_0_num_is_one(&invnum)); #endif } } { /* Test commutativity of add. 
*/ - secp256k1_scalar r1, r2; - secp256k1_scalar_add(&r1, &s1, &s2); - secp256k1_scalar_add(&r2, &s2, &s1); - CHECK(secp256k1_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_1_0_scalar r1, r2; + rustsecp256k1_v0_1_0_scalar_add(&r1, &s1, &s2); + rustsecp256k1_v0_1_0_scalar_add(&r2, &s2, &s1); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&r1, &r2)); } { - secp256k1_scalar r1, r2; - secp256k1_scalar b; + rustsecp256k1_v0_1_0_scalar r1, r2; + rustsecp256k1_v0_1_0_scalar b; int i; /* Test add_bit. */ - int bit = secp256k1_rand_bits(8); - secp256k1_scalar_set_int(&b, 1); - CHECK(secp256k1_scalar_is_one(&b)); + int bit = rustsecp256k1_v0_1_0_rand_bits(8); + rustsecp256k1_v0_1_0_scalar_set_int(&b, 1); + CHECK(rustsecp256k1_v0_1_0_scalar_is_one(&b)); for (i = 0; i < bit; i++) { - secp256k1_scalar_add(&b, &b, &b); + rustsecp256k1_v0_1_0_scalar_add(&b, &b, &b); } r1 = s1; r2 = s1; - if (!secp256k1_scalar_add(&r1, &r1, &b)) { + if (!rustsecp256k1_v0_1_0_scalar_add(&r1, &r1, &b)) { /* No overflow happened. */ - secp256k1_scalar_cadd_bit(&r2, bit, 1); - CHECK(secp256k1_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_1_0_scalar_cadd_bit(&r2, bit, 1); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&r1, &r2)); /* cadd is a noop when flag is zero */ - secp256k1_scalar_cadd_bit(&r2, bit, 0); - CHECK(secp256k1_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_1_0_scalar_cadd_bit(&r2, bit, 0); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&r1, &r2)); } } { /* Test commutativity of mul. */ - secp256k1_scalar r1, r2; - secp256k1_scalar_mul(&r1, &s1, &s2); - secp256k1_scalar_mul(&r2, &s2, &s1); - CHECK(secp256k1_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_1_0_scalar r1, r2; + rustsecp256k1_v0_1_0_scalar_mul(&r1, &s1, &s2); + rustsecp256k1_v0_1_0_scalar_mul(&r2, &s2, &s1); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&r1, &r2)); } { /* Test associativity of add. */ - secp256k1_scalar r1, r2; - secp256k1_scalar_add(&r1, &s1, &s2); - secp256k1_scalar_add(&r1, &r1, &s); - secp256k1_scalar_add(&r2, &s2, &s); - secp256k1_scalar_add(&r2, &s1, &r2); - CHECK(secp256k1_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_1_0_scalar r1, r2; + rustsecp256k1_v0_1_0_scalar_add(&r1, &s1, &s2); + rustsecp256k1_v0_1_0_scalar_add(&r1, &r1, &s); + rustsecp256k1_v0_1_0_scalar_add(&r2, &s2, &s); + rustsecp256k1_v0_1_0_scalar_add(&r2, &s1, &r2); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&r1, &r2)); } { /* Test associativity of mul. */ - secp256k1_scalar r1, r2; - secp256k1_scalar_mul(&r1, &s1, &s2); - secp256k1_scalar_mul(&r1, &r1, &s); - secp256k1_scalar_mul(&r2, &s2, &s); - secp256k1_scalar_mul(&r2, &s1, &r2); - CHECK(secp256k1_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_1_0_scalar r1, r2; + rustsecp256k1_v0_1_0_scalar_mul(&r1, &s1, &s2); + rustsecp256k1_v0_1_0_scalar_mul(&r1, &r1, &s); + rustsecp256k1_v0_1_0_scalar_mul(&r2, &s2, &s); + rustsecp256k1_v0_1_0_scalar_mul(&r2, &s1, &r2); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&r1, &r2)); } { /* Test distributivity of mul over add. */ - secp256k1_scalar r1, r2, t; - secp256k1_scalar_add(&r1, &s1, &s2); - secp256k1_scalar_mul(&r1, &r1, &s); - secp256k1_scalar_mul(&r2, &s1, &s); - secp256k1_scalar_mul(&t, &s2, &s); - secp256k1_scalar_add(&r2, &r2, &t); - CHECK(secp256k1_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_1_0_scalar r1, r2, t; + rustsecp256k1_v0_1_0_scalar_add(&r1, &s1, &s2); + rustsecp256k1_v0_1_0_scalar_mul(&r1, &r1, &s); + rustsecp256k1_v0_1_0_scalar_mul(&r2, &s1, &s); + rustsecp256k1_v0_1_0_scalar_mul(&t, &s2, &s); + rustsecp256k1_v0_1_0_scalar_add(&r2, &r2, &t); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&r1, &r2)); } { /* Test square.
*/ - secp256k1_scalar r1, r2; - secp256k1_scalar_sqr(&r1, &s1); - secp256k1_scalar_mul(&r2, &s1, &s1); - CHECK(secp256k1_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_1_0_scalar r1, r2; + rustsecp256k1_v0_1_0_scalar_sqr(&r1, &s1); + rustsecp256k1_v0_1_0_scalar_mul(&r2, &s1, &s1); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&r1, &r2)); } { /* Test multiplicative identity. */ - secp256k1_scalar r1, v1; - secp256k1_scalar_set_int(&v1,1); - secp256k1_scalar_mul(&r1, &s1, &v1); - CHECK(secp256k1_scalar_eq(&r1, &s1)); + rustsecp256k1_v0_1_0_scalar r1, v1; + rustsecp256k1_v0_1_0_scalar_set_int(&v1,1); + rustsecp256k1_v0_1_0_scalar_mul(&r1, &s1, &v1); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&r1, &s1)); } { /* Test additive identity. */ - secp256k1_scalar r1, v0; - secp256k1_scalar_set_int(&v0,0); - secp256k1_scalar_add(&r1, &s1, &v0); - CHECK(secp256k1_scalar_eq(&r1, &s1)); + rustsecp256k1_v0_1_0_scalar r1, v0; + rustsecp256k1_v0_1_0_scalar_set_int(&v0,0); + rustsecp256k1_v0_1_0_scalar_add(&r1, &s1, &v0); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&r1, &s1)); } { /* Test zero product property. */ - secp256k1_scalar r1, v0; - secp256k1_scalar_set_int(&v0,0); - secp256k1_scalar_mul(&r1, &s1, &v0); - CHECK(secp256k1_scalar_eq(&r1, &v0)); + rustsecp256k1_v0_1_0_scalar r1, v0; + rustsecp256k1_v0_1_0_scalar_set_int(&v0,0); + rustsecp256k1_v0_1_0_scalar_mul(&r1, &s1, &v0); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&r1, &v0)); } } @@ -1083,38 +1083,38 @@ void run_scalar_tests(void) { { /* (-1)+1 should be zero. */ - secp256k1_scalar s, o; - secp256k1_scalar_set_int(&s, 1); - CHECK(secp256k1_scalar_is_one(&s)); - secp256k1_scalar_negate(&o, &s); - secp256k1_scalar_add(&o, &o, &s); - CHECK(secp256k1_scalar_is_zero(&o)); - secp256k1_scalar_negate(&o, &o); - CHECK(secp256k1_scalar_is_zero(&o)); + rustsecp256k1_v0_1_0_scalar s, o; + rustsecp256k1_v0_1_0_scalar_set_int(&s, 1); + CHECK(rustsecp256k1_v0_1_0_scalar_is_one(&s)); + rustsecp256k1_v0_1_0_scalar_negate(&o, &s); + rustsecp256k1_v0_1_0_scalar_add(&o, &o, &s); + CHECK(rustsecp256k1_v0_1_0_scalar_is_zero(&o)); + rustsecp256k1_v0_1_0_scalar_negate(&o, &o); + CHECK(rustsecp256k1_v0_1_0_scalar_is_zero(&o)); } #ifndef USE_NUM_NONE { /* A scalar with value of the curve order should be 0. */ - secp256k1_num order; - secp256k1_scalar zero; + rustsecp256k1_v0_1_0_num order; + rustsecp256k1_v0_1_0_scalar zero; unsigned char bin[32]; int overflow = 0; - secp256k1_scalar_order_get_num(&order); - secp256k1_num_get_bin(bin, 32, &order); - secp256k1_scalar_set_b32(&zero, bin, &overflow); + rustsecp256k1_v0_1_0_scalar_order_get_num(&order); + rustsecp256k1_v0_1_0_num_get_bin(bin, 32, &order); + rustsecp256k1_v0_1_0_scalar_set_b32(&zero, bin, &overflow); CHECK(overflow == 1); - CHECK(secp256k1_scalar_is_zero(&zero)); + CHECK(rustsecp256k1_v0_1_0_scalar_is_zero(&zero)); } #endif { /* Does check_overflow check catch all ones? */ - static const secp256k1_scalar overflowed = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_1_0_scalar overflowed = SECP256K1_SCALAR_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); - CHECK(secp256k1_scalar_check_overflow(&overflowed)); + CHECK(rustsecp256k1_v0_1_0_scalar_check_overflow(&overflowed)); } { @@ -1123,15 +1123,15 @@ void run_scalar_tests(void) { * and edge-case coverage on 32-bit and 64-bit implementations. * The responses were generated with Sage 5.9. 
*/ - secp256k1_scalar x; - secp256k1_scalar y; - secp256k1_scalar z; - secp256k1_scalar zz; - secp256k1_scalar one; - secp256k1_scalar r1; - secp256k1_scalar r2; + rustsecp256k1_v0_1_0_scalar x; + rustsecp256k1_v0_1_0_scalar y; + rustsecp256k1_v0_1_0_scalar z; + rustsecp256k1_v0_1_0_scalar zz; + rustsecp256k1_v0_1_0_scalar one; + rustsecp256k1_v0_1_0_scalar r1; + rustsecp256k1_v0_1_0_scalar r2; #if defined(USE_SCALAR_INV_NUM) - secp256k1_scalar zzv; + rustsecp256k1_v0_1_0_scalar zzv; #endif int overflow; unsigned char chal[33][2][32] = { @@ -1666,71 +1666,71 @@ void run_scalar_tests(void) { 0x1e, 0x86, 0x5d, 0x89, 0x63, 0xe6, 0x0a, 0x46, 0x5c, 0x02, 0x97, 0x1b, 0x62, 0x43, 0x86, 0xf5}} }; - secp256k1_scalar_set_int(&one, 1); + rustsecp256k1_v0_1_0_scalar_set_int(&one, 1); for (i = 0; i < 33; i++) { - secp256k1_scalar_set_b32(&x, chal[i][0], &overflow); + rustsecp256k1_v0_1_0_scalar_set_b32(&x, chal[i][0], &overflow); CHECK(!overflow); - secp256k1_scalar_set_b32(&y, chal[i][1], &overflow); + rustsecp256k1_v0_1_0_scalar_set_b32(&y, chal[i][1], &overflow); CHECK(!overflow); - secp256k1_scalar_set_b32(&r1, res[i][0], &overflow); + rustsecp256k1_v0_1_0_scalar_set_b32(&r1, res[i][0], &overflow); CHECK(!overflow); - secp256k1_scalar_set_b32(&r2, res[i][1], &overflow); + rustsecp256k1_v0_1_0_scalar_set_b32(&r2, res[i][1], &overflow); CHECK(!overflow); - secp256k1_scalar_mul(&z, &x, &y); - CHECK(!secp256k1_scalar_check_overflow(&z)); - CHECK(secp256k1_scalar_eq(&r1, &z)); - if (!secp256k1_scalar_is_zero(&y)) { - secp256k1_scalar_inverse(&zz, &y); - CHECK(!secp256k1_scalar_check_overflow(&zz)); + rustsecp256k1_v0_1_0_scalar_mul(&z, &x, &y); + CHECK(!rustsecp256k1_v0_1_0_scalar_check_overflow(&z)); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&r1, &z)); + if (!rustsecp256k1_v0_1_0_scalar_is_zero(&y)) { + rustsecp256k1_v0_1_0_scalar_inverse(&zz, &y); + CHECK(!rustsecp256k1_v0_1_0_scalar_check_overflow(&zz)); #if defined(USE_SCALAR_INV_NUM) - secp256k1_scalar_inverse_var(&zzv, &y); - CHECK(secp256k1_scalar_eq(&zzv, &zz)); + rustsecp256k1_v0_1_0_scalar_inverse_var(&zzv, &y); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&zzv, &zz)); #endif - secp256k1_scalar_mul(&z, &z, &zz); - CHECK(!secp256k1_scalar_check_overflow(&z)); - CHECK(secp256k1_scalar_eq(&x, &z)); - secp256k1_scalar_mul(&zz, &zz, &y); - CHECK(!secp256k1_scalar_check_overflow(&zz)); - CHECK(secp256k1_scalar_eq(&one, &zz)); + rustsecp256k1_v0_1_0_scalar_mul(&z, &z, &zz); + CHECK(!rustsecp256k1_v0_1_0_scalar_check_overflow(&z)); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&x, &z)); + rustsecp256k1_v0_1_0_scalar_mul(&zz, &zz, &y); + CHECK(!rustsecp256k1_v0_1_0_scalar_check_overflow(&zz)); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&one, &zz)); } - secp256k1_scalar_mul(&z, &x, &x); - CHECK(!secp256k1_scalar_check_overflow(&z)); - secp256k1_scalar_sqr(&zz, &x); - CHECK(!secp256k1_scalar_check_overflow(&zz)); - CHECK(secp256k1_scalar_eq(&zz, &z)); - CHECK(secp256k1_scalar_eq(&r2, &zz)); + rustsecp256k1_v0_1_0_scalar_mul(&z, &x, &x); + CHECK(!rustsecp256k1_v0_1_0_scalar_check_overflow(&z)); + rustsecp256k1_v0_1_0_scalar_sqr(&zz, &x); + CHECK(!rustsecp256k1_v0_1_0_scalar_check_overflow(&zz)); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&zz, &z)); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&r2, &zz)); } } } /***** FIELD TESTS *****/ -void random_fe(secp256k1_fe *x) { +void random_fe(rustsecp256k1_v0_1_0_fe *x) { unsigned char bin[32]; do { - secp256k1_rand256(bin); - if (secp256k1_fe_set_b32(x, bin)) { + rustsecp256k1_v0_1_0_rand256(bin); + if (rustsecp256k1_v0_1_0_fe_set_b32(x, 
bin)) { return; } } while(1); } -void random_fe_test(secp256k1_fe *x) { +void random_fe_test(rustsecp256k1_v0_1_0_fe *x) { unsigned char bin[32]; do { - secp256k1_rand256_test(bin); - if (secp256k1_fe_set_b32(x, bin)) { + rustsecp256k1_v0_1_0_rand256_test(bin); + if (rustsecp256k1_v0_1_0_fe_set_b32(x, bin)) { return; } } while(1); } -void random_fe_non_zero(secp256k1_fe *nz) { +void random_fe_non_zero(rustsecp256k1_v0_1_0_fe *nz) { int tries = 10; while (--tries >= 0) { random_fe(nz); - secp256k1_fe_normalize(nz); - if (!secp256k1_fe_is_zero(nz)) { + rustsecp256k1_v0_1_0_fe_normalize(nz); + if (!rustsecp256k1_v0_1_0_fe_is_zero(nz)) { break; } } @@ -1738,26 +1738,26 @@ void random_fe_non_zero(secp256k1_fe *nz) { CHECK(tries >= 0); } -void random_fe_non_square(secp256k1_fe *ns) { - secp256k1_fe r; +void random_fe_non_square(rustsecp256k1_v0_1_0_fe *ns) { + rustsecp256k1_v0_1_0_fe r; random_fe_non_zero(ns); - if (secp256k1_fe_sqrt(&r, ns)) { - secp256k1_fe_negate(ns, ns, 1); + if (rustsecp256k1_v0_1_0_fe_sqrt(&r, ns)) { + rustsecp256k1_v0_1_0_fe_negate(ns, ns, 1); } } -int check_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b) { - secp256k1_fe an = *a; - secp256k1_fe bn = *b; - secp256k1_fe_normalize_weak(&an); - secp256k1_fe_normalize_var(&bn); - return secp256k1_fe_equal_var(&an, &bn); +int check_fe_equal(const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe *b) { + rustsecp256k1_v0_1_0_fe an = *a; + rustsecp256k1_v0_1_0_fe bn = *b; + rustsecp256k1_v0_1_0_fe_normalize_weak(&an); + rustsecp256k1_v0_1_0_fe_normalize_var(&bn); + return rustsecp256k1_v0_1_0_fe_equal_var(&an, &bn); } -int check_fe_inverse(const secp256k1_fe *a, const secp256k1_fe *ai) { - secp256k1_fe x; - secp256k1_fe one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); - secp256k1_fe_mul(&x, a, ai); +int check_fe_inverse(const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe *ai) { + rustsecp256k1_v0_1_0_fe x; + rustsecp256k1_v0_1_0_fe one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); + rustsecp256k1_v0_1_0_fe_mul(&x, a, ai); return check_fe_equal(&x, &one); } @@ -1768,150 +1768,150 @@ void run_field_convert(void) { 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x40 }; - static const secp256k1_fe_storage fes = SECP256K1_FE_STORAGE_CONST( + static const rustsecp256k1_v0_1_0_fe_storage fes = SECP256K1_FE_STORAGE_CONST( 0x00010203UL, 0x04050607UL, 0x11121314UL, 0x15161718UL, 0x22232425UL, 0x26272829UL, 0x33343536UL, 0x37383940UL ); - static const secp256k1_fe fe = SECP256K1_FE_CONST( + static const rustsecp256k1_v0_1_0_fe fe = SECP256K1_FE_CONST( 0x00010203UL, 0x04050607UL, 0x11121314UL, 0x15161718UL, 0x22232425UL, 0x26272829UL, 0x33343536UL, 0x37383940UL ); - secp256k1_fe fe2; + rustsecp256k1_v0_1_0_fe fe2; unsigned char b322[32]; - secp256k1_fe_storage fes2; + rustsecp256k1_v0_1_0_fe_storage fes2; /* Check conversions to fe. */ - CHECK(secp256k1_fe_set_b32(&fe2, b32)); - CHECK(secp256k1_fe_equal_var(&fe, &fe2)); - secp256k1_fe_from_storage(&fe2, &fes); - CHECK(secp256k1_fe_equal_var(&fe, &fe2)); + CHECK(rustsecp256k1_v0_1_0_fe_set_b32(&fe2, b32)); + CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&fe, &fe2)); + rustsecp256k1_v0_1_0_fe_from_storage(&fe2, &fes); + CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&fe, &fe2)); /* Check conversion from fe. 
*/ - secp256k1_fe_get_b32(b322, &fe); + rustsecp256k1_v0_1_0_fe_get_b32(b322, &fe); CHECK(memcmp(b322, b32, 32) == 0); - secp256k1_fe_to_storage(&fes2, &fe); + rustsecp256k1_v0_1_0_fe_to_storage(&fes2, &fe); CHECK(memcmp(&fes2, &fes, sizeof(fes)) == 0); } -int fe_memcmp(const secp256k1_fe *a, const secp256k1_fe *b) { - secp256k1_fe t = *b; +int fe_memcmp(const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe *b) { + rustsecp256k1_v0_1_0_fe t = *b; #ifdef VERIFY t.magnitude = a->magnitude; t.normalized = a->normalized; #endif - return memcmp(a, &t, sizeof(secp256k1_fe)); + return memcmp(a, &t, sizeof(rustsecp256k1_v0_1_0_fe)); } void run_field_misc(void) { - secp256k1_fe x; - secp256k1_fe y; - secp256k1_fe z; - secp256k1_fe q; - secp256k1_fe fe5 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 5); + rustsecp256k1_v0_1_0_fe x; + rustsecp256k1_v0_1_0_fe y; + rustsecp256k1_v0_1_0_fe z; + rustsecp256k1_v0_1_0_fe q; + rustsecp256k1_v0_1_0_fe fe5 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 5); int i, j; for (i = 0; i < 5*count; i++) { - secp256k1_fe_storage xs, ys, zs; + rustsecp256k1_v0_1_0_fe_storage xs, ys, zs; random_fe(&x); random_fe_non_zero(&y); /* Test the fe equality and comparison operations. */ - CHECK(secp256k1_fe_cmp_var(&x, &x) == 0); - CHECK(secp256k1_fe_equal_var(&x, &x)); + CHECK(rustsecp256k1_v0_1_0_fe_cmp_var(&x, &x) == 0); + CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&x, &x)); z = x; - secp256k1_fe_add(&z,&y); + rustsecp256k1_v0_1_0_fe_add(&z,&y); /* Test fe conditional move; z is not normalized here. */ q = x; - secp256k1_fe_cmov(&x, &z, 0); + rustsecp256k1_v0_1_0_fe_cmov(&x, &z, 0); VERIFY_CHECK(!x.normalized && x.magnitude == z.magnitude); - secp256k1_fe_cmov(&x, &x, 1); + rustsecp256k1_v0_1_0_fe_cmov(&x, &x, 1); CHECK(fe_memcmp(&x, &z) != 0); CHECK(fe_memcmp(&x, &q) == 0); - secp256k1_fe_cmov(&q, &z, 1); + rustsecp256k1_v0_1_0_fe_cmov(&q, &z, 1); VERIFY_CHECK(!q.normalized && q.magnitude == z.magnitude); CHECK(fe_memcmp(&q, &z) == 0); - secp256k1_fe_normalize_var(&x); - secp256k1_fe_normalize_var(&z); - CHECK(!secp256k1_fe_equal_var(&x, &z)); - secp256k1_fe_normalize_var(&q); - secp256k1_fe_cmov(&q, &z, (i&1)); + rustsecp256k1_v0_1_0_fe_normalize_var(&x); + rustsecp256k1_v0_1_0_fe_normalize_var(&z); + CHECK(!rustsecp256k1_v0_1_0_fe_equal_var(&x, &z)); + rustsecp256k1_v0_1_0_fe_normalize_var(&q); + rustsecp256k1_v0_1_0_fe_cmov(&q, &z, (i&1)); VERIFY_CHECK(q.normalized && q.magnitude == 1); for (j = 0; j < 6; j++) { - secp256k1_fe_negate(&z, &z, j+1); - secp256k1_fe_normalize_var(&q); - secp256k1_fe_cmov(&q, &z, (j&1)); + rustsecp256k1_v0_1_0_fe_negate(&z, &z, j+1); + rustsecp256k1_v0_1_0_fe_normalize_var(&q); + rustsecp256k1_v0_1_0_fe_cmov(&q, &z, (j&1)); VERIFY_CHECK(!q.normalized && q.magnitude == (j+2)); } - secp256k1_fe_normalize_var(&z); + rustsecp256k1_v0_1_0_fe_normalize_var(&z); /* Test storage conversion and conditional moves. 
*/ - secp256k1_fe_to_storage(&xs, &x); - secp256k1_fe_to_storage(&ys, &y); - secp256k1_fe_to_storage(&zs, &z); - secp256k1_fe_storage_cmov(&zs, &xs, 0); - secp256k1_fe_storage_cmov(&zs, &zs, 1); + rustsecp256k1_v0_1_0_fe_to_storage(&xs, &x); + rustsecp256k1_v0_1_0_fe_to_storage(&ys, &y); + rustsecp256k1_v0_1_0_fe_to_storage(&zs, &z); + rustsecp256k1_v0_1_0_fe_storage_cmov(&zs, &xs, 0); + rustsecp256k1_v0_1_0_fe_storage_cmov(&zs, &zs, 1); CHECK(memcmp(&xs, &zs, sizeof(xs)) != 0); - secp256k1_fe_storage_cmov(&ys, &xs, 1); + rustsecp256k1_v0_1_0_fe_storage_cmov(&ys, &xs, 1); CHECK(memcmp(&xs, &ys, sizeof(xs)) == 0); - secp256k1_fe_from_storage(&x, &xs); - secp256k1_fe_from_storage(&y, &ys); - secp256k1_fe_from_storage(&z, &zs); + rustsecp256k1_v0_1_0_fe_from_storage(&x, &xs); + rustsecp256k1_v0_1_0_fe_from_storage(&y, &ys); + rustsecp256k1_v0_1_0_fe_from_storage(&z, &zs); /* Test that mul_int, mul, and add agree. */ - secp256k1_fe_add(&y, &x); - secp256k1_fe_add(&y, &x); + rustsecp256k1_v0_1_0_fe_add(&y, &x); + rustsecp256k1_v0_1_0_fe_add(&y, &x); z = x; - secp256k1_fe_mul_int(&z, 3); + rustsecp256k1_v0_1_0_fe_mul_int(&z, 3); CHECK(check_fe_equal(&y, &z)); - secp256k1_fe_add(&y, &x); - secp256k1_fe_add(&z, &x); + rustsecp256k1_v0_1_0_fe_add(&y, &x); + rustsecp256k1_v0_1_0_fe_add(&z, &x); CHECK(check_fe_equal(&z, &y)); z = x; - secp256k1_fe_mul_int(&z, 5); - secp256k1_fe_mul(&q, &x, &fe5); + rustsecp256k1_v0_1_0_fe_mul_int(&z, 5); + rustsecp256k1_v0_1_0_fe_mul(&q, &x, &fe5); CHECK(check_fe_equal(&z, &q)); - secp256k1_fe_negate(&x, &x, 1); - secp256k1_fe_add(&z, &x); - secp256k1_fe_add(&q, &x); + rustsecp256k1_v0_1_0_fe_negate(&x, &x, 1); + rustsecp256k1_v0_1_0_fe_add(&z, &x); + rustsecp256k1_v0_1_0_fe_add(&q, &x); CHECK(check_fe_equal(&y, &z)); CHECK(check_fe_equal(&q, &y)); } } void run_field_inv(void) { - secp256k1_fe x, xi, xii; + rustsecp256k1_v0_1_0_fe x, xi, xii; int i; for (i = 0; i < 10*count; i++) { random_fe_non_zero(&x); - secp256k1_fe_inv(&xi, &x); + rustsecp256k1_v0_1_0_fe_inv(&xi, &x); CHECK(check_fe_inverse(&x, &xi)); - secp256k1_fe_inv(&xii, &xi); + rustsecp256k1_v0_1_0_fe_inv(&xii, &xi); CHECK(check_fe_equal(&x, &xii)); } } void run_field_inv_var(void) { - secp256k1_fe x, xi, xii; + rustsecp256k1_v0_1_0_fe x, xi, xii; int i; for (i = 0; i < 10*count; i++) { random_fe_non_zero(&x); - secp256k1_fe_inv_var(&xi, &x); + rustsecp256k1_v0_1_0_fe_inv_var(&xi, &x); CHECK(check_fe_inverse(&x, &xi)); - secp256k1_fe_inv_var(&xii, &xi); + rustsecp256k1_v0_1_0_fe_inv_var(&xii, &xi); CHECK(check_fe_equal(&x, &xii)); } } void run_field_inv_all_var(void) { - secp256k1_fe x[16], xi[16], xii[16]; + rustsecp256k1_v0_1_0_fe x[16], xi[16], xii[16]; int i; /* Check it's safe to call for 0 elements */ - secp256k1_fe_inv_all_var(xi, x, 0); + rustsecp256k1_v0_1_0_fe_inv_all_var(xi, x, 0); for (i = 0; i < count; i++) { size_t j; - size_t len = secp256k1_rand_int(15) + 1; + size_t len = rustsecp256k1_v0_1_0_rand_int(15) + 1; for (j = 0; j < len; j++) { random_fe_non_zero(&x[j]); } - secp256k1_fe_inv_all_var(xi, x, len); + rustsecp256k1_v0_1_0_fe_inv_all_var(xi, x, len); for (j = 0; j < len; j++) { CHECK(check_fe_inverse(&x[j], &xi[j])); } - secp256k1_fe_inv_all_var(xii, xi, len); + rustsecp256k1_v0_1_0_fe_inv_all_var(xii, xi, len); for (j = 0; j < len; j++) { CHECK(check_fe_equal(&x[j], &xii[j])); } @@ -1919,50 +1919,50 @@ void run_field_inv_all_var(void) { } void run_sqr(void) { - secp256k1_fe x, s; + rustsecp256k1_v0_1_0_fe x, s; { int i; - secp256k1_fe_set_int(&x, 1); - secp256k1_fe_negate(&x, &x, 1); 
+ rustsecp256k1_v0_1_0_fe_set_int(&x, 1); + rustsecp256k1_v0_1_0_fe_negate(&x, &x, 1); for (i = 1; i <= 512; ++i) { - secp256k1_fe_mul_int(&x, 2); - secp256k1_fe_normalize(&x); - secp256k1_fe_sqr(&s, &x); + rustsecp256k1_v0_1_0_fe_mul_int(&x, 2); + rustsecp256k1_v0_1_0_fe_normalize(&x); + rustsecp256k1_v0_1_0_fe_sqr(&s, &x); } } } -void test_sqrt(const secp256k1_fe *a, const secp256k1_fe *k) { - secp256k1_fe r1, r2; - int v = secp256k1_fe_sqrt(&r1, a); +void test_sqrt(const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe *k) { + rustsecp256k1_v0_1_0_fe r1, r2; + int v = rustsecp256k1_v0_1_0_fe_sqrt(&r1, a); CHECK((v == 0) == (k == NULL)); if (k != NULL) { /* Check that the returned root is +/- the given known answer */ - secp256k1_fe_negate(&r2, &r1, 1); - secp256k1_fe_add(&r1, k); secp256k1_fe_add(&r2, k); - secp256k1_fe_normalize(&r1); secp256k1_fe_normalize(&r2); - CHECK(secp256k1_fe_is_zero(&r1) || secp256k1_fe_is_zero(&r2)); + rustsecp256k1_v0_1_0_fe_negate(&r2, &r1, 1); + rustsecp256k1_v0_1_0_fe_add(&r1, k); rustsecp256k1_v0_1_0_fe_add(&r2, k); + rustsecp256k1_v0_1_0_fe_normalize(&r1); rustsecp256k1_v0_1_0_fe_normalize(&r2); + CHECK(rustsecp256k1_v0_1_0_fe_is_zero(&r1) || rustsecp256k1_v0_1_0_fe_is_zero(&r2)); } } void run_sqrt(void) { - secp256k1_fe ns, x, s, t; + rustsecp256k1_v0_1_0_fe ns, x, s, t; int i; /* Check sqrt(0) is 0 */ - secp256k1_fe_set_int(&x, 0); - secp256k1_fe_sqr(&s, &x); + rustsecp256k1_v0_1_0_fe_set_int(&x, 0); + rustsecp256k1_v0_1_0_fe_sqr(&s, &x); test_sqrt(&s, &x); /* Check sqrt of small squares (and their negatives) */ for (i = 1; i <= 100; i++) { - secp256k1_fe_set_int(&x, i); - secp256k1_fe_sqr(&s, &x); + rustsecp256k1_v0_1_0_fe_set_int(&x, i); + rustsecp256k1_v0_1_0_fe_sqr(&s, &x); test_sqrt(&s, &x); - secp256k1_fe_negate(&t, &s, 1); + rustsecp256k1_v0_1_0_fe_negate(&t, &s, 1); test_sqrt(&t, NULL); } @@ -1972,11 +1972,11 @@ void run_sqrt(void) { random_fe_non_square(&ns); for (j = 0; j < count; j++) { random_fe(&x); - secp256k1_fe_sqr(&s, &x); + rustsecp256k1_v0_1_0_fe_sqr(&s, &x); test_sqrt(&s, &x); - secp256k1_fe_negate(&t, &s, 1); + rustsecp256k1_v0_1_0_fe_negate(&t, &s, 1); test_sqrt(&t, NULL); - secp256k1_fe_mul(&t, &s, &ns); + rustsecp256k1_v0_1_0_fe_mul(&t, &s, &ns); test_sqrt(&t, NULL); } } @@ -1984,52 +1984,52 @@ void run_sqrt(void) { /***** GROUP TESTS *****/ -void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) { +void ge_equals_ge(const rustsecp256k1_v0_1_0_ge *a, const rustsecp256k1_v0_1_0_ge *b) { CHECK(a->infinity == b->infinity); if (a->infinity) { return; } - CHECK(secp256k1_fe_equal_var(&a->x, &b->x)); - CHECK(secp256k1_fe_equal_var(&a->y, &b->y)); + CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&a->x, &b->x)); + CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&a->y, &b->y)); } /* This compares jacobian points including their Z, not just their geometric meaning. 
*/ -int gej_xyz_equals_gej(const secp256k1_gej *a, const secp256k1_gej *b) { - secp256k1_gej a2; - secp256k1_gej b2; +int gej_xyz_equals_gej(const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_gej *b) { + rustsecp256k1_v0_1_0_gej a2; + rustsecp256k1_v0_1_0_gej b2; int ret = 1; ret &= a->infinity == b->infinity; if (ret && !a->infinity) { a2 = *a; b2 = *b; - secp256k1_fe_normalize(&a2.x); - secp256k1_fe_normalize(&a2.y); - secp256k1_fe_normalize(&a2.z); - secp256k1_fe_normalize(&b2.x); - secp256k1_fe_normalize(&b2.y); - secp256k1_fe_normalize(&b2.z); - ret &= secp256k1_fe_cmp_var(&a2.x, &b2.x) == 0; - ret &= secp256k1_fe_cmp_var(&a2.y, &b2.y) == 0; - ret &= secp256k1_fe_cmp_var(&a2.z, &b2.z) == 0; + rustsecp256k1_v0_1_0_fe_normalize(&a2.x); + rustsecp256k1_v0_1_0_fe_normalize(&a2.y); + rustsecp256k1_v0_1_0_fe_normalize(&a2.z); + rustsecp256k1_v0_1_0_fe_normalize(&b2.x); + rustsecp256k1_v0_1_0_fe_normalize(&b2.y); + rustsecp256k1_v0_1_0_fe_normalize(&b2.z); + ret &= rustsecp256k1_v0_1_0_fe_cmp_var(&a2.x, &b2.x) == 0; + ret &= rustsecp256k1_v0_1_0_fe_cmp_var(&a2.y, &b2.y) == 0; + ret &= rustsecp256k1_v0_1_0_fe_cmp_var(&a2.z, &b2.z) == 0; } return ret; } -void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) { - secp256k1_fe z2s; - secp256k1_fe u1, u2, s1, s2; +void ge_equals_gej(const rustsecp256k1_v0_1_0_ge *a, const rustsecp256k1_v0_1_0_gej *b) { + rustsecp256k1_v0_1_0_fe z2s; + rustsecp256k1_v0_1_0_fe u1, u2, s1, s2; CHECK(a->infinity == b->infinity); if (a->infinity) { return; } /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. */ - secp256k1_fe_sqr(&z2s, &b->z); - secp256k1_fe_mul(&u1, &a->x, &z2s); - u2 = b->x; secp256k1_fe_normalize_weak(&u2); - secp256k1_fe_mul(&s1, &a->y, &z2s); secp256k1_fe_mul(&s1, &s1, &b->z); - s2 = b->y; secp256k1_fe_normalize_weak(&s2); - CHECK(secp256k1_fe_equal_var(&u1, &u2)); - CHECK(secp256k1_fe_equal_var(&s1, &s2)); + rustsecp256k1_v0_1_0_fe_sqr(&z2s, &b->z); + rustsecp256k1_v0_1_0_fe_mul(&u1, &a->x, &z2s); + u2 = b->x; rustsecp256k1_v0_1_0_fe_normalize_weak(&u2); + rustsecp256k1_v0_1_0_fe_mul(&s1, &a->y, &z2s); rustsecp256k1_v0_1_0_fe_mul(&s1, &s1, &b->z); + s2 = b->y; rustsecp256k1_v0_1_0_fe_normalize_weak(&s2); + CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&u1, &u2)); + CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&s1, &s2)); } void test_ge(void) { @@ -2046,34 +2046,34 @@ void test_ge(void) { * * When the endomorphism code is compiled in, p5 = lambda*p1 and p6 = lambda^2*p1 are added as well. 
*/ - secp256k1_ge *ge = (secp256k1_ge *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_ge) * (1 + 4 * runs)); - secp256k1_gej *gej = (secp256k1_gej *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_gej) * (1 + 4 * runs)); - secp256k1_fe *zinv = (secp256k1_fe *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_fe) * (1 + 4 * runs)); - secp256k1_fe zf; - secp256k1_fe zfi2, zfi3; + rustsecp256k1_v0_1_0_ge *ge = (rustsecp256k1_v0_1_0_ge *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_1_0_ge) * (1 + 4 * runs)); + rustsecp256k1_v0_1_0_gej *gej = (rustsecp256k1_v0_1_0_gej *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_1_0_gej) * (1 + 4 * runs)); + rustsecp256k1_v0_1_0_fe *zinv = (rustsecp256k1_v0_1_0_fe *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_1_0_fe) * (1 + 4 * runs)); + rustsecp256k1_v0_1_0_fe zf; + rustsecp256k1_v0_1_0_fe zfi2, zfi3; - secp256k1_gej_set_infinity(&gej[0]); - secp256k1_ge_clear(&ge[0]); - secp256k1_ge_set_gej_var(&ge[0], &gej[0]); + rustsecp256k1_v0_1_0_gej_set_infinity(&gej[0]); + rustsecp256k1_v0_1_0_ge_clear(&ge[0]); + rustsecp256k1_v0_1_0_ge_set_gej_var(&ge[0], &gej[0]); for (i = 0; i < runs; i++) { int j; - secp256k1_ge g; + rustsecp256k1_v0_1_0_ge g; random_group_element_test(&g); #ifdef USE_ENDOMORPHISM if (i >= runs - 2) { - secp256k1_ge_mul_lambda(&g, &ge[1]); + rustsecp256k1_v0_1_0_ge_mul_lambda(&g, &ge[1]); } if (i >= runs - 1) { - secp256k1_ge_mul_lambda(&g, &g); + rustsecp256k1_v0_1_0_ge_mul_lambda(&g, &g); } #endif ge[1 + 4 * i] = g; ge[2 + 4 * i] = g; - secp256k1_ge_neg(&ge[3 + 4 * i], &g); - secp256k1_ge_neg(&ge[4 + 4 * i], &g); - secp256k1_gej_set_ge(&gej[1 + 4 * i], &ge[1 + 4 * i]); + rustsecp256k1_v0_1_0_ge_neg(&ge[3 + 4 * i], &g); + rustsecp256k1_v0_1_0_ge_neg(&ge[4 + 4 * i], &g); + rustsecp256k1_v0_1_0_gej_set_ge(&gej[1 + 4 * i], &ge[1 + 4 * i]); random_group_element_jacobian_test(&gej[2 + 4 * i], &ge[2 + 4 * i]); - secp256k1_gej_set_ge(&gej[3 + 4 * i], &ge[3 + 4 * i]); + rustsecp256k1_v0_1_0_gej_set_ge(&gej[3 + 4 * i], &ge[3 + 4 * i]); random_group_element_jacobian_test(&gej[4 + 4 * i], &ge[4 + 4 * i]); for (j = 0; j < 4; j++) { random_field_element_magnitude(&ge[1 + j + 4 * i].x); @@ -2086,99 +2086,99 @@ void test_ge(void) { /* Compute z inverses. */ { - secp256k1_fe *zs = checked_malloc(&ctx->error_callback, sizeof(secp256k1_fe) * (1 + 4 * runs)); + rustsecp256k1_v0_1_0_fe *zs = checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_1_0_fe) * (1 + 4 * runs)); for (i = 0; i < 4 * runs + 1; i++) { if (i == 0) { /* The point at infinity does not have a meaningful z inverse. Any should do. */ do { random_field_element_test(&zs[i]); - } while(secp256k1_fe_is_zero(&zs[i])); + } while(rustsecp256k1_v0_1_0_fe_is_zero(&zs[i])); } else { zs[i] = gej[i].z; } } - secp256k1_fe_inv_all_var(zinv, zs, 4 * runs + 1); + rustsecp256k1_v0_1_0_fe_inv_all_var(zinv, zs, 4 * runs + 1); free(zs); } /* Generate random zf, and zfi2 = 1/zf^2, zfi3 = 1/zf^3 */ do { random_field_element_test(&zf); - } while(secp256k1_fe_is_zero(&zf)); + } while(rustsecp256k1_v0_1_0_fe_is_zero(&zf)); random_field_element_magnitude(&zf); - secp256k1_fe_inv_var(&zfi3, &zf); - secp256k1_fe_sqr(&zfi2, &zfi3); - secp256k1_fe_mul(&zfi3, &zfi3, &zfi2); + rustsecp256k1_v0_1_0_fe_inv_var(&zfi3, &zf); + rustsecp256k1_v0_1_0_fe_sqr(&zfi2, &zfi3); + rustsecp256k1_v0_1_0_fe_mul(&zfi3, &zfi3, &zfi2); for (i1 = 0; i1 < 1 + 4 * runs; i1++) { int i2; for (i2 = 0; i2 < 1 + 4 * runs; i2++) { /* Compute reference result using gej + gej (var). 
*/ - secp256k1_gej refj, resj; - secp256k1_ge ref; - secp256k1_fe zr; - secp256k1_gej_add_var(&refj, &gej[i1], &gej[i2], secp256k1_gej_is_infinity(&gej[i1]) ? NULL : &zr); + rustsecp256k1_v0_1_0_gej refj, resj; + rustsecp256k1_v0_1_0_ge ref; + rustsecp256k1_v0_1_0_fe zr; + rustsecp256k1_v0_1_0_gej_add_var(&refj, &gej[i1], &gej[i2], rustsecp256k1_v0_1_0_gej_is_infinity(&gej[i1]) ? NULL : &zr); /* Check Z ratio. */ - if (!secp256k1_gej_is_infinity(&gej[i1]) && !secp256k1_gej_is_infinity(&refj)) { - secp256k1_fe zrz; secp256k1_fe_mul(&zrz, &zr, &gej[i1].z); - CHECK(secp256k1_fe_equal_var(&zrz, &refj.z)); + if (!rustsecp256k1_v0_1_0_gej_is_infinity(&gej[i1]) && !rustsecp256k1_v0_1_0_gej_is_infinity(&refj)) { + rustsecp256k1_v0_1_0_fe zrz; rustsecp256k1_v0_1_0_fe_mul(&zrz, &zr, &gej[i1].z); + CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&zrz, &refj.z)); } - secp256k1_ge_set_gej_var(&ref, &refj); + rustsecp256k1_v0_1_0_ge_set_gej_var(&ref, &refj); /* Test gej + ge with Z ratio result (var). */ - secp256k1_gej_add_ge_var(&resj, &gej[i1], &ge[i2], secp256k1_gej_is_infinity(&gej[i1]) ? NULL : &zr); + rustsecp256k1_v0_1_0_gej_add_ge_var(&resj, &gej[i1], &ge[i2], rustsecp256k1_v0_1_0_gej_is_infinity(&gej[i1]) ? NULL : &zr); ge_equals_gej(&ref, &resj); - if (!secp256k1_gej_is_infinity(&gej[i1]) && !secp256k1_gej_is_infinity(&resj)) { - secp256k1_fe zrz; secp256k1_fe_mul(&zrz, &zr, &gej[i1].z); - CHECK(secp256k1_fe_equal_var(&zrz, &resj.z)); + if (!rustsecp256k1_v0_1_0_gej_is_infinity(&gej[i1]) && !rustsecp256k1_v0_1_0_gej_is_infinity(&resj)) { + rustsecp256k1_v0_1_0_fe zrz; rustsecp256k1_v0_1_0_fe_mul(&zrz, &zr, &gej[i1].z); + CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&zrz, &resj.z)); } /* Test gej + ge (var, with additional Z factor). */ { - secp256k1_ge ge2_zfi = ge[i2]; /* the second term with x and y rescaled for z = 1/zf */ - secp256k1_fe_mul(&ge2_zfi.x, &ge2_zfi.x, &zfi2); - secp256k1_fe_mul(&ge2_zfi.y, &ge2_zfi.y, &zfi3); + rustsecp256k1_v0_1_0_ge ge2_zfi = ge[i2]; /* the second term with x and y rescaled for z = 1/zf */ + rustsecp256k1_v0_1_0_fe_mul(&ge2_zfi.x, &ge2_zfi.x, &zfi2); + rustsecp256k1_v0_1_0_fe_mul(&ge2_zfi.y, &ge2_zfi.y, &zfi3); random_field_element_magnitude(&ge2_zfi.x); random_field_element_magnitude(&ge2_zfi.y); - secp256k1_gej_add_zinv_var(&resj, &gej[i1], &ge2_zfi, &zf); + rustsecp256k1_v0_1_0_gej_add_zinv_var(&resj, &gej[i1], &ge2_zfi, &zf); ge_equals_gej(&ref, &resj); } /* Test gej + ge (const). */ if (i2 != 0) { - /* secp256k1_gej_add_ge does not support its second argument being infinity. */ - secp256k1_gej_add_ge(&resj, &gej[i1], &ge[i2]); + /* rustsecp256k1_v0_1_0_gej_add_ge does not support its second argument being infinity. */ + rustsecp256k1_v0_1_0_gej_add_ge(&resj, &gej[i1], &ge[i2]); ge_equals_gej(&ref, &resj); } /* Test doubling (var). */ if ((i1 == 0 && i2 == 0) || ((i1 + 3)/4 == (i2 + 3)/4 && ((i1 + 3)%4)/2 == ((i2 + 3)%4)/2)) { - secp256k1_fe zr2; + rustsecp256k1_v0_1_0_fe zr2; /* Normal doubling with Z ratio result. */ - secp256k1_gej_double_var(&resj, &gej[i1], &zr2); + rustsecp256k1_v0_1_0_gej_double_var(&resj, &gej[i1], &zr2); ge_equals_gej(&ref, &resj); /* Check Z ratio. */ - secp256k1_fe_mul(&zr2, &zr2, &gej[i1].z); - CHECK(secp256k1_fe_equal_var(&zr2, &resj.z)); + rustsecp256k1_v0_1_0_fe_mul(&zr2, &zr2, &gej[i1].z); + CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&zr2, &resj.z)); /* Normal doubling. 
*/ - secp256k1_gej_double_var(&resj, &gej[i2], NULL); + rustsecp256k1_v0_1_0_gej_double_var(&resj, &gej[i2], NULL); ge_equals_gej(&ref, &resj); } /* Test adding opposites. */ if ((i1 == 0 && i2 == 0) || ((i1 + 3)/4 == (i2 + 3)/4 && ((i1 + 3)%4)/2 != ((i2 + 3)%4)/2)) { - CHECK(secp256k1_ge_is_infinity(&ref)); + CHECK(rustsecp256k1_v0_1_0_ge_is_infinity(&ref)); } /* Test adding infinity. */ if (i1 == 0) { - CHECK(secp256k1_ge_is_infinity(&ge[i1])); - CHECK(secp256k1_gej_is_infinity(&gej[i1])); + CHECK(rustsecp256k1_v0_1_0_ge_is_infinity(&ge[i1])); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&gej[i1])); ge_equals_gej(&ref, &gej[i2]); } if (i2 == 0) { - CHECK(secp256k1_ge_is_infinity(&ge[i2])); - CHECK(secp256k1_gej_is_infinity(&gej[i2])); + CHECK(rustsecp256k1_v0_1_0_ge_is_infinity(&ge[i2])); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&gej[i2])); ge_equals_gej(&ref, &gej[i1]); } } @@ -2186,41 +2186,41 @@ void test_ge(void) { /* Test adding all points together in random order equals infinity. */ { - secp256k1_gej sum = SECP256K1_GEJ_CONST_INFINITY; - secp256k1_gej *gej_shuffled = (secp256k1_gej *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_gej)); + rustsecp256k1_v0_1_0_gej sum = SECP256K1_GEJ_CONST_INFINITY; + rustsecp256k1_v0_1_0_gej *gej_shuffled = (rustsecp256k1_v0_1_0_gej *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(rustsecp256k1_v0_1_0_gej)); for (i = 0; i < 4 * runs + 1; i++) { gej_shuffled[i] = gej[i]; } for (i = 0; i < 4 * runs + 1; i++) { - int swap = i + secp256k1_rand_int(4 * runs + 1 - i); + int swap = i + rustsecp256k1_v0_1_0_rand_int(4 * runs + 1 - i); if (swap != i) { - secp256k1_gej t = gej_shuffled[i]; + rustsecp256k1_v0_1_0_gej t = gej_shuffled[i]; gej_shuffled[i] = gej_shuffled[swap]; gej_shuffled[swap] = t; } } for (i = 0; i < 4 * runs + 1; i++) { - secp256k1_gej_add_var(&sum, &sum, &gej_shuffled[i], NULL); + rustsecp256k1_v0_1_0_gej_add_var(&sum, &sum, &gej_shuffled[i], NULL); } - CHECK(secp256k1_gej_is_infinity(&sum)); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&sum)); free(gej_shuffled); } /* Test batch gej -> ge conversion with and without known z ratios. */ { - secp256k1_fe *zr = (secp256k1_fe *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_fe)); - secp256k1_ge *ge_set_all = (secp256k1_ge *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_ge)); + rustsecp256k1_v0_1_0_fe *zr = (rustsecp256k1_v0_1_0_fe *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(rustsecp256k1_v0_1_0_fe)); + rustsecp256k1_v0_1_0_ge *ge_set_all = (rustsecp256k1_v0_1_0_ge *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(rustsecp256k1_v0_1_0_ge)); for (i = 0; i < 4 * runs + 1; i++) { /* Compute gej[i + 1].z / gez[i].z (with gej[n].z taken to be 1). 
*/ if (i < 4 * runs) { - secp256k1_fe_mul(&zr[i + 1], &zinv[i], &gej[i + 1].z); + rustsecp256k1_v0_1_0_fe_mul(&zr[i + 1], &zinv[i], &gej[i + 1].z); } } - secp256k1_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1); + rustsecp256k1_v0_1_0_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1); for (i = 0; i < 4 * runs + 1; i++) { - secp256k1_fe s; + rustsecp256k1_v0_1_0_fe s; random_fe_non_zero(&s); - secp256k1_gej_rescale(&gej[i], &s); + rustsecp256k1_v0_1_0_gej_rescale(&gej[i], &s); ge_equals_gej(&ge_set_all[i], &gej[i]); } free(ge_set_all); @@ -2231,13 +2231,13 @@ void test_ge(void) { for (i = 0; i < 4 * runs + 1; i++) { random_group_element_test(&ge[i]); /* randomly set half the points to infinitiy */ - if(secp256k1_fe_is_odd(&ge[i].x)) { - secp256k1_ge_set_infinity(&ge[i]); + if(rustsecp256k1_v0_1_0_fe_is_odd(&ge[i].x)) { + rustsecp256k1_v0_1_0_ge_set_infinity(&ge[i]); } - secp256k1_gej_set_ge(&gej[i], &ge[i]); + rustsecp256k1_v0_1_0_gej_set_ge(&gej[i], &ge[i]); } /* batch invert */ - secp256k1_ge_set_all_gej_var(ge, gej, 4 * runs + 1); + rustsecp256k1_v0_1_0_ge_set_all_gej_var(ge, gej, 4 * runs + 1); /* check result */ for (i = 0; i < 4 * runs + 1; i++) { ge_equals_gej(&ge[i], &gej[i]); @@ -2279,39 +2279,39 @@ void test_add_neg_y_diff_x(void) { * print " Q: %x %x" % Q.xy() * print "P + Q: %x %x" % (P + Q).xy() */ - secp256k1_gej aj = SECP256K1_GEJ_CONST( + rustsecp256k1_v0_1_0_gej aj = SECP256K1_GEJ_CONST( 0x8d24cd95, 0x0a355af1, 0x3c543505, 0x44238d30, 0x0643d79f, 0x05a59614, 0x2f8ec030, 0xd58977cb, 0x001e337a, 0x38093dcd, 0x6c0f386d, 0x0b1293a8, 0x4d72c879, 0xd7681924, 0x44e6d2f3, 0x9190117d ); - secp256k1_gej bj = SECP256K1_GEJ_CONST( + rustsecp256k1_v0_1_0_gej bj = SECP256K1_GEJ_CONST( 0xc7b74206, 0x1f788cd9, 0xabd0937d, 0x164a0d86, 0x95f6ff75, 0xf19a4ce9, 0xd013bd7b, 0xbf92d2a7, 0xffe1cc85, 0xc7f6c232, 0x93f0c792, 0xf4ed6c57, 0xb28d3786, 0x2897e6db, 0xbb192d0b, 0x6e6feab2 ); - secp256k1_gej sumj = SECP256K1_GEJ_CONST( + rustsecp256k1_v0_1_0_gej sumj = SECP256K1_GEJ_CONST( 0x671a63c0, 0x3efdad4c, 0x389a7798, 0x24356027, 0xb3d69010, 0x278625c3, 0x5c86d390, 0x184a8f7a, 0x5f6409c2, 0x2ce01f2b, 0x511fd375, 0x25071d08, 0xda651801, 0x70e95caf, 0x8f0d893c, 0xbed8fbbe ); - secp256k1_ge b; - secp256k1_gej resj; - secp256k1_ge res; - secp256k1_ge_set_gej(&b, &bj); + rustsecp256k1_v0_1_0_ge b; + rustsecp256k1_v0_1_0_gej resj; + rustsecp256k1_v0_1_0_ge res; + rustsecp256k1_v0_1_0_ge_set_gej(&b, &bj); - secp256k1_gej_add_var(&resj, &aj, &bj, NULL); - secp256k1_ge_set_gej(&res, &resj); + rustsecp256k1_v0_1_0_gej_add_var(&resj, &aj, &bj, NULL); + rustsecp256k1_v0_1_0_ge_set_gej(&res, &resj); ge_equals_gej(&res, &sumj); - secp256k1_gej_add_ge(&resj, &aj, &b); - secp256k1_ge_set_gej(&res, &resj); + rustsecp256k1_v0_1_0_gej_add_ge(&resj, &aj, &b); + rustsecp256k1_v0_1_0_ge_set_gej(&res, &resj); ge_equals_gej(&res, &sumj); - secp256k1_gej_add_ge_var(&resj, &aj, &b, NULL); - secp256k1_ge_set_gej(&res, &resj); + rustsecp256k1_v0_1_0_gej_add_ge_var(&resj, &aj, &b, NULL); + rustsecp256k1_v0_1_0_ge_set_gej(&res, &resj); ge_equals_gej(&res, &sumj); } @@ -2324,26 +2324,26 @@ void run_ge(void) { } void test_ec_combine(void) { - secp256k1_scalar sum = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); - secp256k1_pubkey data[6]; - const secp256k1_pubkey* d[6]; - secp256k1_pubkey sd; - secp256k1_pubkey sd2; - secp256k1_gej Qj; - secp256k1_ge Q; + rustsecp256k1_v0_1_0_scalar sum = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + rustsecp256k1_v0_1_0_pubkey data[6]; + const rustsecp256k1_v0_1_0_pubkey* d[6]; + 
rustsecp256k1_v0_1_0_pubkey sd; + rustsecp256k1_v0_1_0_pubkey sd2; + rustsecp256k1_v0_1_0_gej Qj; + rustsecp256k1_v0_1_0_ge Q; int i; for (i = 1; i <= 6; i++) { - secp256k1_scalar s; + rustsecp256k1_v0_1_0_scalar s; random_scalar_order_test(&s); - secp256k1_scalar_add(&sum, &sum, &s); - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &s); - secp256k1_ge_set_gej(&Q, &Qj); - secp256k1_pubkey_save(&data[i - 1], &Q); + rustsecp256k1_v0_1_0_scalar_add(&sum, &sum, &s); + rustsecp256k1_v0_1_0_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &s); + rustsecp256k1_v0_1_0_ge_set_gej(&Q, &Qj); + rustsecp256k1_v0_1_0_pubkey_save(&data[i - 1], &Q); d[i - 1] = &data[i - 1]; - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &sum); - secp256k1_ge_set_gej(&Q, &Qj); - secp256k1_pubkey_save(&sd, &Q); - CHECK(secp256k1_ec_pubkey_combine(ctx, &sd2, d, i) == 1); + rustsecp256k1_v0_1_0_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &sum); + rustsecp256k1_v0_1_0_ge_set_gej(&Q, &Qj); + rustsecp256k1_v0_1_0_pubkey_save(&sd, &Q); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_combine(ctx, &sd2, d, i) == 1); CHECK(memcmp(&sd, &sd2, sizeof(sd)) == 0); } } @@ -2355,32 +2355,32 @@ void run_ec_combine(void) { } } -void test_group_decompress(const secp256k1_fe* x) { +void test_group_decompress(const rustsecp256k1_v0_1_0_fe* x) { /* The input itself, normalized. */ - secp256k1_fe fex = *x; - secp256k1_fe fez; + rustsecp256k1_v0_1_0_fe fex = *x; + rustsecp256k1_v0_1_0_fe fez; /* Results of set_xquad_var, set_xo_var(..., 0), set_xo_var(..., 1). */ - secp256k1_ge ge_quad, ge_even, ge_odd; - secp256k1_gej gej_quad; + rustsecp256k1_v0_1_0_ge ge_quad, ge_even, ge_odd; + rustsecp256k1_v0_1_0_gej gej_quad; /* Return values of the above calls. */ int res_quad, res_even, res_odd; - secp256k1_fe_normalize_var(&fex); + rustsecp256k1_v0_1_0_fe_normalize_var(&fex); - res_quad = secp256k1_ge_set_xquad(&ge_quad, &fex); - res_even = secp256k1_ge_set_xo_var(&ge_even, &fex, 0); - res_odd = secp256k1_ge_set_xo_var(&ge_odd, &fex, 1); + res_quad = rustsecp256k1_v0_1_0_ge_set_xquad(&ge_quad, &fex); + res_even = rustsecp256k1_v0_1_0_ge_set_xo_var(&ge_even, &fex, 0); + res_odd = rustsecp256k1_v0_1_0_ge_set_xo_var(&ge_odd, &fex, 1); CHECK(res_quad == res_even); CHECK(res_quad == res_odd); if (res_quad) { - secp256k1_fe_normalize_var(&ge_quad.x); - secp256k1_fe_normalize_var(&ge_odd.x); - secp256k1_fe_normalize_var(&ge_even.x); - secp256k1_fe_normalize_var(&ge_quad.y); - secp256k1_fe_normalize_var(&ge_odd.y); - secp256k1_fe_normalize_var(&ge_even.y); + rustsecp256k1_v0_1_0_fe_normalize_var(&ge_quad.x); + rustsecp256k1_v0_1_0_fe_normalize_var(&ge_odd.x); + rustsecp256k1_v0_1_0_fe_normalize_var(&ge_even.x); + rustsecp256k1_v0_1_0_fe_normalize_var(&ge_quad.y); + rustsecp256k1_v0_1_0_fe_normalize_var(&ge_odd.y); + rustsecp256k1_v0_1_0_fe_normalize_var(&ge_even.y); /* No infinity allowed. */ CHECK(!ge_quad.infinity); @@ -2388,41 +2388,41 @@ void test_group_decompress(const secp256k1_fe* x) { CHECK(!ge_odd.infinity); /* Check that the x coordinates check out. */ - CHECK(secp256k1_fe_equal_var(&ge_quad.x, x)); - CHECK(secp256k1_fe_equal_var(&ge_even.x, x)); - CHECK(secp256k1_fe_equal_var(&ge_odd.x, x)); + CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&ge_quad.x, x)); + CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&ge_even.x, x)); + CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&ge_odd.x, x)); /* Check that the Y coordinate result in ge_quad is a square. */ - CHECK(secp256k1_fe_is_quad_var(&ge_quad.y)); + CHECK(rustsecp256k1_v0_1_0_fe_is_quad_var(&ge_quad.y)); /* Check odd/even Y in ge_odd, ge_even. 
*/ - CHECK(secp256k1_fe_is_odd(&ge_odd.y)); - CHECK(!secp256k1_fe_is_odd(&ge_even.y)); + CHECK(rustsecp256k1_v0_1_0_fe_is_odd(&ge_odd.y)); + CHECK(!rustsecp256k1_v0_1_0_fe_is_odd(&ge_even.y)); - /* Check secp256k1_gej_has_quad_y_var. */ - secp256k1_gej_set_ge(&gej_quad, &ge_quad); - CHECK(secp256k1_gej_has_quad_y_var(&gej_quad)); + /* Check rustsecp256k1_v0_1_0_gej_has_quad_y_var. */ + rustsecp256k1_v0_1_0_gej_set_ge(&gej_quad, &ge_quad); + CHECK(rustsecp256k1_v0_1_0_gej_has_quad_y_var(&gej_quad)); do { random_fe_test(&fez); - } while (secp256k1_fe_is_zero(&fez)); - secp256k1_gej_rescale(&gej_quad, &fez); - CHECK(secp256k1_gej_has_quad_y_var(&gej_quad)); - secp256k1_gej_neg(&gej_quad, &gej_quad); - CHECK(!secp256k1_gej_has_quad_y_var(&gej_quad)); + } while (rustsecp256k1_v0_1_0_fe_is_zero(&fez)); + rustsecp256k1_v0_1_0_gej_rescale(&gej_quad, &fez); + CHECK(rustsecp256k1_v0_1_0_gej_has_quad_y_var(&gej_quad)); + rustsecp256k1_v0_1_0_gej_neg(&gej_quad, &gej_quad); + CHECK(!rustsecp256k1_v0_1_0_gej_has_quad_y_var(&gej_quad)); do { random_fe_test(&fez); - } while (secp256k1_fe_is_zero(&fez)); - secp256k1_gej_rescale(&gej_quad, &fez); - CHECK(!secp256k1_gej_has_quad_y_var(&gej_quad)); - secp256k1_gej_neg(&gej_quad, &gej_quad); - CHECK(secp256k1_gej_has_quad_y_var(&gej_quad)); + } while (rustsecp256k1_v0_1_0_fe_is_zero(&fez)); + rustsecp256k1_v0_1_0_gej_rescale(&gej_quad, &fez); + CHECK(!rustsecp256k1_v0_1_0_gej_has_quad_y_var(&gej_quad)); + rustsecp256k1_v0_1_0_gej_neg(&gej_quad, &gej_quad); + CHECK(rustsecp256k1_v0_1_0_gej_has_quad_y_var(&gej_quad)); } } void run_group_decompress(void) { int i; for (i = 0; i < count * 4; i++) { - secp256k1_fe fe; + rustsecp256k1_v0_1_0_fe fe; random_fe_test(&fe); test_group_decompress(&fe); } @@ -2432,218 +2432,218 @@ void run_group_decompress(void) { void run_ecmult_chain(void) { /* random starting point A (on the curve) */ - secp256k1_gej a = SECP256K1_GEJ_CONST( + rustsecp256k1_v0_1_0_gej a = SECP256K1_GEJ_CONST( 0x8b30bbe9, 0xae2a9906, 0x96b22f67, 0x0709dff3, 0x727fd8bc, 0x04d3362c, 0x6c7bf458, 0xe2846004, 0xa357ae91, 0x5c4a6528, 0x1309edf2, 0x0504740f, 0x0eb33439, 0x90216b4f, 0x81063cb6, 0x5f2f7e0f ); /* two random initial factors xn and gn */ - secp256k1_scalar xn = SECP256K1_SCALAR_CONST( + rustsecp256k1_v0_1_0_scalar xn = SECP256K1_SCALAR_CONST( 0x84cc5452, 0xf7fde1ed, 0xb4d38a8c, 0xe9b1b84c, 0xcef31f14, 0x6e569be9, 0x705d357a, 0x42985407 ); - secp256k1_scalar gn = SECP256K1_SCALAR_CONST( + rustsecp256k1_v0_1_0_scalar gn = SECP256K1_SCALAR_CONST( 0xa1e58d22, 0x553dcd42, 0xb2398062, 0x5d4c57a9, 0x6e9323d4, 0x2b3152e5, 0xca2c3990, 0xedc7c9de ); /* two small multipliers to be applied to xn and gn in every iteration: */ - static const secp256k1_scalar xf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x1337); - static const secp256k1_scalar gf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x7113); + static const rustsecp256k1_v0_1_0_scalar xf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x1337); + static const rustsecp256k1_v0_1_0_scalar gf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x7113); /* accumulators with the resulting coefficients to A and G */ - secp256k1_scalar ae = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); - secp256k1_scalar ge = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + rustsecp256k1_v0_1_0_scalar ae = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); + rustsecp256k1_v0_1_0_scalar ge = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); /* actual points */ - secp256k1_gej x; - secp256k1_gej x2; + rustsecp256k1_v0_1_0_gej x; + 
rustsecp256k1_v0_1_0_gej x2; int i; /* the point being computed */ x = a; for (i = 0; i < 200*count; i++) { /* in each iteration, compute X = xn*X + gn*G; */ - secp256k1_ecmult(&ctx->ecmult_ctx, &x, &x, &xn, &gn); + rustsecp256k1_v0_1_0_ecmult(&ctx->ecmult_ctx, &x, &x, &xn, &gn); /* also compute ae and ge: the actual accumulated factors for A and G */ /* if X was (ae*A+ge*G), xn*X + gn*G results in (xn*ae*A + (xn*ge+gn)*G) */ - secp256k1_scalar_mul(&ae, &ae, &xn); - secp256k1_scalar_mul(&ge, &ge, &xn); - secp256k1_scalar_add(&ge, &ge, &gn); + rustsecp256k1_v0_1_0_scalar_mul(&ae, &ae, &xn); + rustsecp256k1_v0_1_0_scalar_mul(&ge, &ge, &xn); + rustsecp256k1_v0_1_0_scalar_add(&ge, &ge, &gn); /* modify xn and gn */ - secp256k1_scalar_mul(&xn, &xn, &xf); - secp256k1_scalar_mul(&gn, &gn, &gf); + rustsecp256k1_v0_1_0_scalar_mul(&xn, &xn, &xf); + rustsecp256k1_v0_1_0_scalar_mul(&gn, &gn, &gf); /* verify */ if (i == 19999) { /* expected result after 19999 iterations */ - secp256k1_gej rp = SECP256K1_GEJ_CONST( + rustsecp256k1_v0_1_0_gej rp = SECP256K1_GEJ_CONST( 0xD6E96687, 0xF9B10D09, 0x2A6F3543, 0x9D86CEBE, 0xA4535D0D, 0x409F5358, 0x6440BD74, 0xB933E830, 0xB95CBCA2, 0xC77DA786, 0x539BE8FD, 0x53354D2D, 0x3B4F566A, 0xE6580454, 0x07ED6015, 0xEE1B2A88 ); - secp256k1_gej_neg(&rp, &rp); - secp256k1_gej_add_var(&rp, &rp, &x, NULL); - CHECK(secp256k1_gej_is_infinity(&rp)); + rustsecp256k1_v0_1_0_gej_neg(&rp, &rp); + rustsecp256k1_v0_1_0_gej_add_var(&rp, &rp, &x, NULL); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&rp)); } } /* redo the computation, but directly with the resulting ae and ge coefficients: */ - secp256k1_ecmult(&ctx->ecmult_ctx, &x2, &a, &ae, &ge); - secp256k1_gej_neg(&x2, &x2); - secp256k1_gej_add_var(&x2, &x2, &x, NULL); - CHECK(secp256k1_gej_is_infinity(&x2)); + rustsecp256k1_v0_1_0_ecmult(&ctx->ecmult_ctx, &x2, &a, &ae, &ge); + rustsecp256k1_v0_1_0_gej_neg(&x2, &x2); + rustsecp256k1_v0_1_0_gej_add_var(&x2, &x2, &x, NULL); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&x2)); } -void test_point_times_order(const secp256k1_gej *point) { +void test_point_times_order(const rustsecp256k1_v0_1_0_gej *point) { /* X * (point + G) + (order-X) * (pointer + G) = 0 */ - secp256k1_scalar x; - secp256k1_scalar nx; - secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); - secp256k1_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); - secp256k1_gej res1, res2; - secp256k1_ge res3; + rustsecp256k1_v0_1_0_scalar x; + rustsecp256k1_v0_1_0_scalar nx; + rustsecp256k1_v0_1_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + rustsecp256k1_v0_1_0_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); + rustsecp256k1_v0_1_0_gej res1, res2; + rustsecp256k1_v0_1_0_ge res3; unsigned char pub[65]; size_t psize = 65; random_scalar_order_test(&x); - secp256k1_scalar_negate(&nx, &x); - secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &x, &x); /* calc res1 = x * point + x * G; */ - secp256k1_ecmult(&ctx->ecmult_ctx, &res2, point, &nx, &nx); /* calc res2 = (order - x) * point + (order - x) * G; */ - secp256k1_gej_add_var(&res1, &res1, &res2, NULL); - CHECK(secp256k1_gej_is_infinity(&res1)); - CHECK(secp256k1_gej_is_valid_var(&res1) == 0); - secp256k1_ge_set_gej(&res3, &res1); - CHECK(secp256k1_ge_is_infinity(&res3)); - CHECK(secp256k1_ge_is_valid_var(&res3) == 0); - CHECK(secp256k1_eckey_pubkey_serialize(&res3, pub, &psize, 0) == 0); + rustsecp256k1_v0_1_0_scalar_negate(&nx, &x); + rustsecp256k1_v0_1_0_ecmult(&ctx->ecmult_ctx, &res1, point, &x, &x); /* calc res1 = x * point + 
x * G; */ + rustsecp256k1_v0_1_0_ecmult(&ctx->ecmult_ctx, &res2, point, &nx, &nx); /* calc res2 = (order - x) * point + (order - x) * G; */ + rustsecp256k1_v0_1_0_gej_add_var(&res1, &res1, &res2, NULL); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&res1)); + CHECK(rustsecp256k1_v0_1_0_gej_is_valid_var(&res1) == 0); + rustsecp256k1_v0_1_0_ge_set_gej(&res3, &res1); + CHECK(rustsecp256k1_v0_1_0_ge_is_infinity(&res3)); + CHECK(rustsecp256k1_v0_1_0_ge_is_valid_var(&res3) == 0); + CHECK(rustsecp256k1_v0_1_0_eckey_pubkey_serialize(&res3, pub, &psize, 0) == 0); psize = 65; - CHECK(secp256k1_eckey_pubkey_serialize(&res3, pub, &psize, 1) == 0); + CHECK(rustsecp256k1_v0_1_0_eckey_pubkey_serialize(&res3, pub, &psize, 1) == 0); /* check zero/one edge cases */ - secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &zero, &zero); - secp256k1_ge_set_gej(&res3, &res1); - CHECK(secp256k1_ge_is_infinity(&res3)); - secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &one, &zero); - secp256k1_ge_set_gej(&res3, &res1); + rustsecp256k1_v0_1_0_ecmult(&ctx->ecmult_ctx, &res1, point, &zero, &zero); + rustsecp256k1_v0_1_0_ge_set_gej(&res3, &res1); + CHECK(rustsecp256k1_v0_1_0_ge_is_infinity(&res3)); + rustsecp256k1_v0_1_0_ecmult(&ctx->ecmult_ctx, &res1, point, &one, &zero); + rustsecp256k1_v0_1_0_ge_set_gej(&res3, &res1); ge_equals_gej(&res3, point); - secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &zero, &one); - secp256k1_ge_set_gej(&res3, &res1); - ge_equals_ge(&res3, &secp256k1_ge_const_g); + rustsecp256k1_v0_1_0_ecmult(&ctx->ecmult_ctx, &res1, point, &zero, &one); + rustsecp256k1_v0_1_0_ge_set_gej(&res3, &res1); + ge_equals_ge(&res3, &rustsecp256k1_v0_1_0_ge_const_g); } void run_point_times_order(void) { int i; - secp256k1_fe x = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 2); - static const secp256k1_fe xr = SECP256K1_FE_CONST( + rustsecp256k1_v0_1_0_fe x = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 2); + static const rustsecp256k1_v0_1_0_fe xr = SECP256K1_FE_CONST( 0x7603CB59, 0xB0EF6C63, 0xFE608479, 0x2A0C378C, 0xDB3233A8, 0x0F8A9A09, 0xA877DEAD, 0x31B38C45 ); for (i = 0; i < 500; i++) { - secp256k1_ge p; - if (secp256k1_ge_set_xo_var(&p, &x, 1)) { - secp256k1_gej j; - CHECK(secp256k1_ge_is_valid_var(&p)); - secp256k1_gej_set_ge(&j, &p); - CHECK(secp256k1_gej_is_valid_var(&j)); + rustsecp256k1_v0_1_0_ge p; + if (rustsecp256k1_v0_1_0_ge_set_xo_var(&p, &x, 1)) { + rustsecp256k1_v0_1_0_gej j; + CHECK(rustsecp256k1_v0_1_0_ge_is_valid_var(&p)); + rustsecp256k1_v0_1_0_gej_set_ge(&j, &p); + CHECK(rustsecp256k1_v0_1_0_gej_is_valid_var(&j)); test_point_times_order(&j); } - secp256k1_fe_sqr(&x, &x); + rustsecp256k1_v0_1_0_fe_sqr(&x, &x); } - secp256k1_fe_normalize_var(&x); - CHECK(secp256k1_fe_equal_var(&x, &xr)); + rustsecp256k1_v0_1_0_fe_normalize_var(&x); + CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&x, &xr)); } void ecmult_const_random_mult(void) { /* random starting point A (on the curve) */ - secp256k1_ge a = SECP256K1_GE_CONST( + rustsecp256k1_v0_1_0_ge a = SECP256K1_GE_CONST( 0x6d986544, 0x57ff52b8, 0xcf1b8126, 0x5b802a5b, 0xa97f9263, 0xb1e88044, 0x93351325, 0x91bc450a, 0x535c59f7, 0x325e5d2b, 0xc391fbe8, 0x3c12787c, 0x337e4a98, 0xe82a9011, 0x0123ba37, 0xdd769c7d ); /* random initial factor xn */ - secp256k1_scalar xn = SECP256K1_SCALAR_CONST( + rustsecp256k1_v0_1_0_scalar xn = SECP256K1_SCALAR_CONST( 0x649d4f77, 0xc4242df7, 0x7f2079c9, 0x14530327, 0xa31b876a, 0xd2d8ce2a, 0x2236d5c6, 0xd7b2029b ); /* expected xn * A (from sage) */ - secp256k1_ge expected_b = SECP256K1_GE_CONST( + rustsecp256k1_v0_1_0_ge expected_b = 
SECP256K1_GE_CONST( 0x23773684, 0x4d209dc7, 0x098a786f, 0x20d06fcd, 0x070a38bf, 0xc11ac651, 0x03004319, 0x1e2a8786, 0xed8c3b8e, 0xc06dd57b, 0xd06ea66e, 0x45492b0f, 0xb84e4e1b, 0xfb77e21f, 0x96baae2a, 0x63dec956 ); - secp256k1_gej b; - secp256k1_ecmult_const(&b, &a, &xn, 256); + rustsecp256k1_v0_1_0_gej b; + rustsecp256k1_v0_1_0_ecmult_const(&b, &a, &xn, 256); - CHECK(secp256k1_ge_is_valid_var(&a)); + CHECK(rustsecp256k1_v0_1_0_ge_is_valid_var(&a)); ge_equals_gej(&expected_b, &b); } void ecmult_const_commutativity(void) { - secp256k1_scalar a; - secp256k1_scalar b; - secp256k1_gej res1; - secp256k1_gej res2; - secp256k1_ge mid1; - secp256k1_ge mid2; + rustsecp256k1_v0_1_0_scalar a; + rustsecp256k1_v0_1_0_scalar b; + rustsecp256k1_v0_1_0_gej res1; + rustsecp256k1_v0_1_0_gej res2; + rustsecp256k1_v0_1_0_ge mid1; + rustsecp256k1_v0_1_0_ge mid2; random_scalar_order_test(&a); random_scalar_order_test(&b); - secp256k1_ecmult_const(&res1, &secp256k1_ge_const_g, &a, 256); - secp256k1_ecmult_const(&res2, &secp256k1_ge_const_g, &b, 256); - secp256k1_ge_set_gej(&mid1, &res1); - secp256k1_ge_set_gej(&mid2, &res2); - secp256k1_ecmult_const(&res1, &mid1, &b, 256); - secp256k1_ecmult_const(&res2, &mid2, &a, 256); - secp256k1_ge_set_gej(&mid1, &res1); - secp256k1_ge_set_gej(&mid2, &res2); + rustsecp256k1_v0_1_0_ecmult_const(&res1, &rustsecp256k1_v0_1_0_ge_const_g, &a, 256); + rustsecp256k1_v0_1_0_ecmult_const(&res2, &rustsecp256k1_v0_1_0_ge_const_g, &b, 256); + rustsecp256k1_v0_1_0_ge_set_gej(&mid1, &res1); + rustsecp256k1_v0_1_0_ge_set_gej(&mid2, &res2); + rustsecp256k1_v0_1_0_ecmult_const(&res1, &mid1, &b, 256); + rustsecp256k1_v0_1_0_ecmult_const(&res2, &mid2, &a, 256); + rustsecp256k1_v0_1_0_ge_set_gej(&mid1, &res1); + rustsecp256k1_v0_1_0_ge_set_gej(&mid2, &res2); ge_equals_ge(&mid1, &mid2); } void ecmult_const_mult_zero_one(void) { - secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); - secp256k1_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); - secp256k1_scalar negone; - secp256k1_gej res1; - secp256k1_ge res2; - secp256k1_ge point; - secp256k1_scalar_negate(&negone, &one); + rustsecp256k1_v0_1_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + rustsecp256k1_v0_1_0_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); + rustsecp256k1_v0_1_0_scalar negone; + rustsecp256k1_v0_1_0_gej res1; + rustsecp256k1_v0_1_0_ge res2; + rustsecp256k1_v0_1_0_ge point; + rustsecp256k1_v0_1_0_scalar_negate(&negone, &one); random_group_element_test(&point); - secp256k1_ecmult_const(&res1, &point, &zero, 3); - secp256k1_ge_set_gej(&res2, &res1); - CHECK(secp256k1_ge_is_infinity(&res2)); - secp256k1_ecmult_const(&res1, &point, &one, 2); - secp256k1_ge_set_gej(&res2, &res1); + rustsecp256k1_v0_1_0_ecmult_const(&res1, &point, &zero, 3); + rustsecp256k1_v0_1_0_ge_set_gej(&res2, &res1); + CHECK(rustsecp256k1_v0_1_0_ge_is_infinity(&res2)); + rustsecp256k1_v0_1_0_ecmult_const(&res1, &point, &one, 2); + rustsecp256k1_v0_1_0_ge_set_gej(&res2, &res1); ge_equals_ge(&res2, &point); - secp256k1_ecmult_const(&res1, &point, &negone, 256); - secp256k1_gej_neg(&res1, &res1); - secp256k1_ge_set_gej(&res2, &res1); + rustsecp256k1_v0_1_0_ecmult_const(&res1, &point, &negone, 256); + rustsecp256k1_v0_1_0_gej_neg(&res1, &res1); + rustsecp256k1_v0_1_0_ge_set_gej(&res2, &res1); ge_equals_ge(&res2, &point); } void ecmult_const_chain_multiply(void) { /* Check known result (randomly generated test problem from sage) */ - const secp256k1_scalar scalar = SECP256K1_SCALAR_CONST( + const 
rustsecp256k1_v0_1_0_scalar scalar = SECP256K1_SCALAR_CONST( 0x4968d524, 0x2abf9b7a, 0x466abbcf, 0x34b11b6d, 0xcd83d307, 0x827bed62, 0x05fad0ce, 0x18fae63b ); - const secp256k1_gej expected_point = SECP256K1_GEJ_CONST( + const rustsecp256k1_v0_1_0_gej expected_point = SECP256K1_GEJ_CONST( 0x5494c15d, 0x32099706, 0xc2395f94, 0x348745fd, 0x757ce30e, 0x4e8c90fb, 0xa2bad184, 0xf883c69f, 0x5d195d20, 0xe191bf7f, 0x1be3e55f, 0x56a80196, 0x6071ad01, 0xf1462f66, 0xc997fa94, 0xdb858435 ); - secp256k1_gej point; - secp256k1_ge res; + rustsecp256k1_v0_1_0_gej point; + rustsecp256k1_v0_1_0_ge res; int i; - secp256k1_gej_set_ge(&point, &secp256k1_ge_const_g); + rustsecp256k1_v0_1_0_gej_set_ge(&point, &rustsecp256k1_v0_1_0_ge_const_g); for (i = 0; i < 100; ++i) { - secp256k1_ge tmp; - secp256k1_ge_set_gej(&tmp, &point); - secp256k1_ecmult_const(&point, &tmp, &scalar, 256); + rustsecp256k1_v0_1_0_ge tmp; + rustsecp256k1_v0_1_0_ge_set_gej(&tmp, &point); + rustsecp256k1_v0_1_0_ecmult_const(&point, &tmp, &scalar, 256); } - secp256k1_ge_set_gej(&res, &point); + rustsecp256k1_v0_1_0_ge_set_gej(&res, &point); ge_equals_gej(&res, &expected_point); } @@ -2655,18 +2655,18 @@ void run_ecmult_const_tests(void) { } typedef struct { - secp256k1_scalar *sc; - secp256k1_ge *pt; + rustsecp256k1_v0_1_0_scalar *sc; + rustsecp256k1_v0_1_0_ge *pt; } ecmult_multi_data; -static int ecmult_multi_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) { +static int ecmult_multi_callback(rustsecp256k1_v0_1_0_scalar *sc, rustsecp256k1_v0_1_0_ge *pt, size_t idx, void *cbdata) { ecmult_multi_data *data = (ecmult_multi_data*) cbdata; *sc = data->sc[idx]; *pt = data->pt[idx]; return 1; } -static int ecmult_multi_false_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) { +static int ecmult_multi_false_callback(rustsecp256k1_v0_1_0_scalar *sc, rustsecp256k1_v0_1_0_ge *pt, size_t idx, void *cbdata) { (void)sc; (void)pt; (void)idx; @@ -2674,102 +2674,102 @@ static int ecmult_multi_false_callback(secp256k1_scalar *sc, secp256k1_ge *pt, s return 0; } -void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func ecmult_multi) { +void test_ecmult_multi(rustsecp256k1_v0_1_0_scratch *scratch, rustsecp256k1_v0_1_0_ecmult_multi_func ecmult_multi) { int ncount; - secp256k1_scalar szero; - secp256k1_scalar sc[32]; - secp256k1_ge pt[32]; - secp256k1_gej r; - secp256k1_gej r2; + rustsecp256k1_v0_1_0_scalar szero; + rustsecp256k1_v0_1_0_scalar sc[32]; + rustsecp256k1_v0_1_0_ge pt[32]; + rustsecp256k1_v0_1_0_gej r; + rustsecp256k1_v0_1_0_gej r2; ecmult_multi_data data; data.sc = sc; data.pt = pt; - secp256k1_scalar_set_int(&szero, 0); + rustsecp256k1_v0_1_0_scalar_set_int(&szero, 0); /* No points to multiply */ CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, NULL, ecmult_multi_callback, &data, 0)); /* Check 1- and 2-point multiplies against ecmult */ for (ncount = 0; ncount < count; ncount++) { - secp256k1_ge ptg; - secp256k1_gej ptgj; + rustsecp256k1_v0_1_0_ge ptg; + rustsecp256k1_v0_1_0_gej ptgj; random_scalar_order(&sc[0]); random_scalar_order(&sc[1]); random_group_element_test(&ptg); - secp256k1_gej_set_ge(&ptgj, &ptg); + rustsecp256k1_v0_1_0_gej_set_ge(&ptgj, &ptg); pt[0] = ptg; - pt[1] = secp256k1_ge_const_g; + pt[1] = rustsecp256k1_v0_1_0_ge_const_g; /* only G scalar */ - secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &szero, &sc[0]); + rustsecp256k1_v0_1_0_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &szero, &sc[0]); CHECK(ecmult_multi(&ctx->error_callback, 
&ctx->ecmult_ctx, scratch, &r, &sc[0], ecmult_multi_callback, &data, 0)); - secp256k1_gej_neg(&r2, &r2); - secp256k1_gej_add_var(&r, &r, &r2, NULL); - CHECK(secp256k1_gej_is_infinity(&r)); + rustsecp256k1_v0_1_0_gej_neg(&r2, &r2); + rustsecp256k1_v0_1_0_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&r)); /* 1-point */ - secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &szero); + rustsecp256k1_v0_1_0_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &szero); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 1)); - secp256k1_gej_neg(&r2, &r2); - secp256k1_gej_add_var(&r, &r, &r2, NULL); - CHECK(secp256k1_gej_is_infinity(&r)); + rustsecp256k1_v0_1_0_gej_neg(&r2, &r2); + rustsecp256k1_v0_1_0_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&r)); /* Try to multiply 1 point, but callback returns false */ CHECK(!ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_false_callback, &data, 1)); /* 2-point */ - secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &sc[1]); + rustsecp256k1_v0_1_0_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &sc[1]); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 2)); - secp256k1_gej_neg(&r2, &r2); - secp256k1_gej_add_var(&r, &r, &r2, NULL); - CHECK(secp256k1_gej_is_infinity(&r)); + rustsecp256k1_v0_1_0_gej_neg(&r2, &r2); + rustsecp256k1_v0_1_0_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&r)); /* 2-point with G scalar */ - secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &sc[1]); + rustsecp256k1_v0_1_0_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &sc[1]); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &sc[1], ecmult_multi_callback, &data, 1)); - secp256k1_gej_neg(&r2, &r2); - secp256k1_gej_add_var(&r, &r, &r2, NULL); - CHECK(secp256k1_gej_is_infinity(&r)); + rustsecp256k1_v0_1_0_gej_neg(&r2, &r2); + rustsecp256k1_v0_1_0_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&r)); } /* Check infinite outputs of various forms */ for (ncount = 0; ncount < count; ncount++) { - secp256k1_ge ptg; + rustsecp256k1_v0_1_0_ge ptg; size_t i, j; size_t sizes[] = { 2, 10, 32 }; for (j = 0; j < 3; j++) { for (i = 0; i < 32; i++) { random_scalar_order(&sc[i]); - secp256k1_ge_set_infinity(&pt[i]); + rustsecp256k1_v0_1_0_ge_set_infinity(&pt[i]); } CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); - CHECK(secp256k1_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&r)); } for (j = 0; j < 3; j++) { for (i = 0; i < 32; i++) { random_group_element_test(&ptg); pt[i] = ptg; - secp256k1_scalar_set_int(&sc[i], 0); + rustsecp256k1_v0_1_0_scalar_set_int(&sc[i], 0); } CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); - CHECK(secp256k1_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&r)); } for (j = 0; j < 3; j++) { random_group_element_test(&ptg); for (i = 0; i < 16; i++) { random_scalar_order(&sc[2*i]); - secp256k1_scalar_negate(&sc[2*i + 1], &sc[2*i]); + rustsecp256k1_v0_1_0_scalar_negate(&sc[2*i + 1], &sc[2*i]); pt[2 * i] = ptg; pt[2 * i + 1] = ptg; } CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); - CHECK(secp256k1_gej_is_infinity(&r)); + 
CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&r)); random_scalar_order(&sc[0]); for (i = 0; i < 16; i++) { @@ -2778,70 +2778,70 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e sc[2*i] = sc[0]; sc[2*i+1] = sc[0]; pt[2 * i] = ptg; - secp256k1_ge_neg(&pt[2*i+1], &pt[2*i]); + rustsecp256k1_v0_1_0_ge_neg(&pt[2*i+1], &pt[2*i]); } CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); - CHECK(secp256k1_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&r)); } random_group_element_test(&ptg); - secp256k1_scalar_set_int(&sc[0], 0); + rustsecp256k1_v0_1_0_scalar_set_int(&sc[0], 0); pt[0] = ptg; for (i = 1; i < 32; i++) { pt[i] = ptg; random_scalar_order(&sc[i]); - secp256k1_scalar_add(&sc[0], &sc[0], &sc[i]); - secp256k1_scalar_negate(&sc[i], &sc[i]); + rustsecp256k1_v0_1_0_scalar_add(&sc[0], &sc[0], &sc[i]); + rustsecp256k1_v0_1_0_scalar_negate(&sc[i], &sc[i]); } CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 32)); - CHECK(secp256k1_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&r)); } /* Check random points, constant scalar */ for (ncount = 0; ncount < count; ncount++) { size_t i; - secp256k1_gej_set_infinity(&r); + rustsecp256k1_v0_1_0_gej_set_infinity(&r); random_scalar_order(&sc[0]); for (i = 0; i < 20; i++) { - secp256k1_ge ptg; + rustsecp256k1_v0_1_0_ge ptg; sc[i] = sc[0]; random_group_element_test(&ptg); pt[i] = ptg; - secp256k1_gej_add_ge_var(&r, &r, &pt[i], NULL); + rustsecp256k1_v0_1_0_gej_add_ge_var(&r, &r, &pt[i], NULL); } - secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &r, &sc[0], &szero); + rustsecp256k1_v0_1_0_ecmult(&ctx->ecmult_ctx, &r2, &r, &sc[0], &szero); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20)); - secp256k1_gej_neg(&r2, &r2); - secp256k1_gej_add_var(&r, &r, &r2, NULL); - CHECK(secp256k1_gej_is_infinity(&r)); + rustsecp256k1_v0_1_0_gej_neg(&r2, &r2); + rustsecp256k1_v0_1_0_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&r)); } /* Check random scalars, constant point */ for (ncount = 0; ncount < count; ncount++) { size_t i; - secp256k1_ge ptg; - secp256k1_gej p0j; - secp256k1_scalar rs; - secp256k1_scalar_set_int(&rs, 0); + rustsecp256k1_v0_1_0_ge ptg; + rustsecp256k1_v0_1_0_gej p0j; + rustsecp256k1_v0_1_0_scalar rs; + rustsecp256k1_v0_1_0_scalar_set_int(&rs, 0); random_group_element_test(&ptg); for (i = 0; i < 20; i++) { random_scalar_order(&sc[i]); pt[i] = ptg; - secp256k1_scalar_add(&rs, &rs, &sc[i]); + rustsecp256k1_v0_1_0_scalar_add(&rs, &rs, &sc[i]); } - secp256k1_gej_set_ge(&p0j, &pt[0]); - secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &p0j, &rs, &szero); + rustsecp256k1_v0_1_0_gej_set_ge(&p0j, &pt[0]); + rustsecp256k1_v0_1_0_ecmult(&ctx->ecmult_ctx, &r2, &p0j, &rs, &szero); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20)); - secp256k1_gej_neg(&r2, &r2); - secp256k1_gej_add_var(&r, &r, &r2, NULL); - CHECK(secp256k1_gej_is_infinity(&r)); + rustsecp256k1_v0_1_0_gej_neg(&r2, &r2); + rustsecp256k1_v0_1_0_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&r)); } /* Sanity check that zero scalars don't cause problems */ @@ -2850,62 +2850,62 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e random_group_element_test(&pt[ncount]); } - secp256k1_scalar_clear(&sc[0]); + 
rustsecp256k1_v0_1_0_scalar_clear(&sc[0]); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20)); - secp256k1_scalar_clear(&sc[1]); - secp256k1_scalar_clear(&sc[2]); - secp256k1_scalar_clear(&sc[3]); - secp256k1_scalar_clear(&sc[4]); + rustsecp256k1_v0_1_0_scalar_clear(&sc[1]); + rustsecp256k1_v0_1_0_scalar_clear(&sc[2]); + rustsecp256k1_v0_1_0_scalar_clear(&sc[3]); + rustsecp256k1_v0_1_0_scalar_clear(&sc[4]); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 6)); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 5)); - CHECK(secp256k1_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&r)); /* Run through s0*(t0*P) + s1*(t1*P) exhaustively for many small values of s0, s1, t0, t1 */ { const size_t TOP = 8; size_t s0i, s1i; size_t t0i, t1i; - secp256k1_ge ptg; - secp256k1_gej ptgj; + rustsecp256k1_v0_1_0_ge ptg; + rustsecp256k1_v0_1_0_gej ptgj; random_group_element_test(&ptg); - secp256k1_gej_set_ge(&ptgj, &ptg); + rustsecp256k1_v0_1_0_gej_set_ge(&ptgj, &ptg); for(t0i = 0; t0i < TOP; t0i++) { for(t1i = 0; t1i < TOP; t1i++) { - secp256k1_gej t0p, t1p; - secp256k1_scalar t0, t1; + rustsecp256k1_v0_1_0_gej t0p, t1p; + rustsecp256k1_v0_1_0_scalar t0, t1; - secp256k1_scalar_set_int(&t0, (t0i + 1) / 2); - secp256k1_scalar_cond_negate(&t0, t0i & 1); - secp256k1_scalar_set_int(&t1, (t1i + 1) / 2); - secp256k1_scalar_cond_negate(&t1, t1i & 1); + rustsecp256k1_v0_1_0_scalar_set_int(&t0, (t0i + 1) / 2); + rustsecp256k1_v0_1_0_scalar_cond_negate(&t0, t0i & 1); + rustsecp256k1_v0_1_0_scalar_set_int(&t1, (t1i + 1) / 2); + rustsecp256k1_v0_1_0_scalar_cond_negate(&t1, t1i & 1); - secp256k1_ecmult(&ctx->ecmult_ctx, &t0p, &ptgj, &t0, &szero); - secp256k1_ecmult(&ctx->ecmult_ctx, &t1p, &ptgj, &t1, &szero); + rustsecp256k1_v0_1_0_ecmult(&ctx->ecmult_ctx, &t0p, &ptgj, &t0, &szero); + rustsecp256k1_v0_1_0_ecmult(&ctx->ecmult_ctx, &t1p, &ptgj, &t1, &szero); for(s0i = 0; s0i < TOP; s0i++) { for(s1i = 0; s1i < TOP; s1i++) { - secp256k1_scalar tmp1, tmp2; - secp256k1_gej expected, actual; + rustsecp256k1_v0_1_0_scalar tmp1, tmp2; + rustsecp256k1_v0_1_0_gej expected, actual; - secp256k1_ge_set_gej(&pt[0], &t0p); - secp256k1_ge_set_gej(&pt[1], &t1p); + rustsecp256k1_v0_1_0_ge_set_gej(&pt[0], &t0p); + rustsecp256k1_v0_1_0_ge_set_gej(&pt[1], &t1p); - secp256k1_scalar_set_int(&sc[0], (s0i + 1) / 2); - secp256k1_scalar_cond_negate(&sc[0], s0i & 1); - secp256k1_scalar_set_int(&sc[1], (s1i + 1) / 2); - secp256k1_scalar_cond_negate(&sc[1], s1i & 1); + rustsecp256k1_v0_1_0_scalar_set_int(&sc[0], (s0i + 1) / 2); + rustsecp256k1_v0_1_0_scalar_cond_negate(&sc[0], s0i & 1); + rustsecp256k1_v0_1_0_scalar_set_int(&sc[1], (s1i + 1) / 2); + rustsecp256k1_v0_1_0_scalar_cond_negate(&sc[1], s1i & 1); - secp256k1_scalar_mul(&tmp1, &t0, &sc[0]); - secp256k1_scalar_mul(&tmp2, &t1, &sc[1]); - secp256k1_scalar_add(&tmp1, &tmp1, &tmp2); + rustsecp256k1_v0_1_0_scalar_mul(&tmp1, &t0, &sc[0]); + rustsecp256k1_v0_1_0_scalar_mul(&tmp2, &t1, &sc[1]); + rustsecp256k1_v0_1_0_scalar_add(&tmp1, &tmp1, &tmp2); - secp256k1_ecmult(&ctx->ecmult_ctx, &expected, &ptgj, &tmp1, &szero); + rustsecp256k1_v0_1_0_ecmult(&ctx->ecmult_ctx, &expected, &ptgj, &tmp1, &szero); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &actual, &szero, ecmult_multi_callback, &data, 2)); - secp256k1_gej_neg(&expected, &expected); - secp256k1_gej_add_var(&actual, 
&actual, &expected, NULL); - CHECK(secp256k1_gej_is_infinity(&actual)); + rustsecp256k1_v0_1_0_gej_neg(&expected, &expected); + rustsecp256k1_v0_1_0_gej_add_var(&actual, &actual, &expected, NULL); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&actual)); } } } @@ -2913,28 +2913,28 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e } } -void test_ecmult_multi_batch_single(secp256k1_ecmult_multi_func ecmult_multi) { - secp256k1_scalar szero; - secp256k1_scalar sc[32]; - secp256k1_ge pt[32]; - secp256k1_gej r; +void test_ecmult_multi_batch_single(rustsecp256k1_v0_1_0_ecmult_multi_func ecmult_multi) { + rustsecp256k1_v0_1_0_scalar szero; + rustsecp256k1_v0_1_0_scalar sc[32]; + rustsecp256k1_v0_1_0_ge pt[32]; + rustsecp256k1_v0_1_0_gej r; ecmult_multi_data data; - secp256k1_scratch *scratch_empty; + rustsecp256k1_v0_1_0_scratch *scratch_empty; data.sc = sc; data.pt = pt; - secp256k1_scalar_set_int(&szero, 0); + rustsecp256k1_v0_1_0_scalar_set_int(&szero, 0); /* Try to multiply 1 point, but scratch space is empty.*/ - scratch_empty = secp256k1_scratch_create(&ctx->error_callback, 0); + scratch_empty = rustsecp256k1_v0_1_0_scratch_create(&ctx->error_callback, 0); CHECK(!ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch_empty, &r, &szero, ecmult_multi_callback, &data, 1)); - secp256k1_scratch_destroy(&ctx->error_callback, scratch_empty); + rustsecp256k1_v0_1_0_scratch_destroy(&ctx->error_callback, scratch_empty); } -void test_secp256k1_pippenger_bucket_window_inv(void) { +void test_rustsecp256k1_v0_1_0_pippenger_bucket_window_inv(void) { int i; - CHECK(secp256k1_pippenger_bucket_window_inv(0) == 0); + CHECK(rustsecp256k1_v0_1_0_pippenger_bucket_window_inv(0) == 0); for(i = 1; i <= PIPPENGER_MAX_BUCKET_WINDOW; i++) { #ifdef USE_ENDOMORPHISM /* Bucket_window of 8 is not used with endo */ @@ -2942,9 +2942,9 @@ void test_secp256k1_pippenger_bucket_window_inv(void) { continue; } #endif - CHECK(secp256k1_pippenger_bucket_window(secp256k1_pippenger_bucket_window_inv(i)) == i); + CHECK(rustsecp256k1_v0_1_0_pippenger_bucket_window(rustsecp256k1_v0_1_0_pippenger_bucket_window_inv(i)) == i); if (i != PIPPENGER_MAX_BUCKET_WINDOW) { - CHECK(secp256k1_pippenger_bucket_window(secp256k1_pippenger_bucket_window_inv(i)+1) > i); + CHECK(rustsecp256k1_v0_1_0_pippenger_bucket_window(rustsecp256k1_v0_1_0_pippenger_bucket_window_inv(i)+1) > i); } } } @@ -2954,9 +2954,9 @@ void test_secp256k1_pippenger_bucket_window_inv(void) { * for a given scratch space. 
*/ void test_ecmult_multi_pippenger_max_points(void) { - size_t scratch_size = secp256k1_rand_int(256); - size_t max_size = secp256k1_pippenger_scratch_size(secp256k1_pippenger_bucket_window_inv(PIPPENGER_MAX_BUCKET_WINDOW-1)+512, 12); - secp256k1_scratch *scratch; + size_t scratch_size = rustsecp256k1_v0_1_0_rand_int(256); + size_t max_size = rustsecp256k1_v0_1_0_pippenger_scratch_size(rustsecp256k1_v0_1_0_pippenger_bucket_window_inv(PIPPENGER_MAX_BUCKET_WINDOW-1)+512, 12); + rustsecp256k1_v0_1_0_scratch *scratch; size_t n_points_supported; int bucket_window = 0; @@ -2964,24 +2964,24 @@ void test_ecmult_multi_pippenger_max_points(void) { size_t i; size_t total_alloc; size_t checkpoint; - scratch = secp256k1_scratch_create(&ctx->error_callback, scratch_size); + scratch = rustsecp256k1_v0_1_0_scratch_create(&ctx->error_callback, scratch_size); CHECK(scratch != NULL); - checkpoint = secp256k1_scratch_checkpoint(&ctx->error_callback, scratch); - n_points_supported = secp256k1_pippenger_max_points(&ctx->error_callback, scratch); + checkpoint = rustsecp256k1_v0_1_0_scratch_checkpoint(&ctx->error_callback, scratch); + n_points_supported = rustsecp256k1_v0_1_0_pippenger_max_points(&ctx->error_callback, scratch); if (n_points_supported == 0) { - secp256k1_scratch_destroy(&ctx->error_callback, scratch); + rustsecp256k1_v0_1_0_scratch_destroy(&ctx->error_callback, scratch); continue; } - bucket_window = secp256k1_pippenger_bucket_window(n_points_supported); + bucket_window = rustsecp256k1_v0_1_0_pippenger_bucket_window(n_points_supported); /* allocate `total_alloc` bytes over `PIPPENGER_SCRATCH_OBJECTS` many allocations */ - total_alloc = secp256k1_pippenger_scratch_size(n_points_supported, bucket_window); + total_alloc = rustsecp256k1_v0_1_0_pippenger_scratch_size(n_points_supported, bucket_window); for (i = 0; i < PIPPENGER_SCRATCH_OBJECTS - 1; i++) { - CHECK(secp256k1_scratch_alloc(&ctx->error_callback, scratch, 1)); + CHECK(rustsecp256k1_v0_1_0_scratch_alloc(&ctx->error_callback, scratch, 1)); total_alloc--; } - CHECK(secp256k1_scratch_alloc(&ctx->error_callback, scratch, total_alloc)); - secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, checkpoint); - secp256k1_scratch_destroy(&ctx->error_callback, scratch); + CHECK(rustsecp256k1_v0_1_0_scratch_alloc(&ctx->error_callback, scratch, total_alloc)); + rustsecp256k1_v0_1_0_scratch_apply_checkpoint(&ctx->error_callback, scratch, checkpoint); + rustsecp256k1_v0_1_0_scratch_destroy(&ctx->error_callback, scratch); } CHECK(bucket_window == PIPPENGER_MAX_BUCKET_WINDOW); } @@ -2991,152 +2991,152 @@ void test_ecmult_multi_batch_size_helper(void) { max_n_batch_points = 0; n = 1; - CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 0); + CHECK(rustsecp256k1_v0_1_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 0); max_n_batch_points = 1; n = 0; - CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_1_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 0); CHECK(n_batch_points == 0); max_n_batch_points = 2; n = 5; - CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_1_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 3); CHECK(n_batch_points == 2); max_n_batch_points = 
ECMULT_MAX_POINTS_PER_BATCH; n = ECMULT_MAX_POINTS_PER_BATCH; - CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_1_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 1); CHECK(n_batch_points == ECMULT_MAX_POINTS_PER_BATCH); max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH + 1; n = ECMULT_MAX_POINTS_PER_BATCH + 1; - CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_1_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 2); CHECK(n_batch_points == ECMULT_MAX_POINTS_PER_BATCH/2 + 1); max_n_batch_points = 1; n = SIZE_MAX; - CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_1_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == SIZE_MAX); CHECK(n_batch_points == 1); max_n_batch_points = 2; n = SIZE_MAX; - CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_1_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == SIZE_MAX/2 + 1); CHECK(n_batch_points == 2); } /** - * Run secp256k1_ecmult_multi_var with num points and a scratch space restricted to + * Run rustsecp256k1_v0_1_0_ecmult_multi_var with num points and a scratch space restricted to * 1 <= i <= num points. */ void test_ecmult_multi_batching(void) { static const int n_points = 2*ECMULT_PIPPENGER_THRESHOLD; - secp256k1_scalar scG; - secp256k1_scalar szero; - secp256k1_scalar *sc = (secp256k1_scalar *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_scalar) * n_points); - secp256k1_ge *pt = (secp256k1_ge *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_ge) * n_points); - secp256k1_gej r; - secp256k1_gej r2; + rustsecp256k1_v0_1_0_scalar scG; + rustsecp256k1_v0_1_0_scalar szero; + rustsecp256k1_v0_1_0_scalar *sc = (rustsecp256k1_v0_1_0_scalar *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_1_0_scalar) * n_points); + rustsecp256k1_v0_1_0_ge *pt = (rustsecp256k1_v0_1_0_ge *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_1_0_ge) * n_points); + rustsecp256k1_v0_1_0_gej r; + rustsecp256k1_v0_1_0_gej r2; ecmult_multi_data data; int i; - secp256k1_scratch *scratch; + rustsecp256k1_v0_1_0_scratch *scratch; - secp256k1_gej_set_infinity(&r2); - secp256k1_scalar_set_int(&szero, 0); + rustsecp256k1_v0_1_0_gej_set_infinity(&r2); + rustsecp256k1_v0_1_0_scalar_set_int(&szero, 0); /* Get random scalars and group elements and compute result */ random_scalar_order(&scG); - secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &r2, &szero, &scG); + rustsecp256k1_v0_1_0_ecmult(&ctx->ecmult_ctx, &r2, &r2, &szero, &scG); for(i = 0; i < n_points; i++) { - secp256k1_ge ptg; - secp256k1_gej ptgj; + rustsecp256k1_v0_1_0_ge ptg; + rustsecp256k1_v0_1_0_gej ptgj; random_group_element_test(&ptg); - secp256k1_gej_set_ge(&ptgj, &ptg); + rustsecp256k1_v0_1_0_gej_set_ge(&ptgj, &ptg); pt[i] = ptg; random_scalar_order(&sc[i]); - secp256k1_ecmult(&ctx->ecmult_ctx, &ptgj, &ptgj, &sc[i], NULL); - secp256k1_gej_add_var(&r2, &r2, &ptgj, NULL); + rustsecp256k1_v0_1_0_ecmult(&ctx->ecmult_ctx, &ptgj, &ptgj, &sc[i], NULL); + rustsecp256k1_v0_1_0_gej_add_var(&r2, &r2, &ptgj, NULL); } data.sc = sc; data.pt = pt; - 
secp256k1_gej_neg(&r2, &r2); + rustsecp256k1_v0_1_0_gej_neg(&r2, &r2); /* Test with empty scratch space. It should compute the correct result using * ecmult_mult_simple algorithm which doesn't require a scratch space. */ - scratch = secp256k1_scratch_create(&ctx->error_callback, 0); - CHECK(secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); - secp256k1_gej_add_var(&r, &r, &r2, NULL); - CHECK(secp256k1_gej_is_infinity(&r)); - secp256k1_scratch_destroy(&ctx->error_callback, scratch); + scratch = rustsecp256k1_v0_1_0_scratch_create(&ctx->error_callback, 0); + CHECK(rustsecp256k1_v0_1_0_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); + rustsecp256k1_v0_1_0_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&r)); + rustsecp256k1_v0_1_0_scratch_destroy(&ctx->error_callback, scratch); /* Test with space for 1 point in pippenger. That's not enough because * ecmult_multi selects strauss which requires more memory. It should * therefore select the simple algorithm. */ - scratch = secp256k1_scratch_create(&ctx->error_callback, secp256k1_pippenger_scratch_size(1, 1) + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT); - CHECK(secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); - secp256k1_gej_add_var(&r, &r, &r2, NULL); - CHECK(secp256k1_gej_is_infinity(&r)); - secp256k1_scratch_destroy(&ctx->error_callback, scratch); + scratch = rustsecp256k1_v0_1_0_scratch_create(&ctx->error_callback, rustsecp256k1_v0_1_0_pippenger_scratch_size(1, 1) + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT); + CHECK(rustsecp256k1_v0_1_0_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); + rustsecp256k1_v0_1_0_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&r)); + rustsecp256k1_v0_1_0_scratch_destroy(&ctx->error_callback, scratch); for(i = 1; i <= n_points; i++) { if (i > ECMULT_PIPPENGER_THRESHOLD) { - int bucket_window = secp256k1_pippenger_bucket_window(i); - size_t scratch_size = secp256k1_pippenger_scratch_size(i, bucket_window); - scratch = secp256k1_scratch_create(&ctx->error_callback, scratch_size + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT); + int bucket_window = rustsecp256k1_v0_1_0_pippenger_bucket_window(i); + size_t scratch_size = rustsecp256k1_v0_1_0_pippenger_scratch_size(i, bucket_window); + scratch = rustsecp256k1_v0_1_0_scratch_create(&ctx->error_callback, scratch_size + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT); } else { - size_t scratch_size = secp256k1_strauss_scratch_size(i); - scratch = secp256k1_scratch_create(&ctx->error_callback, scratch_size + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); + size_t scratch_size = rustsecp256k1_v0_1_0_strauss_scratch_size(i); + scratch = rustsecp256k1_v0_1_0_scratch_create(&ctx->error_callback, scratch_size + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); } - CHECK(secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); - secp256k1_gej_add_var(&r, &r, &r2, NULL); - CHECK(secp256k1_gej_is_infinity(&r)); - secp256k1_scratch_destroy(&ctx->error_callback, scratch); + CHECK(rustsecp256k1_v0_1_0_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); + rustsecp256k1_v0_1_0_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&r)); + 
rustsecp256k1_v0_1_0_scratch_destroy(&ctx->error_callback, scratch); } free(sc); free(pt); } void run_ecmult_multi_tests(void) { - secp256k1_scratch *scratch; + rustsecp256k1_v0_1_0_scratch *scratch; - test_secp256k1_pippenger_bucket_window_inv(); + test_rustsecp256k1_v0_1_0_pippenger_bucket_window_inv(); test_ecmult_multi_pippenger_max_points(); - scratch = secp256k1_scratch_create(&ctx->error_callback, 819200); - test_ecmult_multi(scratch, secp256k1_ecmult_multi_var); - test_ecmult_multi(NULL, secp256k1_ecmult_multi_var); - test_ecmult_multi(scratch, secp256k1_ecmult_pippenger_batch_single); - test_ecmult_multi_batch_single(secp256k1_ecmult_pippenger_batch_single); - test_ecmult_multi(scratch, secp256k1_ecmult_strauss_batch_single); - test_ecmult_multi_batch_single(secp256k1_ecmult_strauss_batch_single); - secp256k1_scratch_destroy(&ctx->error_callback, scratch); + scratch = rustsecp256k1_v0_1_0_scratch_create(&ctx->error_callback, 819200); + test_ecmult_multi(scratch, rustsecp256k1_v0_1_0_ecmult_multi_var); + test_ecmult_multi(NULL, rustsecp256k1_v0_1_0_ecmult_multi_var); + test_ecmult_multi(scratch, rustsecp256k1_v0_1_0_ecmult_pippenger_batch_single); + test_ecmult_multi_batch_single(rustsecp256k1_v0_1_0_ecmult_pippenger_batch_single); + test_ecmult_multi(scratch, rustsecp256k1_v0_1_0_ecmult_strauss_batch_single); + test_ecmult_multi_batch_single(rustsecp256k1_v0_1_0_ecmult_strauss_batch_single); + rustsecp256k1_v0_1_0_scratch_destroy(&ctx->error_callback, scratch); /* Run test_ecmult_multi with space for exactly one point */ - scratch = secp256k1_scratch_create(&ctx->error_callback, secp256k1_strauss_scratch_size(1) + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); - test_ecmult_multi(scratch, secp256k1_ecmult_multi_var); - secp256k1_scratch_destroy(&ctx->error_callback, scratch); + scratch = rustsecp256k1_v0_1_0_scratch_create(&ctx->error_callback, rustsecp256k1_v0_1_0_strauss_scratch_size(1) + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); + test_ecmult_multi(scratch, rustsecp256k1_v0_1_0_ecmult_multi_var); + rustsecp256k1_v0_1_0_scratch_destroy(&ctx->error_callback, scratch); test_ecmult_multi_batch_size_helper(); test_ecmult_multi_batching(); } -void test_wnaf(const secp256k1_scalar *number, int w) { - secp256k1_scalar x, two, t; +void test_wnaf(const rustsecp256k1_v0_1_0_scalar *number, int w) { + rustsecp256k1_v0_1_0_scalar x, two, t; int wnaf[256]; int zeroes = -1; int i; int bits; - secp256k1_scalar_set_int(&x, 0); - secp256k1_scalar_set_int(&two, 2); - bits = secp256k1_ecmult_wnaf(wnaf, 256, number, w); + rustsecp256k1_v0_1_0_scalar_set_int(&x, 0); + rustsecp256k1_v0_1_0_scalar_set_int(&two, 2); + bits = rustsecp256k1_v0_1_0_ecmult_wnaf(wnaf, 256, number, w); CHECK(bits <= 256); for (i = bits-1; i >= 0; i--) { int v = wnaf[i]; - secp256k1_scalar_mul(&x, &x, &two); + rustsecp256k1_v0_1_0_scalar_mul(&x, &x, &two); if (v) { CHECK(zeroes == -1 || zeroes >= w-1); /* check that distance between non-zero elements is at least w-1 */ zeroes=0; @@ -3148,108 +3148,108 @@ void test_wnaf(const secp256k1_scalar *number, int w) { zeroes++; } if (v >= 0) { - secp256k1_scalar_set_int(&t, v); + rustsecp256k1_v0_1_0_scalar_set_int(&t, v); } else { - secp256k1_scalar_set_int(&t, -v); - secp256k1_scalar_negate(&t, &t); + rustsecp256k1_v0_1_0_scalar_set_int(&t, -v); + rustsecp256k1_v0_1_0_scalar_negate(&t, &t); } - secp256k1_scalar_add(&x, &x, &t); + rustsecp256k1_v0_1_0_scalar_add(&x, &x, &t); } - CHECK(secp256k1_scalar_eq(&x, number)); /* check that wnaf represents number */ + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&x, 
number)); /* check that wnaf represents number */ } -void test_constant_wnaf_negate(const secp256k1_scalar *number) { - secp256k1_scalar neg1 = *number; - secp256k1_scalar neg2 = *number; +void test_constant_wnaf_negate(const rustsecp256k1_v0_1_0_scalar *number) { + rustsecp256k1_v0_1_0_scalar neg1 = *number; + rustsecp256k1_v0_1_0_scalar neg2 = *number; int sign1 = 1; int sign2 = 1; - if (!secp256k1_scalar_get_bits(&neg1, 0, 1)) { - secp256k1_scalar_negate(&neg1, &neg1); + if (!rustsecp256k1_v0_1_0_scalar_get_bits(&neg1, 0, 1)) { + rustsecp256k1_v0_1_0_scalar_negate(&neg1, &neg1); sign1 = -1; } - sign2 = secp256k1_scalar_cond_negate(&neg2, secp256k1_scalar_is_even(&neg2)); + sign2 = rustsecp256k1_v0_1_0_scalar_cond_negate(&neg2, rustsecp256k1_v0_1_0_scalar_is_even(&neg2)); CHECK(sign1 == sign2); - CHECK(secp256k1_scalar_eq(&neg1, &neg2)); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&neg1, &neg2)); } -void test_constant_wnaf(const secp256k1_scalar *number, int w) { - secp256k1_scalar x, shift; +void test_constant_wnaf(const rustsecp256k1_v0_1_0_scalar *number, int w) { + rustsecp256k1_v0_1_0_scalar x, shift; int wnaf[256] = {0}; int i; int skew; int bits = 256; - secp256k1_scalar num = *number; + rustsecp256k1_v0_1_0_scalar num = *number; - secp256k1_scalar_set_int(&x, 0); - secp256k1_scalar_set_int(&shift, 1 << w); + rustsecp256k1_v0_1_0_scalar_set_int(&x, 0); + rustsecp256k1_v0_1_0_scalar_set_int(&shift, 1 << w); /* With USE_ENDOMORPHISM on we only consider 128-bit numbers */ #ifdef USE_ENDOMORPHISM for (i = 0; i < 16; ++i) { - secp256k1_scalar_shr_int(&num, 8); + rustsecp256k1_v0_1_0_scalar_shr_int(&num, 8); } bits = 128; #endif - skew = secp256k1_wnaf_const(wnaf, &num, w, bits); + skew = rustsecp256k1_v0_1_0_wnaf_const(wnaf, &num, w, bits); for (i = WNAF_SIZE_BITS(bits, w); i >= 0; --i) { - secp256k1_scalar t; + rustsecp256k1_v0_1_0_scalar t; int v = wnaf[i]; CHECK(v != 0); /* check nonzero */ CHECK(v & 1); /* check parity */ CHECK(v > -(1 << w)); /* check range above */ CHECK(v < (1 << w)); /* check range below */ - secp256k1_scalar_mul(&x, &x, &shift); + rustsecp256k1_v0_1_0_scalar_mul(&x, &x, &shift); if (v >= 0) { - secp256k1_scalar_set_int(&t, v); + rustsecp256k1_v0_1_0_scalar_set_int(&t, v); } else { - secp256k1_scalar_set_int(&t, -v); - secp256k1_scalar_negate(&t, &t); + rustsecp256k1_v0_1_0_scalar_set_int(&t, -v); + rustsecp256k1_v0_1_0_scalar_negate(&t, &t); } - secp256k1_scalar_add(&x, &x, &t); + rustsecp256k1_v0_1_0_scalar_add(&x, &x, &t); } /* Skew num because when encoding numbers as odd we use an offset */ - secp256k1_scalar_cadd_bit(&num, skew == 2, 1); - CHECK(secp256k1_scalar_eq(&x, &num)); + rustsecp256k1_v0_1_0_scalar_cadd_bit(&num, skew == 2, 1); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&x, &num)); } -void test_fixed_wnaf(const secp256k1_scalar *number, int w) { - secp256k1_scalar x, shift; +void test_fixed_wnaf(const rustsecp256k1_v0_1_0_scalar *number, int w) { + rustsecp256k1_v0_1_0_scalar x, shift; int wnaf[256] = {0}; int i; int skew; - secp256k1_scalar num = *number; + rustsecp256k1_v0_1_0_scalar num = *number; - secp256k1_scalar_set_int(&x, 0); - secp256k1_scalar_set_int(&shift, 1 << w); + rustsecp256k1_v0_1_0_scalar_set_int(&x, 0); + rustsecp256k1_v0_1_0_scalar_set_int(&shift, 1 << w); /* With USE_ENDOMORPHISM on we only consider 128-bit numbers */ #ifdef USE_ENDOMORPHISM for (i = 0; i < 16; ++i) { - secp256k1_scalar_shr_int(&num, 8); + rustsecp256k1_v0_1_0_scalar_shr_int(&num, 8); } #endif - skew = secp256k1_wnaf_fixed(wnaf, &num, w); + skew = 
rustsecp256k1_v0_1_0_wnaf_fixed(wnaf, &num, w); for (i = WNAF_SIZE(w)-1; i >= 0; --i) { - secp256k1_scalar t; + rustsecp256k1_v0_1_0_scalar t; int v = wnaf[i]; CHECK(v == 0 || v & 1); /* check parity */ CHECK(v > -(1 << w)); /* check range above */ CHECK(v < (1 << w)); /* check range below */ - secp256k1_scalar_mul(&x, &x, &shift); + rustsecp256k1_v0_1_0_scalar_mul(&x, &x, &shift); if (v >= 0) { - secp256k1_scalar_set_int(&t, v); + rustsecp256k1_v0_1_0_scalar_set_int(&t, v); } else { - secp256k1_scalar_set_int(&t, -v); - secp256k1_scalar_negate(&t, &t); + rustsecp256k1_v0_1_0_scalar_set_int(&t, -v); + rustsecp256k1_v0_1_0_scalar_negate(&t, &t); } - secp256k1_scalar_add(&x, &x, &t); + rustsecp256k1_v0_1_0_scalar_add(&x, &x, &t); } /* If skew is 1 then add 1 to num */ - secp256k1_scalar_cadd_bit(&num, 0, skew == 1); - CHECK(secp256k1_scalar_eq(&x, &num)); + rustsecp256k1_v0_1_0_scalar_cadd_bit(&num, 0, skew == 1); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&x, &num)); } /* Checks that the first 8 elements of wnaf are equal to wnaf_expected and the @@ -3269,18 +3269,18 @@ void test_fixed_wnaf_small(void) { int wnaf[256] = {0}; int i; int skew; - secp256k1_scalar num; + rustsecp256k1_v0_1_0_scalar num; - secp256k1_scalar_set_int(&num, 0); - skew = secp256k1_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_1_0_scalar_set_int(&num, 0); + skew = rustsecp256k1_v0_1_0_wnaf_fixed(wnaf, &num, w); for (i = WNAF_SIZE(w)-1; i >= 0; --i) { int v = wnaf[i]; CHECK(v == 0); } CHECK(skew == 0); - secp256k1_scalar_set_int(&num, 1); - skew = secp256k1_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_1_0_scalar_set_int(&num, 1); + skew = rustsecp256k1_v0_1_0_wnaf_fixed(wnaf, &num, w); for (i = WNAF_SIZE(w)-1; i >= 1; --i) { int v = wnaf[i]; CHECK(v == 0); @@ -3290,29 +3290,29 @@ void test_fixed_wnaf_small(void) { { int wnaf_expected[8] = { 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf }; - secp256k1_scalar_set_int(&num, 0xffffffff); - skew = secp256k1_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_1_0_scalar_set_int(&num, 0xffffffff); + skew = rustsecp256k1_v0_1_0_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 0); } { int wnaf_expected[8] = { -1, -1, -1, -1, -1, -1, -1, 0xf }; - secp256k1_scalar_set_int(&num, 0xeeeeeeee); - skew = secp256k1_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_1_0_scalar_set_int(&num, 0xeeeeeeee); + skew = rustsecp256k1_v0_1_0_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 1); } { int wnaf_expected[8] = { 1, 0, 1, 0, 1, 0, 1, 0 }; - secp256k1_scalar_set_int(&num, 0x01010101); - skew = secp256k1_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_1_0_scalar_set_int(&num, 0x01010101); + skew = rustsecp256k1_v0_1_0_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 0); } { int wnaf_expected[8] = { -0xf, 0, 0xf, -0xf, 0, 0xf, 1, 0 }; - secp256k1_scalar_set_int(&num, 0x01ef1ef1); - skew = secp256k1_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_1_0_scalar_set_int(&num, 0x01ef1ef1); + skew = rustsecp256k1_v0_1_0_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 0); } @@ -3320,7 +3320,7 @@ void test_fixed_wnaf_small(void) { void run_wnaf(void) { int i; - secp256k1_scalar n = {{0}}; + rustsecp256k1_v0_1_0_scalar n = {{0}}; /* Sanity check: 1 and 2 are the smallest odd and even numbers and should * have easier-to-diagnose failure modes */ @@ -3338,43 +3338,43 @@ void run_wnaf(void) { test_constant_wnaf(&n, 4 + (i % 10)); 
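/* Illustrative sketch of the digit convention the wNAF tests decode: with
 * window parameter w, digit d[i] carries weight 2^(w*i), each digit
 * satisfies |d[i]| < 2^w, and the value is rebuilt most-significant digit
 * first, exactly as the accumulator loops above do. The helper is invented
 * for illustration and is not part of the library. */
static long long wnaf_value_sketch(const int *digits, int n_digits, int w) {
    long long acc = 0;
    int i;
    for (i = n_digits - 1; i >= 0; i--) {
        acc = acc * (1LL << w) + digits[i];   /* acc = acc * 2^w + signed digit */
    }
    return acc;
}
/* With w = 4, the expected fixed-wNAF digits { -0xf, 0, 0xf, -0xf, 0, 0xf,
 * 1, 0 } from the test above decode back to 0x01ef1ef1 with zero skew. */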
test_fixed_wnaf(&n, 4 + (i % 10)); } - secp256k1_scalar_set_int(&n, 0); - CHECK(secp256k1_scalar_cond_negate(&n, 1) == -1); - CHECK(secp256k1_scalar_is_zero(&n)); - CHECK(secp256k1_scalar_cond_negate(&n, 0) == 1); - CHECK(secp256k1_scalar_is_zero(&n)); + rustsecp256k1_v0_1_0_scalar_set_int(&n, 0); + CHECK(rustsecp256k1_v0_1_0_scalar_cond_negate(&n, 1) == -1); + CHECK(rustsecp256k1_v0_1_0_scalar_is_zero(&n)); + CHECK(rustsecp256k1_v0_1_0_scalar_cond_negate(&n, 0) == 1); + CHECK(rustsecp256k1_v0_1_0_scalar_is_zero(&n)); } void test_ecmult_constants(void) { /* Test ecmult_gen() for [0..36) and [order-36..0). */ - secp256k1_scalar x; - secp256k1_gej r; - secp256k1_ge ng; + rustsecp256k1_v0_1_0_scalar x; + rustsecp256k1_v0_1_0_gej r; + rustsecp256k1_v0_1_0_ge ng; int i; int j; - secp256k1_ge_neg(&ng, &secp256k1_ge_const_g); + rustsecp256k1_v0_1_0_ge_neg(&ng, &rustsecp256k1_v0_1_0_ge_const_g); for (i = 0; i < 36; i++ ) { - secp256k1_scalar_set_int(&x, i); - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &r, &x); + rustsecp256k1_v0_1_0_scalar_set_int(&x, i); + rustsecp256k1_v0_1_0_ecmult_gen(&ctx->ecmult_gen_ctx, &r, &x); for (j = 0; j < i; j++) { if (j == i - 1) { - ge_equals_gej(&secp256k1_ge_const_g, &r); + ge_equals_gej(&rustsecp256k1_v0_1_0_ge_const_g, &r); } - secp256k1_gej_add_ge(&r, &r, &ng); + rustsecp256k1_v0_1_0_gej_add_ge(&r, &r, &ng); } - CHECK(secp256k1_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&r)); } for (i = 1; i <= 36; i++ ) { - secp256k1_scalar_set_int(&x, i); - secp256k1_scalar_negate(&x, &x); - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &r, &x); + rustsecp256k1_v0_1_0_scalar_set_int(&x, i); + rustsecp256k1_v0_1_0_scalar_negate(&x, &x); + rustsecp256k1_v0_1_0_ecmult_gen(&ctx->ecmult_gen_ctx, &r, &x); for (j = 0; j < i; j++) { if (j == i - 1) { ge_equals_gej(&ng, &r); } - secp256k1_gej_add_ge(&r, &r, &secp256k1_ge_const_g); + rustsecp256k1_v0_1_0_gej_add_ge(&r, &r, &rustsecp256k1_v0_1_0_ge_const_g); } - CHECK(secp256k1_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&r)); } } @@ -3384,36 +3384,36 @@ void run_ecmult_constants(void) { void test_ecmult_gen_blind(void) { /* Test ecmult_gen() blinding and confirm that the blinding changes, the affine points match, and the z's don't match. 
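*/

/* Sketch of the property test_ecmult_constants above relies on: for small i,
 * ecmult_gen(i) must land on the same affine point as G added to itself i
 * times. Hypothetical helper, written only to spell the check out; all the
 * group operations used here appear elsewhere in this file. */
static void small_multiple_sketch(int i) {
    rustsecp256k1_v0_1_0_scalar x;
    rustsecp256k1_v0_1_0_gej r, expect;
    int j;
    rustsecp256k1_v0_1_0_scalar_set_int(&x, i);
    rustsecp256k1_v0_1_0_ecmult_gen(&ctx->ecmult_gen_ctx, &r, &x);
    rustsecp256k1_v0_1_0_gej_set_infinity(&expect);
    for (j = 0; j < i; j++) {
        rustsecp256k1_v0_1_0_gej_add_ge(&expect, &expect, &rustsecp256k1_v0_1_0_ge_const_g);
    }
    /* r and expect may differ as Jacobian triples yet encode the same affine
     * point; the blinding test below probes exactly that distinction. */
}

/*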
*/ - secp256k1_scalar key; - secp256k1_scalar b; + rustsecp256k1_v0_1_0_scalar key; + rustsecp256k1_v0_1_0_scalar b; unsigned char seed32[32]; - secp256k1_gej pgej; - secp256k1_gej pgej2; - secp256k1_gej i; - secp256k1_ge pge; + rustsecp256k1_v0_1_0_gej pgej; + rustsecp256k1_v0_1_0_gej pgej2; + rustsecp256k1_v0_1_0_gej i; + rustsecp256k1_v0_1_0_ge pge; random_scalar_order_test(&key); - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej, &key); - secp256k1_rand256(seed32); + rustsecp256k1_v0_1_0_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej, &key); + rustsecp256k1_v0_1_0_rand256(seed32); b = ctx->ecmult_gen_ctx.blind; i = ctx->ecmult_gen_ctx.initial; - secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); - CHECK(!secp256k1_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind)); - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej2, &key); + rustsecp256k1_v0_1_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); + CHECK(!rustsecp256k1_v0_1_0_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind)); + rustsecp256k1_v0_1_0_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej2, &key); CHECK(!gej_xyz_equals_gej(&pgej, &pgej2)); CHECK(!gej_xyz_equals_gej(&i, &ctx->ecmult_gen_ctx.initial)); - secp256k1_ge_set_gej(&pge, &pgej); + rustsecp256k1_v0_1_0_ge_set_gej(&pge, &pgej); ge_equals_gej(&pge, &pgej2); } void test_ecmult_gen_blind_reset(void) { /* Test ecmult_gen() blinding reset and confirm that the blinding is consistent. */ - secp256k1_scalar b; - secp256k1_gej initial; - secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0); + rustsecp256k1_v0_1_0_scalar b; + rustsecp256k1_v0_1_0_gej initial; + rustsecp256k1_v0_1_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0); b = ctx->ecmult_gen_ctx.blind; initial = ctx->ecmult_gen_ctx.initial; - secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0); - CHECK(secp256k1_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind)); + rustsecp256k1_v0_1_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0); + CHECK(rustsecp256k1_v0_1_0_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind)); CHECK(gej_xyz_equals_gej(&initial, &ctx->ecmult_gen_ctx.initial)); } @@ -3428,25 +3428,25 @@ void run_ecmult_gen_blind(void) { #ifdef USE_ENDOMORPHISM /***** ENDOMORPHISH TESTS *****/ void test_scalar_split(void) { - secp256k1_scalar full; - secp256k1_scalar s1, slam; + rustsecp256k1_v0_1_0_scalar full; + rustsecp256k1_v0_1_0_scalar s1, slam; const unsigned char zero[32] = {0}; unsigned char tmp[32]; random_scalar_order_test(&full); - secp256k1_scalar_split_lambda(&s1, &slam, &full); + rustsecp256k1_v0_1_0_scalar_split_lambda(&s1, &slam, &full); /* check that both are <= 128 bits in size */ - if (secp256k1_scalar_is_high(&s1)) { - secp256k1_scalar_negate(&s1, &s1); + if (rustsecp256k1_v0_1_0_scalar_is_high(&s1)) { + rustsecp256k1_v0_1_0_scalar_negate(&s1, &s1); } - if (secp256k1_scalar_is_high(&slam)) { - secp256k1_scalar_negate(&slam, &slam); + if (rustsecp256k1_v0_1_0_scalar_is_high(&slam)) { + rustsecp256k1_v0_1_0_scalar_negate(&slam, &slam); } - secp256k1_scalar_get_b32(tmp, &s1); + rustsecp256k1_v0_1_0_scalar_get_b32(tmp, &s1); CHECK(memcmp(zero, tmp, 16) == 0); - secp256k1_scalar_get_b32(tmp, &slam); + rustsecp256k1_v0_1_0_scalar_get_b32(tmp, &slam); CHECK(memcmp(zero, tmp, 16) == 0); } @@ -3457,12 +3457,12 @@ void run_endomorphism_tests(void) { void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvalid) { unsigned char pubkeyc[65]; - secp256k1_pubkey pubkey; - secp256k1_ge ge; + rustsecp256k1_v0_1_0_pubkey pubkey; + rustsecp256k1_v0_1_0_ge ge; size_t pubkeyclen; int32_t ecount; ecount = 0; - secp256k1_context_set_illegal_callback(ctx, 
counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_1_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); for (pubkeyclen = 3; pubkeyclen <= 65; pubkeyclen++) { /* Smaller sizes are tested exhaustively elsewhere. */ int32_t i; @@ -3488,11 +3488,11 @@ void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvali memset(&pubkey, 0, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); ecount = 0; - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); outl = 65; VG_UNDEF(pubkeyo, 65); - CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_COMPRESSED) == 1); VG_CHECK(pubkeyo, outl); CHECK(outl == 33); CHECK(memcmp(&pubkeyo[1], &pubkeyc[1], 32) == 0); @@ -3500,14 +3500,14 @@ void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvali if (ypass) { /* This test isn't always done because we decode with alternative signs, so the y won't match. */ CHECK(pubkeyo[0] == ysign); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 1); + CHECK(rustsecp256k1_v0_1_0_pubkey_load(ctx, &ge, &pubkey) == 1); memset(&pubkey, 0, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); - secp256k1_pubkey_save(&pubkey, &ge); + rustsecp256k1_v0_1_0_pubkey_save(&pubkey, &ge); VG_CHECK(&pubkey, sizeof(pubkey)); outl = 65; VG_UNDEF(pubkeyo, 65); - CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); VG_CHECK(pubkeyo, outl); CHECK(outl == 65); CHECK(pubkeyo[0] == 4); @@ -3519,15 +3519,15 @@ void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvali memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_1_0_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); } } } - secp256k1_context_set_illegal_callback(ctx, NULL, NULL); + rustsecp256k1_v0_1_0_context_set_illegal_callback(ctx, NULL, NULL); } void run_ec_pubkey_parse_test(void) { @@ -3711,8 +3711,8 @@ void run_ec_pubkey_parse_test(void) { }; unsigned char sout[65]; unsigned char shortkey[2]; - secp256k1_ge ge; - secp256k1_pubkey pubkey; + rustsecp256k1_v0_1_0_ge ge; + rustsecp256k1_v0_1_0_pubkey pubkey; size_t len; int32_t i; int32_t ecount; @@ -3720,16 +3720,16 @@ void run_ec_pubkey_parse_test(void) { ecount = 0; /* Nothing should be reading this far into pubkeyc. */ VG_UNDEF(&pubkeyc[65], 1); - secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_1_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); /* Zero length claimed, fail, zeroize, no illegal arg error. 
*/ memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(shortkey, 2); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, shortkey, 0) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, shortkey, 0) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_1_0_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); /* Length one claimed, fail, zeroize, no illegal arg error. */ for (i = 0; i < 256 ; i++) { @@ -3738,10 +3738,10 @@ void run_ec_pubkey_parse_test(void) { shortkey[0] = i; VG_UNDEF(&shortkey[1], 1); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, shortkey, 1) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, shortkey, 1) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_1_0_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); } /* Length two claimed, fail, zeroize, no illegal arg error. */ @@ -3751,101 +3751,101 @@ void run_ec_pubkey_parse_test(void) { shortkey[0] = i & 255; shortkey[1] = i >> 8; VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, shortkey, 2) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, shortkey, 2) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_1_0_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); } memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); /* 33 bytes claimed on otherwise valid input starting with 0x04, fail, zeroize output, no illegal arg error. */ - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 33) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 33) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_1_0_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); /* NULL pubkey, illegal arg error. Pubkey isn't rewritten before this step, since it's NULL into the parser. */ - CHECK(secp256k1_ec_pubkey_parse(ctx, NULL, pubkeyc, 65) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, NULL, pubkeyc, 65) == 0); CHECK(ecount == 2); /* NULL input string. Illegal arg and zeroize output. */ memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, NULL, 65) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, NULL, 65) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 1); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_1_0_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 2); /* 64 bytes claimed on input starting with 0x04, fail, zeroize output, no illegal arg error. */ memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 64) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 64) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_1_0_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); /* 66 bytes claimed, fail, zeroize output, no illegal arg error. 
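*/

/* A sketch (not the parser itself) of the length/prefix gate these cases
 * probe: only a 33-byte compressed encoding (prefix 0x02/0x03) or a 65-byte
 * uncompressed or X9.62 hybrid encoding (prefix 0x04, 0x06 or 0x07) can
 * possibly be valid, and the CHECKs confirm the output is zeroized on
 * failure. */
static int pubkey_envelope_ok_sketch(const unsigned char *in, size_t len) {
    if (len == 33) {
        return in[0] == 0x02 || in[0] == 0x03;            /* compressed */
    }
    if (len == 65) {
        return in[0] == 0x04 || in[0] == 0x06 || in[0] == 0x07;
    }
    return 0;   /* every other claimed length (0, 1, 2, 64, 66, ...) fails */
}

/*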
*/ memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 66) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 66) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_1_0_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); /* Valid parse. */ memset(&pubkey, 0, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 65) == 1); - CHECK(secp256k1_ec_pubkey_parse(secp256k1_context_no_precomp, &pubkey, pubkeyc, 65) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 65) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_parse(rustsecp256k1_v0_1_0_context_no_precomp, &pubkey, pubkeyc, 65) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); VG_UNDEF(&ge, sizeof(ge)); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 1); + CHECK(rustsecp256k1_v0_1_0_pubkey_load(ctx, &ge, &pubkey) == 1); VG_CHECK(&ge.x, sizeof(ge.x)); VG_CHECK(&ge.y, sizeof(ge.y)); VG_CHECK(&ge.infinity, sizeof(ge.infinity)); - ge_equals_ge(&secp256k1_ge_const_g, &ge); + ge_equals_ge(&rustsecp256k1_v0_1_0_ge_const_g, &ge); CHECK(ecount == 0); - /* secp256k1_ec_pubkey_serialize illegal args. */ + /* rustsecp256k1_v0_1_0_ec_pubkey_serialize illegal args. */ ecount = 0; len = 65; - CHECK(secp256k1_ec_pubkey_serialize(ctx, NULL, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, NULL, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0); CHECK(ecount == 1); CHECK(len == 0); - CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, NULL, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, sout, NULL, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0); CHECK(ecount == 2); len = 65; VG_UNDEF(sout, 65); - CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, &len, NULL, SECP256K1_EC_UNCOMPRESSED) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, sout, &len, NULL, SECP256K1_EC_UNCOMPRESSED) == 0); VG_CHECK(sout, 65); CHECK(ecount == 3); CHECK(len == 0); len = 65; - CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, &len, &pubkey, ~0) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, sout, &len, &pubkey, ~0) == 0); CHECK(ecount == 4); CHECK(len == 0); len = 65; VG_UNDEF(sout, 65); - CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, sout, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); VG_CHECK(sout, 65); CHECK(ecount == 4); CHECK(len == 65); /* Multiple illegal args. Should still set arg error only once. */ ecount = 0; ecount2 = 11; - CHECK(secp256k1_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0); CHECK(ecount == 1); /* Does the illegal arg callback actually change the behavior? 
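*/

/* The ecount bookkeeping above depends on an illegal-argument callback that
 * increments a caller-supplied counter (with an "uncounting" twin that
 * decrements one, exercised just below). A hedged sketch of such a callback,
 * assuming the usual (const char *message, void *data) callback signature: */
static void counting_callback_sketch(const char *message, void *data) {
    int32_t *count = (int32_t *)data;
    (void)message;   /* the message text is irrelevant; only the count matters */
    (*count)++;
}

/*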
*/ - secp256k1_context_set_illegal_callback(ctx, uncounting_illegal_callback_fn, &ecount2); - CHECK(secp256k1_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0); + rustsecp256k1_v0_1_0_context_set_illegal_callback(ctx, uncounting_illegal_callback_fn, &ecount2); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0); CHECK(ecount == 1); CHECK(ecount2 == 10); - secp256k1_context_set_illegal_callback(ctx, NULL, NULL); + rustsecp256k1_v0_1_0_context_set_illegal_callback(ctx, NULL, NULL); /* Try a bunch of prefabbed points with all possible encodings. */ for (i = 0; i < SECP256K1_EC_PARSE_TEST_NVALID; i++) { ec_pubkey_parse_pointtest(valid[i], 1, 1); @@ -3865,263 +3865,263 @@ void run_eckey_edge_case_test(void) { 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41 }; - const unsigned char zeros[sizeof(secp256k1_pubkey)] = {0x00}; + const unsigned char zeros[sizeof(rustsecp256k1_v0_1_0_pubkey)] = {0x00}; unsigned char ctmp[33]; unsigned char ctmp2[33]; - secp256k1_pubkey pubkey; - secp256k1_pubkey pubkey2; - secp256k1_pubkey pubkey_one; - secp256k1_pubkey pubkey_negone; - const secp256k1_pubkey *pubkeys[3]; + rustsecp256k1_v0_1_0_pubkey pubkey; + rustsecp256k1_v0_1_0_pubkey pubkey2; + rustsecp256k1_v0_1_0_pubkey pubkey_one; + rustsecp256k1_v0_1_0_pubkey pubkey_negone; + const rustsecp256k1_v0_1_0_pubkey *pubkeys[3]; size_t len; int32_t ecount; /* Group order is too large, reject. */ - CHECK(secp256k1_ec_seckey_verify(ctx, orderc) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_seckey_verify(ctx, orderc) == 0); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, orderc) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey, orderc) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); + CHECK(memcmp(&pubkey, zeros, sizeof(rustsecp256k1_v0_1_0_pubkey)) == 0); /* Maximum value is too large, reject. */ memset(ctmp, 255, 32); - CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_seckey_verify(ctx, ctmp) == 0); memset(&pubkey, 1, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); + CHECK(memcmp(&pubkey, zeros, sizeof(rustsecp256k1_v0_1_0_pubkey)) == 0); /* Zero is too small, reject. */ memset(ctmp, 0, 32); - CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_seckey_verify(ctx, ctmp) == 0); memset(&pubkey, 1, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); + CHECK(memcmp(&pubkey, zeros, sizeof(rustsecp256k1_v0_1_0_pubkey)) == 0); /* One must be accepted. 
*/ ctmp[31] = 0x01; - CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_seckey_verify(ctx, ctmp) == 1); memset(&pubkey, 0, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); + CHECK(memcmp(&pubkey, zeros, sizeof(rustsecp256k1_v0_1_0_pubkey)) > 0); pubkey_one = pubkey; /* Group order + 1 is too large, reject. */ memcpy(ctmp, orderc, 32); ctmp[31] = 0x42; - CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_seckey_verify(ctx, ctmp) == 0); memset(&pubkey, 1, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); + CHECK(memcmp(&pubkey, zeros, sizeof(rustsecp256k1_v0_1_0_pubkey)) == 0); /* -1 must be accepted. */ ctmp[31] = 0x40; - CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_seckey_verify(ctx, ctmp) == 1); memset(&pubkey, 0, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); + CHECK(memcmp(&pubkey, zeros, sizeof(rustsecp256k1_v0_1_0_pubkey)) > 0); pubkey_negone = pubkey; /* Tweak of zero leaves the value unchanged. */ memset(ctmp2, 0, 32); - CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp, ctmp2) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_privkey_tweak_add(ctx, ctmp, ctmp2) == 1); CHECK(memcmp(orderc, ctmp, 31) == 0 && ctmp[31] == 0x40); memcpy(&pubkey2, &pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0); /* Multiply tweak of zero zeroizes the output. */ - CHECK(secp256k1_ec_privkey_tweak_mul(ctx, ctmp, ctmp2) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_privkey_tweak_mul(ctx, ctmp, ctmp2) == 0); CHECK(memcmp(zeros, ctmp, 32) == 0); - CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, ctmp2) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_tweak_mul(ctx, &pubkey, ctmp2) == 0); CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); /* Overflowing key tweak zeroizes. 
*/ memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; - CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp, orderc) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_privkey_tweak_add(ctx, ctmp, orderc) == 0); CHECK(memcmp(zeros, ctmp, 32) == 0); memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; - CHECK(secp256k1_ec_privkey_tweak_mul(ctx, ctmp, orderc) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_privkey_tweak_mul(ctx, ctmp, orderc) == 0); CHECK(memcmp(zeros, ctmp, 32) == 0); memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; - CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, orderc) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_tweak_add(ctx, &pubkey, orderc) == 0); CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, orderc) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_tweak_mul(ctx, &pubkey, orderc) == 0); CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); /* Private key tweaks results in a key of zero. */ ctmp2[31] = 1; - CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp2, ctmp) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_privkey_tweak_add(ctx, ctmp2, ctmp) == 0); CHECK(memcmp(zeros, ctmp2, 32) == 0); ctmp2[31] = 1; - CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0); CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); /* Tweak computation wraps and results in a key of 1. */ ctmp2[31] = 2; - CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp2, ctmp) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_privkey_tweak_add(ctx, ctmp2, ctmp) == 1); CHECK(memcmp(ctmp2, zeros, 31) == 0 && ctmp2[31] == 1); ctmp2[31] = 2; - CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); ctmp2[31] = 1; - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, ctmp2) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey2, ctmp2) == 1); CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0); /* Tweak mul * 2 = 1+1. */ - CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); ctmp2[31] = 2; - CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 1); CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0); /* Test argument errors. */ ecount = 0; - secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_1_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); CHECK(ecount == 0); /* Zeroize pubkey on parse error. */ memset(&pubkey, 0, 32); - CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0); CHECK(ecount == 1); CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); memset(&pubkey2, 0, 32); - CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 0); CHECK(ecount == 2); CHECK(memcmp(&pubkey2, zeros, sizeof(pubkey2)) == 0); /* Plain argument errors. 
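*/

/* Every tweak case above follows one rule: the new key is
 * (key + tweak) mod n for additive tweaks or (key * tweak) mod n for
 * multiplicative ones, and a zero result is an error that zeroizes the
 * output. A toy model over a small modulus (not the 256-bit group order),
 * assuming key and tweak are already reduced mod n: */
static int tweak_add_toy(unsigned int *key, unsigned int tweak, unsigned int n) {
    *key = (*key + tweak) % n;
    return *key != 0;            /* zero is not a valid secret key */
}
/* With key = n - 1 and tweak = 2 the sum wraps to 1, mirroring the
 * "wraps and results in a key of 1" case above; tweak = 1 would wrap to
 * zero and fail, mirroring the zeroizing cases. */

/*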
*/ ecount = 0; - CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_seckey_verify(ctx, ctmp) == 1); CHECK(ecount == 0); - CHECK(secp256k1_ec_seckey_verify(ctx, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_seckey_verify(ctx, NULL) == 0); CHECK(ecount == 1); ecount = 0; memset(ctmp2, 0, 32); ctmp2[31] = 4; - CHECK(secp256k1_ec_pubkey_tweak_add(ctx, NULL, ctmp2) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_tweak_add(ctx, NULL, ctmp2) == 0); CHECK(ecount == 1); - CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_tweak_add(ctx, &pubkey, NULL) == 0); CHECK(ecount == 2); ecount = 0; memset(ctmp2, 0, 32); ctmp2[31] = 4; - CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, NULL, ctmp2) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_tweak_mul(ctx, NULL, ctmp2) == 0); CHECK(ecount == 1); - CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_tweak_mul(ctx, &pubkey, NULL) == 0); CHECK(ecount == 2); ecount = 0; memset(ctmp2, 0, 32); - CHECK(secp256k1_ec_privkey_tweak_add(ctx, NULL, ctmp2) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_privkey_tweak_add(ctx, NULL, ctmp2) == 0); CHECK(ecount == 1); - CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_privkey_tweak_add(ctx, ctmp, NULL) == 0); CHECK(ecount == 2); ecount = 0; memset(ctmp2, 0, 32); ctmp2[31] = 1; - CHECK(secp256k1_ec_privkey_tweak_mul(ctx, NULL, ctmp2) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_privkey_tweak_mul(ctx, NULL, ctmp2) == 0); CHECK(ecount == 1); - CHECK(secp256k1_ec_privkey_tweak_mul(ctx, ctmp, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_privkey_tweak_mul(ctx, ctmp, NULL) == 0); CHECK(ecount == 2); ecount = 0; - CHECK(secp256k1_ec_pubkey_create(ctx, NULL, ctmp) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, NULL, ctmp) == 0); CHECK(ecount == 1); memset(&pubkey, 1, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey, NULL) == 0); CHECK(ecount == 2); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); - /* secp256k1_ec_pubkey_combine tests. */ + CHECK(memcmp(&pubkey, zeros, sizeof(rustsecp256k1_v0_1_0_pubkey)) == 0); + /* rustsecp256k1_v0_1_0_ec_pubkey_combine tests. 
*/ ecount = 0; pubkeys[0] = &pubkey_one; - VG_UNDEF(&pubkeys[0], sizeof(secp256k1_pubkey *)); - VG_UNDEF(&pubkeys[1], sizeof(secp256k1_pubkey *)); - VG_UNDEF(&pubkeys[2], sizeof(secp256k1_pubkey *)); - memset(&pubkey, 255, sizeof(secp256k1_pubkey)); - VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 0) == 0); - VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); + VG_UNDEF(&pubkeys[0], sizeof(rustsecp256k1_v0_1_0_pubkey *)); + VG_UNDEF(&pubkeys[1], sizeof(rustsecp256k1_v0_1_0_pubkey *)); + VG_UNDEF(&pubkeys[2], sizeof(rustsecp256k1_v0_1_0_pubkey *)); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_1_0_pubkey)); + VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_1_0_pubkey)); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 0) == 0); + VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_1_0_pubkey)); + CHECK(memcmp(&pubkey, zeros, sizeof(rustsecp256k1_v0_1_0_pubkey)) == 0); CHECK(ecount == 1); - CHECK(secp256k1_ec_pubkey_combine(ctx, NULL, pubkeys, 1) == 0); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_combine(ctx, NULL, pubkeys, 1) == 0); + CHECK(memcmp(&pubkey, zeros, sizeof(rustsecp256k1_v0_1_0_pubkey)) == 0); CHECK(ecount == 2); - memset(&pubkey, 255, sizeof(secp256k1_pubkey)); - VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, NULL, 1) == 0); - VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_1_0_pubkey)); + VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_1_0_pubkey)); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_combine(ctx, &pubkey, NULL, 1) == 0); + VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_1_0_pubkey)); + CHECK(memcmp(&pubkey, zeros, sizeof(rustsecp256k1_v0_1_0_pubkey)) == 0); CHECK(ecount == 3); pubkeys[0] = &pubkey_negone; - memset(&pubkey, 255, sizeof(secp256k1_pubkey)); - VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 1) == 1); - VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_1_0_pubkey)); + VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_1_0_pubkey)); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 1) == 1); + VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_1_0_pubkey)); + CHECK(memcmp(&pubkey, zeros, sizeof(rustsecp256k1_v0_1_0_pubkey)) > 0); CHECK(ecount == 3); len = 33; - CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); - CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_negone, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_negone, SECP256K1_EC_COMPRESSED) == 1); CHECK(memcmp(ctmp, ctmp2, 33) == 0); /* Result is infinity. 
*/ pubkeys[0] = &pubkey_one; pubkeys[1] = &pubkey_negone; - memset(&pubkey, 255, sizeof(secp256k1_pubkey)); - VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 0); - VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_1_0_pubkey)); + VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_1_0_pubkey)); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 0); + VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_1_0_pubkey)); + CHECK(memcmp(&pubkey, zeros, sizeof(rustsecp256k1_v0_1_0_pubkey)) == 0); CHECK(ecount == 3); /* Passes through infinity but comes out one. */ pubkeys[2] = &pubkey_one; - memset(&pubkey, 255, sizeof(secp256k1_pubkey)); - VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 3) == 1); - VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_1_0_pubkey)); + VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_1_0_pubkey)); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 3) == 1); + VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_1_0_pubkey)); + CHECK(memcmp(&pubkey, zeros, sizeof(rustsecp256k1_v0_1_0_pubkey)) > 0); CHECK(ecount == 3); len = 33; - CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); - CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_one, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_one, SECP256K1_EC_COMPRESSED) == 1); CHECK(memcmp(ctmp, ctmp2, 33) == 0); /* Adds to two. 
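*/

/* These combine cases check plain point addition: the output key is the
 * group sum of the inputs, and a sum equal to the point at infinity, e.g.
 * P + (-P), must be rejected. A sketch in terms of the internal group types,
 * not the library's implementation: */
static int pubkey_combine_sketch(rustsecp256k1_v0_1_0_ge *out,
                                 const rustsecp256k1_v0_1_0_ge *pts, size_t n) {
    rustsecp256k1_v0_1_0_gej acc;
    size_t i;
    rustsecp256k1_v0_1_0_gej_set_infinity(&acc);
    for (i = 0; i < n; i++) {
        rustsecp256k1_v0_1_0_gej_add_ge(&acc, &acc, &pts[i]);   /* acc += pts[i] */
    }
    if (rustsecp256k1_v0_1_0_gej_is_infinity(&acc)) {
        return 0;   /* infinity has no public key encoding; combine fails */
    }
    rustsecp256k1_v0_1_0_ge_set_gej(out, &acc);
    return 1;
}

/*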
*/ pubkeys[1] = &pubkey_one; - memset(&pubkey, 255, sizeof(secp256k1_pubkey)); - VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 1); - VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_1_0_pubkey)); + VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_1_0_pubkey)); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 1); + VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_1_0_pubkey)); + CHECK(memcmp(&pubkey, zeros, sizeof(rustsecp256k1_v0_1_0_pubkey)) > 0); CHECK(ecount == 3); - secp256k1_context_set_illegal_callback(ctx, NULL, NULL); + rustsecp256k1_v0_1_0_context_set_illegal_callback(ctx, NULL, NULL); } -void random_sign(secp256k1_scalar *sigr, secp256k1_scalar *sigs, const secp256k1_scalar *key, const secp256k1_scalar *msg, int *recid) { - secp256k1_scalar nonce; +void random_sign(rustsecp256k1_v0_1_0_scalar *sigr, rustsecp256k1_v0_1_0_scalar *sigs, const rustsecp256k1_v0_1_0_scalar *key, const rustsecp256k1_v0_1_0_scalar *msg, int *recid) { + rustsecp256k1_v0_1_0_scalar nonce; do { random_scalar_order_test(&nonce); - } while(!secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, sigr, sigs, key, msg, &nonce, recid)); + } while(!rustsecp256k1_v0_1_0_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, sigr, sigs, key, msg, &nonce, recid)); } void test_ecdsa_sign_verify(void) { - secp256k1_gej pubj; - secp256k1_ge pub; - secp256k1_scalar one; - secp256k1_scalar msg, key; - secp256k1_scalar sigr, sigs; + rustsecp256k1_v0_1_0_gej pubj; + rustsecp256k1_v0_1_0_ge pub; + rustsecp256k1_v0_1_0_scalar one; + rustsecp256k1_v0_1_0_scalar msg, key; + rustsecp256k1_v0_1_0_scalar sigr, sigs; int recid; int getrec; random_scalar_order_test(&msg); random_scalar_order_test(&key); - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pubj, &key); - secp256k1_ge_set_gej(&pub, &pubj); - getrec = secp256k1_rand_bits(1); + rustsecp256k1_v0_1_0_ecmult_gen(&ctx->ecmult_gen_ctx, &pubj, &key); + rustsecp256k1_v0_1_0_ge_set_gej(&pub, &pubj); + getrec = rustsecp256k1_v0_1_0_rand_bits(1); random_sign(&sigr, &sigs, &key, &msg, getrec?&recid:NULL); if (getrec) { CHECK(recid >= 0 && recid < 4); } - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &pub, &msg)); - secp256k1_scalar_set_int(&one, 1); - secp256k1_scalar_add(&msg, &msg, &one); - CHECK(!secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &pub, &msg)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &pub, &msg)); + rustsecp256k1_v0_1_0_scalar_set_int(&one, 1); + rustsecp256k1_v0_1_0_scalar_add(&msg, &msg, &one); + CHECK(!rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &pub, &msg)); } void run_ecdsa_sign_verify(void) { @@ -4178,9 +4178,9 @@ static int nonce_function_test_retry(unsigned char *nonce32, const unsigned char return nonce_function_rfc6979(nonce32, msg32, key32, algo16, data, counter - 5); } -int is_empty_signature(const secp256k1_ecdsa_signature *sig) { - static const unsigned char res[sizeof(secp256k1_ecdsa_signature)] = {0}; - return memcmp(sig, res, sizeof(secp256k1_ecdsa_signature)) == 0; +int is_empty_signature(const rustsecp256k1_v0_1_0_ecdsa_signature *sig) { + static const unsigned char res[sizeof(rustsecp256k1_v0_1_0_ecdsa_signature)] = {0}; + return memcmp(sig, res, sizeof(rustsecp256k1_v0_1_0_ecdsa_signature)) == 0; } void test_ecdsa_end_to_end(void) { @@ -4188,90 +4188,90 @@ void test_ecdsa_end_to_end(void) { unsigned char 
privkey[32]; unsigned char message[32]; unsigned char privkey2[32]; - secp256k1_ecdsa_signature signature[6]; - secp256k1_scalar r, s; + rustsecp256k1_v0_1_0_ecdsa_signature signature[6]; + rustsecp256k1_v0_1_0_scalar r, s; unsigned char sig[74]; size_t siglen = 74; unsigned char pubkeyc[65]; size_t pubkeyclen = 65; - secp256k1_pubkey pubkey; - secp256k1_pubkey pubkey_tmp; + rustsecp256k1_v0_1_0_pubkey pubkey; + rustsecp256k1_v0_1_0_pubkey pubkey_tmp; unsigned char seckey[300]; size_t seckeylen = 300; /* Generate a random key and message. */ { - secp256k1_scalar msg, key; + rustsecp256k1_v0_1_0_scalar msg, key; random_scalar_order_test(&msg); random_scalar_order_test(&key); - secp256k1_scalar_get_b32(privkey, &key); - secp256k1_scalar_get_b32(message, &msg); + rustsecp256k1_v0_1_0_scalar_get_b32(privkey, &key); + rustsecp256k1_v0_1_0_scalar_get_b32(message, &msg); } /* Construct and verify corresponding public key. */ - CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_seckey_verify(ctx, privkey) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey, privkey) == 1); /* Verify exporting and importing public key. */ - CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyc, &pubkeyclen, &pubkey, secp256k1_rand_bits(1) == 1 ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED)); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, pubkeyc, &pubkeyclen, &pubkey, rustsecp256k1_v0_1_0_rand_bits(1) == 1 ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED)); memset(&pubkey, 0, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1); /* Verify negation changes the key and changes it back */ memcpy(&pubkey_tmp, &pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_negate(ctx, &pubkey_tmp) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_negate(ctx, &pubkey_tmp) == 1); CHECK(memcmp(&pubkey_tmp, &pubkey, sizeof(pubkey)) != 0); - CHECK(secp256k1_ec_pubkey_negate(ctx, &pubkey_tmp) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_negate(ctx, &pubkey_tmp) == 1); CHECK(memcmp(&pubkey_tmp, &pubkey, sizeof(pubkey)) == 0); /* Verify private key import and export. */ - CHECK(ec_privkey_export_der(ctx, seckey, &seckeylen, privkey, secp256k1_rand_bits(1) == 1)); + CHECK(ec_privkey_export_der(ctx, seckey, &seckeylen, privkey, rustsecp256k1_v0_1_0_rand_bits(1) == 1)); CHECK(ec_privkey_import_der(ctx, privkey2, seckey, seckeylen) == 1); CHECK(memcmp(privkey, privkey2, 32) == 0); /* Optionally tweak the keys using addition. */ - if (secp256k1_rand_int(3) == 0) { + if (rustsecp256k1_v0_1_0_rand_int(3) == 0) { int ret1; int ret2; unsigned char rnd[32]; - secp256k1_pubkey pubkey2; - secp256k1_rand256_test(rnd); - ret1 = secp256k1_ec_privkey_tweak_add(ctx, privkey, rnd); - ret2 = secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, rnd); + rustsecp256k1_v0_1_0_pubkey pubkey2; + rustsecp256k1_v0_1_0_rand256_test(rnd); + ret1 = rustsecp256k1_v0_1_0_ec_privkey_tweak_add(ctx, privkey, rnd); + ret2 = rustsecp256k1_v0_1_0_ec_pubkey_tweak_add(ctx, &pubkey, rnd); CHECK(ret1 == ret2); if (ret1 == 0) { return; } - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey2, privkey) == 1); CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0); } /* Optionally tweak the keys using multiplication. 
*/ - if (secp256k1_rand_int(3) == 0) { + if (rustsecp256k1_v0_1_0_rand_int(3) == 0) { int ret1; int ret2; unsigned char rnd[32]; - secp256k1_pubkey pubkey2; - secp256k1_rand256_test(rnd); - ret1 = secp256k1_ec_privkey_tweak_mul(ctx, privkey, rnd); - ret2 = secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, rnd); + rustsecp256k1_v0_1_0_pubkey pubkey2; + rustsecp256k1_v0_1_0_rand256_test(rnd); + ret1 = rustsecp256k1_v0_1_0_ec_privkey_tweak_mul(ctx, privkey, rnd); + ret2 = rustsecp256k1_v0_1_0_ec_pubkey_tweak_mul(ctx, &pubkey, rnd); CHECK(ret1 == ret2); if (ret1 == 0) { return; } - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey2, privkey) == 1); CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0); } /* Sign. */ - CHECK(secp256k1_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1); - CHECK(secp256k1_ecdsa_sign(ctx, &signature[4], message, privkey, NULL, NULL) == 1); - CHECK(secp256k1_ecdsa_sign(ctx, &signature[1], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &signature[4], message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &signature[1], message, privkey, NULL, extra) == 1); extra[31] = 1; - CHECK(secp256k1_ecdsa_sign(ctx, &signature[2], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &signature[2], message, privkey, NULL, extra) == 1); extra[31] = 0; extra[0] = 1; - CHECK(secp256k1_ecdsa_sign(ctx, &signature[3], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &signature[3], message, privkey, NULL, extra) == 1); CHECK(memcmp(&signature[0], &signature[4], sizeof(signature[0])) == 0); CHECK(memcmp(&signature[0], &signature[1], sizeof(signature[0])) != 0); CHECK(memcmp(&signature[0], &signature[2], sizeof(signature[0])) != 0); @@ -4280,71 +4280,71 @@ void test_ecdsa_end_to_end(void) { CHECK(memcmp(&signature[1], &signature[3], sizeof(signature[0])) != 0); CHECK(memcmp(&signature[2], &signature[3], sizeof(signature[0])) != 0); /* Verify. 
*/ - CHECK(secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[1], message, &pubkey) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[2], message, &pubkey) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[3], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &signature[1], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &signature[2], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &signature[3], message, &pubkey) == 1); /* Test lower-S form, malleate, verify and fail, test again, malleate again */ - CHECK(!secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[0])); - secp256k1_ecdsa_signature_load(ctx, &r, &s, &signature[0]); - secp256k1_scalar_negate(&s, &s); - secp256k1_ecdsa_signature_save(&signature[5], &r, &s); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 0); - CHECK(secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[5])); - CHECK(secp256k1_ecdsa_signature_normalize(ctx, &signature[5], &signature[5])); - CHECK(!secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[5])); - CHECK(!secp256k1_ecdsa_signature_normalize(ctx, &signature[5], &signature[5])); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1); - secp256k1_scalar_negate(&s, &s); - secp256k1_ecdsa_signature_save(&signature[5], &r, &s); - CHECK(!secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[5])); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1); + CHECK(!rustsecp256k1_v0_1_0_ecdsa_signature_normalize(ctx, NULL, &signature[0])); + rustsecp256k1_v0_1_0_ecdsa_signature_load(ctx, &r, &s, &signature[0]); + rustsecp256k1_v0_1_0_scalar_negate(&s, &s); + rustsecp256k1_v0_1_0_ecdsa_signature_save(&signature[5], &r, &s); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_normalize(ctx, NULL, &signature[5])); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_normalize(ctx, &signature[5], &signature[5])); + CHECK(!rustsecp256k1_v0_1_0_ecdsa_signature_normalize(ctx, NULL, &signature[5])); + CHECK(!rustsecp256k1_v0_1_0_ecdsa_signature_normalize(ctx, &signature[5], &signature[5])); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1); + rustsecp256k1_v0_1_0_scalar_negate(&s, &s); + rustsecp256k1_v0_1_0_ecdsa_signature_save(&signature[5], &r, &s); + CHECK(!rustsecp256k1_v0_1_0_ecdsa_signature_normalize(ctx, NULL, &signature[5])); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1); CHECK(memcmp(&signature[5], &signature[0], 64) == 0); /* Serialize/parse DER and verify again */ - CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1); memset(&signature[0], 0, sizeof(signature[0])); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1); /* Serialize/destroy/parse DER and verify again. 
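*/

/* The malleation dance above hinges on low-S normalization: (r, s) and
 * (r, n - s) are both algebraically valid, verification accepts only the
 * low-S form, and normalize reports whether it had to flip s. A sketch in
 * terms of the scalar helpers used elsewhere in this file: */
static int signature_normalize_sketch(rustsecp256k1_v0_1_0_scalar *s) {
    int was_high = rustsecp256k1_v0_1_0_scalar_is_high(s);
    if (was_high) {
        rustsecp256k1_v0_1_0_scalar_negate(s, s);   /* s := n - s, the low-S form */
    }
    return was_high;   /* 1 iff the signature needed normalizing */
}

/*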
*/ siglen = 74; - CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1); - sig[secp256k1_rand_int(siglen)] += 1 + secp256k1_rand_int(255); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 0 || - secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1); + sig[rustsecp256k1_v0_1_0_rand_int(siglen)] += 1 + rustsecp256k1_v0_1_0_rand_int(255); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 0 || + rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 0); } void test_random_pubkeys(void) { - secp256k1_ge elem; - secp256k1_ge elem2; + rustsecp256k1_v0_1_0_ge elem; + rustsecp256k1_v0_1_0_ge elem2; unsigned char in[65]; /* Generate some randomly sized pubkeys. */ - size_t len = secp256k1_rand_bits(2) == 0 ? 65 : 33; - if (secp256k1_rand_bits(2) == 0) { - len = secp256k1_rand_bits(6); + size_t len = rustsecp256k1_v0_1_0_rand_bits(2) == 0 ? 65 : 33; + if (rustsecp256k1_v0_1_0_rand_bits(2) == 0) { + len = rustsecp256k1_v0_1_0_rand_bits(6); } if (len == 65) { - in[0] = secp256k1_rand_bits(1) ? 4 : (secp256k1_rand_bits(1) ? 6 : 7); + in[0] = rustsecp256k1_v0_1_0_rand_bits(1) ? 4 : (rustsecp256k1_v0_1_0_rand_bits(1) ? 6 : 7); } else { - in[0] = secp256k1_rand_bits(1) ? 2 : 3; + in[0] = rustsecp256k1_v0_1_0_rand_bits(1) ? 2 : 3; } - if (secp256k1_rand_bits(3) == 0) { - in[0] = secp256k1_rand_bits(8); + if (rustsecp256k1_v0_1_0_rand_bits(3) == 0) { + in[0] = rustsecp256k1_v0_1_0_rand_bits(8); } if (len > 1) { - secp256k1_rand256(&in[1]); + rustsecp256k1_v0_1_0_rand256(&in[1]); } if (len > 33) { - secp256k1_rand256(&in[33]); + rustsecp256k1_v0_1_0_rand256(&in[33]); } - if (secp256k1_eckey_pubkey_parse(&elem, in, len)) { + if (rustsecp256k1_v0_1_0_eckey_pubkey_parse(&elem, in, len)) { unsigned char out[65]; unsigned char firstb; int res; size_t size = len; firstb = in[0]; /* If the pubkey can be parsed, it should round-trip... */ - CHECK(secp256k1_eckey_pubkey_serialize(&elem, out, &size, len == 33)); + CHECK(rustsecp256k1_v0_1_0_eckey_pubkey_serialize(&elem, out, &size, len == 33)); CHECK(size == len); CHECK(memcmp(&in[1], &out[1], len-1) == 0); /* ... except for the type of hybrid inputs. */ @@ -4352,13 +4352,13 @@ void test_random_pubkeys(void) { CHECK(in[0] == out[0]); } size = 65; - CHECK(secp256k1_eckey_pubkey_serialize(&elem, in, &size, 0)); + CHECK(rustsecp256k1_v0_1_0_eckey_pubkey_serialize(&elem, in, &size, 0)); CHECK(size == 65); - CHECK(secp256k1_eckey_pubkey_parse(&elem2, in, size)); + CHECK(rustsecp256k1_v0_1_0_eckey_pubkey_parse(&elem2, in, size)); ge_equals_ge(&elem,&elem2); /* Check that the X9.62 hybrid type is checked. */ - in[0] = secp256k1_rand_bits(1) ? 6 : 7; - res = secp256k1_eckey_pubkey_parse(&elem2, in, size); + in[0] = rustsecp256k1_v0_1_0_rand_bits(1) ? 
6 : 7; + res = rustsecp256k1_v0_1_0_eckey_pubkey_parse(&elem2, in, size); if (firstb == 2 || firstb == 3) { if (in[0] == firstb + 4) { CHECK(res); @@ -4368,7 +4368,7 @@ void test_random_pubkeys(void) { } if (res) { ge_equals_ge(&elem,&elem2); - CHECK(secp256k1_eckey_pubkey_serialize(&elem, out, &size, 0)); + CHECK(rustsecp256k1_v0_1_0_eckey_pubkey_serialize(&elem, out, &size, 0)); CHECK(memcmp(&in[1], &out[1], 64) == 0); } } @@ -4401,13 +4401,13 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_ int ret = 0; - secp256k1_ecdsa_signature sig_der; + rustsecp256k1_v0_1_0_ecdsa_signature sig_der; unsigned char roundtrip_der[2048]; unsigned char compact_der[64]; size_t len_der = 2048; int parsed_der = 0, valid_der = 0, roundtrips_der = 0; - secp256k1_ecdsa_signature sig_der_lax; + rustsecp256k1_v0_1_0_ecdsa_signature sig_der_lax; unsigned char roundtrip_der_lax[2048]; unsigned char compact_der_lax[64]; size_t len_der_lax = 2048; @@ -4422,23 +4422,23 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_ int parsed_openssl, valid_openssl = 0, roundtrips_openssl = 0; #endif - parsed_der = secp256k1_ecdsa_signature_parse_der(ctx, &sig_der, sig, siglen); + parsed_der = rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig_der, sig, siglen); if (parsed_der) { - ret |= (!secp256k1_ecdsa_signature_serialize_compact(ctx, compact_der, &sig_der)) << 0; + ret |= (!rustsecp256k1_v0_1_0_ecdsa_signature_serialize_compact(ctx, compact_der, &sig_der)) << 0; valid_der = (memcmp(compact_der, zeroes, 32) != 0) && (memcmp(compact_der + 32, zeroes, 32) != 0); } if (valid_der) { - ret |= (!secp256k1_ecdsa_signature_serialize_der(ctx, roundtrip_der, &len_der, &sig_der)) << 1; + ret |= (!rustsecp256k1_v0_1_0_ecdsa_signature_serialize_der(ctx, roundtrip_der, &len_der, &sig_der)) << 1; roundtrips_der = (len_der == siglen) && memcmp(roundtrip_der, sig, siglen) == 0; } - parsed_der_lax = ecdsa_signature_parse_der_lax(ctx, &sig_der_lax, sig, siglen); + parsed_der_lax = rustsecp256k1_v0_1_0_ecdsa_signature_parse_der_lax(ctx, &sig_der_lax, sig, siglen); if (parsed_der_lax) { - ret |= (!secp256k1_ecdsa_signature_serialize_compact(ctx, compact_der_lax, &sig_der_lax)) << 10; + ret |= (!rustsecp256k1_v0_1_0_ecdsa_signature_serialize_compact(ctx, compact_der_lax, &sig_der_lax)) << 10; valid_der_lax = (memcmp(compact_der_lax, zeroes, 32) != 0) && (memcmp(compact_der_lax + 32, zeroes, 32) != 0); } if (valid_der_lax) { - ret |= (!secp256k1_ecdsa_signature_serialize_der(ctx, roundtrip_der_lax, &len_der_lax, &sig_der_lax)) << 11; + ret |= (!rustsecp256k1_v0_1_0_ecdsa_signature_serialize_der(ctx, roundtrip_der_lax, &len_der_lax, &sig_der_lax)) << 11; roundtrips_der_lax = (len_der_lax == siglen) && memcmp(roundtrip_der_lax, sig, siglen) == 0; } @@ -4516,27 +4516,27 @@ static void assign_big_endian(unsigned char *ptr, size_t ptrlen, uint32_t val) { static void damage_array(unsigned char *sig, size_t *len) { int pos; - int action = secp256k1_rand_bits(3); + int action = rustsecp256k1_v0_1_0_rand_bits(3); if (action < 1 && *len > 3) { /* Delete a byte. */ - pos = secp256k1_rand_int(*len); + pos = rustsecp256k1_v0_1_0_rand_int(*len); memmove(sig + pos, sig + pos + 1, *len - pos - 1); (*len)--; return; } else if (action < 2 && *len < 2048) { /* Insert a byte. 
*/ - pos = secp256k1_rand_int(1 + *len); + pos = rustsecp256k1_v0_1_0_rand_int(1 + *len); memmove(sig + pos + 1, sig + pos, *len - pos); - sig[pos] = secp256k1_rand_bits(8); + sig[pos] = rustsecp256k1_v0_1_0_rand_bits(8); (*len)++; return; } else if (action < 4) { /* Modify a byte. */ - sig[secp256k1_rand_int(*len)] += 1 + secp256k1_rand_int(255); + sig[rustsecp256k1_v0_1_0_rand_int(*len)] += 1 + rustsecp256k1_v0_1_0_rand_int(255); return; } else { /* action < 8 */ /* Modify a bit. */ - sig[secp256k1_rand_int(*len)] ^= 1 << secp256k1_rand_bits(3); + sig[rustsecp256k1_v0_1_0_rand_int(*len)] ^= 1 << rustsecp256k1_v0_1_0_rand_bits(3); return; } } @@ -4549,23 +4549,23 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly int n; *len = 0; - der = secp256k1_rand_bits(2) == 0; + der = rustsecp256k1_v0_1_0_rand_bits(2) == 0; *certainly_der = der; *certainly_not_der = 0; - indet = der ? 0 : secp256k1_rand_int(10) == 0; + indet = der ? 0 : rustsecp256k1_v0_1_0_rand_int(10) == 0; for (n = 0; n < 2; n++) { /* We generate two classes of numbers: nlow==1 "low" ones (up to 32 bytes), nlow==0 "high" ones (32 bytes with 129 top bits set, or larger than 32 bytes) */ - nlow[n] = der ? 1 : (secp256k1_rand_bits(3) != 0); + nlow[n] = der ? 1 : (rustsecp256k1_v0_1_0_rand_bits(3) != 0); /* The length of the number in bytes (the first byte of which will always be nonzero) */ - nlen[n] = nlow[n] ? secp256k1_rand_int(33) : 32 + secp256k1_rand_int(200) * secp256k1_rand_int(8) / 8; + nlen[n] = nlow[n] ? rustsecp256k1_v0_1_0_rand_int(33) : 32 + rustsecp256k1_v0_1_0_rand_int(200) * rustsecp256k1_v0_1_0_rand_int(8) / 8; CHECK(nlen[n] <= 232); /* The top bit of the number. */ - nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 0 : secp256k1_rand_bits(1)); + nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 0 : rustsecp256k1_v0_1_0_rand_bits(1)); /* The top byte of the number (after the potential hardcoded 16 0xFF characters for "high" 32 bytes numbers) */ - nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + secp256k1_rand_bits(7) : 1 + secp256k1_rand_int(127)); + nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + rustsecp256k1_v0_1_0_rand_bits(7) : 1 + rustsecp256k1_v0_1_0_rand_int(127)); /* The number of zero bytes in front of the number (which is 0 or 1 in case of DER, otherwise we extend up to 300 bytes) */ - nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? secp256k1_rand_int(3) : secp256k1_rand_int(300 - nlen[n]) * secp256k1_rand_int(8) / 8); + nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? rustsecp256k1_v0_1_0_rand_int(3) : rustsecp256k1_v0_1_0_rand_int(300 - nlen[n]) * rustsecp256k1_v0_1_0_rand_int(8) / 8); if (nzlen[n] > ((nlen[n] == 0 || nhbit[n]) ? 1 : 0)) { *certainly_not_der = 1; } @@ -4574,7 +4574,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly nlenlen[n] = nlen[n] + nzlen[n] < 128 ? 0 : (nlen[n] + nzlen[n] < 256 ? 1 : 2); if (!der) { /* nlenlen[n] max 127 bytes */ - int add = secp256k1_rand_int(127 - nlenlen[n]) * secp256k1_rand_int(16) * secp256k1_rand_int(16) / 256; + int add = rustsecp256k1_v0_1_0_rand_int(127 - nlenlen[n]) * rustsecp256k1_v0_1_0_rand_int(16) * rustsecp256k1_v0_1_0_rand_int(16) / 256; nlenlen[n] += add; if (add != 0) { *certainly_not_der = 1; @@ -4588,7 +4588,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly CHECK(tlen <= 856); /* The length of the garbage inside the tuple. */ - elen = (der || indet) ? 
0 : secp256k1_rand_int(980 - tlen) * secp256k1_rand_int(8) / 8; + elen = (der || indet) ? 0 : rustsecp256k1_v0_1_0_rand_int(980 - tlen) * rustsecp256k1_v0_1_0_rand_int(8) / 8; if (elen != 0) { *certainly_not_der = 1; } @@ -4596,7 +4596,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly CHECK(tlen <= 980); /* The length of the garbage after the end of the tuple. */ - glen = der ? 0 : secp256k1_rand_int(990 - tlen) * secp256k1_rand_int(8) / 8; + glen = der ? 0 : rustsecp256k1_v0_1_0_rand_int(990 - tlen) * rustsecp256k1_v0_1_0_rand_int(8) / 8; if (glen != 0) { *certainly_not_der = 1; } @@ -4611,7 +4611,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly } else { int tlenlen = tlen < 128 ? 0 : (tlen < 256 ? 1 : 2); if (!der) { - int add = secp256k1_rand_int(127 - tlenlen) * secp256k1_rand_int(16) * secp256k1_rand_int(16) / 256; + int add = rustsecp256k1_v0_1_0_rand_int(127 - tlenlen) * rustsecp256k1_v0_1_0_rand_int(16) * rustsecp256k1_v0_1_0_rand_int(16) / 256; tlenlen += add; if (add != 0) { *certainly_not_der = 1; @@ -4662,13 +4662,13 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly nlen[n]--; } /* Generate remaining random bytes of number */ - secp256k1_rand_bytes_test(sig + *len, nlen[n]); + rustsecp256k1_v0_1_0_rand_bytes_test(sig + *len, nlen[n]); *len += nlen[n]; nlen[n] = 0; } /* Generate random garbage inside tuple. */ - secp256k1_rand_bytes_test(sig + *len, elen); + rustsecp256k1_v0_1_0_rand_bytes_test(sig + *len, elen); *len += elen; /* Generate end-of-contents bytes. */ @@ -4680,7 +4680,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly CHECK(tlen + glen <= 1121); /* Generate random garbage outside tuple. */ - secp256k1_rand_bytes_test(sig + *len, glen); + rustsecp256k1_v0_1_0_rand_bytes_test(sig + *len, glen); *len += glen; tlen += glen; CHECK(tlen <= 1121); @@ -4721,22 +4721,22 @@ void run_ecdsa_der_parse(void) { /* Tests several edge cases. */ void test_ecdsa_edge_cases(void) { int t; - secp256k1_ecdsa_signature sig; + rustsecp256k1_v0_1_0_ecdsa_signature sig; /* Test the case where ECDSA recomputes a point that is infinity. */ { - secp256k1_gej keyj; - secp256k1_ge key; - secp256k1_scalar msg; - secp256k1_scalar sr, ss; - secp256k1_scalar_set_int(&ss, 1); - secp256k1_scalar_negate(&ss, &ss); - secp256k1_scalar_inverse(&ss, &ss); - secp256k1_scalar_set_int(&sr, 1); - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &keyj, &sr); - secp256k1_ge_set_gej(&key, &keyj); + rustsecp256k1_v0_1_0_gej keyj; + rustsecp256k1_v0_1_0_ge key; + rustsecp256k1_v0_1_0_scalar msg; + rustsecp256k1_v0_1_0_scalar sr, ss; + rustsecp256k1_v0_1_0_scalar_set_int(&ss, 1); + rustsecp256k1_v0_1_0_scalar_negate(&ss, &ss); + rustsecp256k1_v0_1_0_scalar_inverse(&ss, &ss); + rustsecp256k1_v0_1_0_scalar_set_int(&sr, 1); + rustsecp256k1_v0_1_0_ecmult_gen(&ctx->ecmult_gen_ctx, &keyj, &sr); + rustsecp256k1_v0_1_0_ge_set_gej(&key, &keyj); msg = ss; - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); } /* Verify signature with r of zero fails. 
*/ @@ -4748,14 +4748,14 @@ void test_ecdsa_edge_cases(void) { 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41 }; - secp256k1_ge key; - secp256k1_scalar msg; - secp256k1_scalar sr, ss; - secp256k1_scalar_set_int(&ss, 1); - secp256k1_scalar_set_int(&msg, 0); - secp256k1_scalar_set_int(&sr, 0); - CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey_mods_zero, 33)); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); + rustsecp256k1_v0_1_0_ge key; + rustsecp256k1_v0_1_0_scalar msg; + rustsecp256k1_v0_1_0_scalar sr, ss; + rustsecp256k1_v0_1_0_scalar_set_int(&ss, 1); + rustsecp256k1_v0_1_0_scalar_set_int(&msg, 0); + rustsecp256k1_v0_1_0_scalar_set_int(&sr, 0); + CHECK(rustsecp256k1_v0_1_0_eckey_pubkey_parse(&key, pubkey_mods_zero, 33)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); } /* Verify signature with s of zero fails. */ @@ -4767,14 +4767,14 @@ void test_ecdsa_edge_cases(void) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }; - secp256k1_ge key; - secp256k1_scalar msg; - secp256k1_scalar sr, ss; - secp256k1_scalar_set_int(&ss, 0); - secp256k1_scalar_set_int(&msg, 0); - secp256k1_scalar_set_int(&sr, 1); - CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); + rustsecp256k1_v0_1_0_ge key; + rustsecp256k1_v0_1_0_scalar msg; + rustsecp256k1_v0_1_0_scalar sr, ss; + rustsecp256k1_v0_1_0_scalar_set_int(&ss, 0); + rustsecp256k1_v0_1_0_scalar_set_int(&msg, 0); + rustsecp256k1_v0_1_0_scalar_set_int(&sr, 1); + CHECK(rustsecp256k1_v0_1_0_eckey_pubkey_parse(&key, pubkey, 33)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); } /* Verify signature with message 0 passes. 
*/ @@ -4793,23 +4793,23 @@ void test_ecdsa_edge_cases(void) { 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x43 }; - secp256k1_ge key; - secp256k1_ge key2; - secp256k1_scalar msg; - secp256k1_scalar sr, ss; - secp256k1_scalar_set_int(&ss, 2); - secp256k1_scalar_set_int(&msg, 0); - secp256k1_scalar_set_int(&sr, 2); - CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(secp256k1_eckey_pubkey_parse(&key2, pubkey2, 33)); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); - secp256k1_scalar_negate(&ss, &ss); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); - secp256k1_scalar_set_int(&ss, 1); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 0); + rustsecp256k1_v0_1_0_ge key; + rustsecp256k1_v0_1_0_ge key2; + rustsecp256k1_v0_1_0_scalar msg; + rustsecp256k1_v0_1_0_scalar sr, ss; + rustsecp256k1_v0_1_0_scalar_set_int(&ss, 2); + rustsecp256k1_v0_1_0_scalar_set_int(&msg, 0); + rustsecp256k1_v0_1_0_scalar_set_int(&sr, 2); + CHECK(rustsecp256k1_v0_1_0_eckey_pubkey_parse(&key, pubkey, 33)); + CHECK(rustsecp256k1_v0_1_0_eckey_pubkey_parse(&key2, pubkey2, 33)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); + rustsecp256k1_v0_1_0_scalar_negate(&ss, &ss); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); + rustsecp256k1_v0_1_0_scalar_set_int(&ss, 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 0); } /* Verify signature with message 1 passes. 
*/ @@ -4834,24 +4834,24 @@ void test_ecdsa_edge_cases(void) { 0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4, 0x40, 0x2d, 0xa1, 0x72, 0x2f, 0xc9, 0xba, 0xeb }; - secp256k1_ge key; - secp256k1_ge key2; - secp256k1_scalar msg; - secp256k1_scalar sr, ss; - secp256k1_scalar_set_int(&ss, 1); - secp256k1_scalar_set_int(&msg, 1); - secp256k1_scalar_set_b32(&sr, csr, NULL); - CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(secp256k1_eckey_pubkey_parse(&key2, pubkey2, 33)); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); - secp256k1_scalar_negate(&ss, &ss); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); - secp256k1_scalar_set_int(&ss, 2); - secp256k1_scalar_inverse_var(&ss, &ss); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 0); + rustsecp256k1_v0_1_0_ge key; + rustsecp256k1_v0_1_0_ge key2; + rustsecp256k1_v0_1_0_scalar msg; + rustsecp256k1_v0_1_0_scalar sr, ss; + rustsecp256k1_v0_1_0_scalar_set_int(&ss, 1); + rustsecp256k1_v0_1_0_scalar_set_int(&msg, 1); + rustsecp256k1_v0_1_0_scalar_set_b32(&sr, csr, NULL); + CHECK(rustsecp256k1_v0_1_0_eckey_pubkey_parse(&key, pubkey, 33)); + CHECK(rustsecp256k1_v0_1_0_eckey_pubkey_parse(&key2, pubkey2, 33)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); + rustsecp256k1_v0_1_0_scalar_negate(&ss, &ss); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); + rustsecp256k1_v0_1_0_scalar_set_int(&ss, 2); + rustsecp256k1_v0_1_0_scalar_inverse_var(&ss, &ss); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 0); } /* Verify signature with message -1 passes. 
*/ @@ -4869,25 +4869,25 @@ void test_ecdsa_edge_cases(void) { 0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4, 0x40, 0x2d, 0xa1, 0x72, 0x2f, 0xc9, 0xba, 0xee }; - secp256k1_ge key; - secp256k1_scalar msg; - secp256k1_scalar sr, ss; - secp256k1_scalar_set_int(&ss, 1); - secp256k1_scalar_set_int(&msg, 1); - secp256k1_scalar_negate(&msg, &msg); - secp256k1_scalar_set_b32(&sr, csr, NULL); - CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); - secp256k1_scalar_negate(&ss, &ss); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); - secp256k1_scalar_set_int(&ss, 3); - secp256k1_scalar_inverse_var(&ss, &ss); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); + rustsecp256k1_v0_1_0_ge key; + rustsecp256k1_v0_1_0_scalar msg; + rustsecp256k1_v0_1_0_scalar sr, ss; + rustsecp256k1_v0_1_0_scalar_set_int(&ss, 1); + rustsecp256k1_v0_1_0_scalar_set_int(&msg, 1); + rustsecp256k1_v0_1_0_scalar_negate(&msg, &msg); + rustsecp256k1_v0_1_0_scalar_set_b32(&sr, csr, NULL); + CHECK(rustsecp256k1_v0_1_0_eckey_pubkey_parse(&key, pubkey, 33)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); + rustsecp256k1_v0_1_0_scalar_negate(&ss, &ss); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); + rustsecp256k1_v0_1_0_scalar_set_int(&ss, 3); + rustsecp256k1_v0_1_0_scalar_inverse_var(&ss, &ss); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); } /* Signature where s would be zero. */ { - secp256k1_pubkey pubkey; + rustsecp256k1_v0_1_0_pubkey pubkey; size_t siglen; int32_t ecount; unsigned char signature[72]; @@ -4916,71 +4916,71 @@ void test_ecdsa_edge_cases(void) { 0x65, 0xdf, 0xdd, 0x31, 0xb9, 0x3e, 0x29, 0xa9, }; ecount = 0; - secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); - CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 0); - CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 0); + rustsecp256k1_v0_1_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 0); msg[31] = 0xaa; - CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 1); CHECK(ecount == 0); - CHECK(secp256k1_ecdsa_sign(ctx, NULL, msg, key, precomputed_nonce_function, nonce2) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, NULL, msg, key, precomputed_nonce_function, nonce2) == 0); CHECK(ecount == 1); - CHECK(secp256k1_ecdsa_sign(ctx, &sig, NULL, key, precomputed_nonce_function, nonce2) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &sig, NULL, key, precomputed_nonce_function, nonce2) == 0); CHECK(ecount == 2); - CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, NULL, precomputed_nonce_function, nonce2) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &sig, msg, NULL, precomputed_nonce_function, nonce2) == 0); CHECK(ecount == 3); - CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 1); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, key) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, NULL, 
msg, &pubkey) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 1); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey, key) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, NULL, msg, &pubkey) == 0); CHECK(ecount == 4); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, NULL, &pubkey) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, NULL, &pubkey) == 0); CHECK(ecount == 5); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg, NULL) == 0); CHECK(ecount == 6); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg, &pubkey) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg, &pubkey) == 1); CHECK(ecount == 6); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey, NULL) == 0); CHECK(ecount == 7); /* That pubkeyload fails via an ARGCHECK is a little odd but makes sense because pubkeys are an opaque data type. */ - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg, &pubkey) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg, &pubkey) == 0); CHECK(ecount == 8); siglen = 72; - CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, NULL, &siglen, &sig) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_serialize_der(ctx, NULL, &siglen, &sig) == 0); CHECK(ecount == 9); - CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, NULL, &sig) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_serialize_der(ctx, signature, NULL, &sig) == 0); CHECK(ecount == 10); - CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, &siglen, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_serialize_der(ctx, signature, &siglen, NULL) == 0); CHECK(ecount == 11); - CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 1); CHECK(ecount == 11); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, NULL, signature, siglen) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, NULL, signature, siglen) == 0); CHECK(ecount == 12); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, NULL, siglen) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, NULL, siglen) == 0); CHECK(ecount == 13); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, signature, siglen) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, signature, siglen) == 1); CHECK(ecount == 13); siglen = 10; /* Too little room for a signature does not fail via ARGCHECK. 
*/ - CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 0); CHECK(ecount == 13); ecount = 0; - CHECK(secp256k1_ecdsa_signature_normalize(ctx, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_normalize(ctx, NULL, NULL) == 0); CHECK(ecount == 1); - CHECK(secp256k1_ecdsa_signature_serialize_compact(ctx, NULL, &sig) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_serialize_compact(ctx, NULL, &sig) == 0); CHECK(ecount == 2); - CHECK(secp256k1_ecdsa_signature_serialize_compact(ctx, signature, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_serialize_compact(ctx, signature, NULL) == 0); CHECK(ecount == 3); - CHECK(secp256k1_ecdsa_signature_serialize_compact(ctx, signature, &sig) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_serialize_compact(ctx, signature, &sig) == 1); CHECK(ecount == 3); - CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, NULL, signature) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_compact(ctx, NULL, signature) == 0); CHECK(ecount == 4); - CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, &sig, NULL) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_compact(ctx, &sig, NULL) == 0); CHECK(ecount == 5); - CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, &sig, signature) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_compact(ctx, &sig, signature) == 1); CHECK(ecount == 5); memset(signature, 255, 64); - CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, &sig, signature) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_compact(ctx, &sig, signature) == 0); CHECK(ecount == 5); - secp256k1_context_set_illegal_callback(ctx, NULL, NULL); + rustsecp256k1_v0_1_0_context_set_illegal_callback(ctx, NULL, NULL); } /* Nonce function corner cases. */ @@ -4989,43 +4989,43 @@ void test_ecdsa_edge_cases(void) { int i; unsigned char key[32]; unsigned char msg[32]; - secp256k1_ecdsa_signature sig2; - secp256k1_scalar sr[512], ss; + rustsecp256k1_v0_1_0_ecdsa_signature sig2; + rustsecp256k1_v0_1_0_scalar sr[512], ss; const unsigned char *extra; extra = t == 0 ? NULL : zero; memset(msg, 0, 32); msg[31] = 1; /* High key results in signature failure. */ memset(key, 0xFF, 32); - CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0); CHECK(is_empty_signature(&sig)); /* Zero key results in signature failure. */ memset(key, 0, 32); - CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0); CHECK(is_empty_signature(&sig)); /* Nonce function failure results in signature failure. */ key[31] = 1; - CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_fail, extra) == 0); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_fail, extra) == 0); CHECK(is_empty_signature(&sig)); /* The retry loop successfully makes its way to the first good value. 
*/ - CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_retry, extra) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_retry, extra) == 1); CHECK(!is_empty_signature(&sig)); - CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, nonce_function_rfc6979, extra) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &sig2, msg, key, nonce_function_rfc6979, extra) == 1); CHECK(!is_empty_signature(&sig2)); CHECK(memcmp(&sig, &sig2, sizeof(sig)) == 0); /* The default nonce function is deterministic. */ - CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); CHECK(!is_empty_signature(&sig2)); CHECK(memcmp(&sig, &sig2, sizeof(sig)) == 0); /* The default nonce function changes output with different messages. */ for(i = 0; i < 256; i++) { int j; msg[0] = i; - CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); CHECK(!is_empty_signature(&sig2)); - secp256k1_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2); + rustsecp256k1_v0_1_0_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2); for (j = 0; j < i; j++) { - CHECK(!secp256k1_scalar_eq(&sr[i], &sr[j])); + CHECK(!rustsecp256k1_v0_1_0_scalar_eq(&sr[i], &sr[j])); } } msg[0] = 0; @@ -5034,11 +5034,11 @@ void test_ecdsa_edge_cases(void) { for(i = 256; i < 512; i++) { int j; key[0] = i - 256; - CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); CHECK(!is_empty_signature(&sig2)); - secp256k1_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2); + rustsecp256k1_v0_1_0_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2); for (j = 0; j < i; j++) { - CHECK(!secp256k1_scalar_eq(&sr[i], &sr[j])); + CHECK(!rustsecp256k1_v0_1_0_scalar_eq(&sr[i], &sr[j])); } } key[0] = 0; @@ -5097,7 +5097,7 @@ EC_KEY *get_openssl_key(const unsigned char *key32) { unsigned char privkey[300]; size_t privkeylen; const unsigned char* pbegin = privkey; - int compr = secp256k1_rand_bits(1); + int compr = rustsecp256k1_v0_1_0_rand_bits(1); EC_KEY *ec_key = EC_KEY_new_by_curve_name(NID_secp256k1); CHECK(ec_privkey_export_der(ctx, privkey, &privkeylen, key32, compr)); CHECK(d2i_ECPrivateKey(&ec_key, &pbegin, privkeylen)); @@ -5106,35 +5106,35 @@ EC_KEY *get_openssl_key(const unsigned char *key32) { } void test_ecdsa_openssl(void) { - secp256k1_gej qj; - secp256k1_ge q; - secp256k1_scalar sigr, sigs; - secp256k1_scalar one; - secp256k1_scalar msg2; - secp256k1_scalar key, msg; + rustsecp256k1_v0_1_0_gej qj; + rustsecp256k1_v0_1_0_ge q; + rustsecp256k1_v0_1_0_scalar sigr, sigs; + rustsecp256k1_v0_1_0_scalar one; + rustsecp256k1_v0_1_0_scalar msg2; + rustsecp256k1_v0_1_0_scalar key, msg; EC_KEY *ec_key; unsigned int sigsize = 80; size_t secp_sigsize = 80; unsigned char message[32]; unsigned char signature[80]; unsigned char key32[32]; - secp256k1_rand256_test(message); - secp256k1_scalar_set_b32(&msg, message, NULL); + rustsecp256k1_v0_1_0_rand256_test(message); + rustsecp256k1_v0_1_0_scalar_set_b32(&msg, message, NULL); random_scalar_order_test(&key); - secp256k1_scalar_get_b32(key32, &key); - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &qj, &key); - secp256k1_ge_set_gej(&q, &qj); + rustsecp256k1_v0_1_0_scalar_get_b32(key32, &key); + rustsecp256k1_v0_1_0_ecmult_gen(&ctx->ecmult_gen_ctx, &qj, &key); + rustsecp256k1_v0_1_0_ge_set_gej(&q, &qj); ec_key = get_openssl_key(key32); 
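For reference, the nonce and malleability checks renamed above follow from the standard ECDSA relations (nothing here is part of the patch). With secret key $d$, nonce $k$, message hash $H(m)$ and group order $n$:

    $r = (k \cdot G)_x \bmod n, \qquad s \equiv k^{-1}\,(H(m) + r\,d) \pmod{n}$

Because $(r, s)$ and $(r, n-s)$ satisfy the verification equation equally well, rustsecp256k1_v0_1_0_ecdsa_verify accepts only the low-S twin ($s \le n/2$), and rustsecp256k1_v0_1_0_ecdsa_signature_normalize maps a high $s$ to $n - s$. The default nonce function is RFC6979, so signing the same (key, message) pair twice must produce byte-identical signatures; that is exactly what the memcmp checks assert, and why supplying different extra entropy must produce distinct ones.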
CHECK(ec_key != NULL); CHECK(ECDSA_sign(0, message, sizeof(message), signature, &sigsize, ec_key)); - CHECK(secp256k1_ecdsa_sig_parse(&sigr, &sigs, signature, sigsize)); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &q, &msg)); - secp256k1_scalar_set_int(&one, 1); - secp256k1_scalar_add(&msg2, &msg, &one); - CHECK(!secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &q, &msg2)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_parse(&sigr, &sigs, signature, sigsize)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &q, &msg)); + rustsecp256k1_v0_1_0_scalar_set_int(&one, 1); + rustsecp256k1_v0_1_0_scalar_add(&msg2, &msg, &one); + CHECK(!rustsecp256k1_v0_1_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &q, &msg2)); random_sign(&sigr, &sigs, &key, &msg, NULL); - CHECK(secp256k1_ecdsa_sig_serialize(signature, &secp_sigsize, &sigr, &sigs)); + CHECK(rustsecp256k1_v0_1_0_ecdsa_sig_serialize(signature, &secp_sigsize, &sigr, &sigs)); CHECK(ECDSA_verify(0, message, sizeof(message), signature, secp_sigsize, ec_key) == 1); EC_KEY_free(ec_key); @@ -5196,7 +5196,7 @@ int main(int argc, char **argv) { fclose(frand); } } - secp256k1_rand_seed(seed16); + rustsecp256k1_v0_1_0_rand_seed(seed16); printf("test count = %i\n", count); printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], seed16[13], seed16[14], seed16[15]); @@ -5205,10 +5205,10 @@ int main(int argc, char **argv) { run_context_tests(0); run_context_tests(1); run_scratch_tests(); - ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - if (secp256k1_rand_bits(1)) { - secp256k1_rand256(run32); - CHECK(secp256k1_context_randomize(ctx, secp256k1_rand_bits(1) ? run32 : NULL)); + ctx = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + if (rustsecp256k1_v0_1_0_rand_bits(1)) { + rustsecp256k1_v0_1_0_rand256(run32); + CHECK(rustsecp256k1_v0_1_0_context_randomize(ctx, rustsecp256k1_v0_1_0_rand_bits(1) ? 
run32 : NULL)); } run_rand_bits(); @@ -5280,11 +5280,11 @@ int main(int argc, char **argv) { run_recovery_tests(); #endif - secp256k1_rand256(run32); + rustsecp256k1_v0_1_0_rand256(run32); printf("random run = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", run32[0], run32[1], run32[2], run32[3], run32[4], run32[5], run32[6], run32[7], run32[8], run32[9], run32[10], run32[11], run32[12], run32[13], run32[14], run32[15]); /* shutdown */ - secp256k1_context_destroy(ctx); + rustsecp256k1_v0_1_0_context_destroy(ctx); printf("no problems found\n"); return 0; diff --git a/secp256k1-sys/depend/secp256k1/src/tests_exhaustive.c b/secp256k1-sys/depend/secp256k1/src/tests_exhaustive.c index b44e357..8eda9ed 100644 --- a/secp256k1-sys/depend/secp256k1/src/tests_exhaustive.c +++ b/secp256k1-sys/depend/secp256k1/src/tests_exhaustive.c @@ -32,47 +32,47 @@ #endif /** stolen from tests.c */ -void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) { +void ge_equals_ge(const rustsecp256k1_v0_1_0_ge *a, const rustsecp256k1_v0_1_0_ge *b) { CHECK(a->infinity == b->infinity); if (a->infinity) { return; } - CHECK(secp256k1_fe_equal_var(&a->x, &b->x)); - CHECK(secp256k1_fe_equal_var(&a->y, &b->y)); + CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&a->x, &b->x)); + CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&a->y, &b->y)); } -void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) { - secp256k1_fe z2s; - secp256k1_fe u1, u2, s1, s2; +void ge_equals_gej(const rustsecp256k1_v0_1_0_ge *a, const rustsecp256k1_v0_1_0_gej *b) { + rustsecp256k1_v0_1_0_fe z2s; + rustsecp256k1_v0_1_0_fe u1, u2, s1, s2; CHECK(a->infinity == b->infinity); if (a->infinity) { return; } /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. */ - secp256k1_fe_sqr(&z2s, &b->z); - secp256k1_fe_mul(&u1, &a->x, &z2s); - u2 = b->x; secp256k1_fe_normalize_weak(&u2); - secp256k1_fe_mul(&s1, &a->y, &z2s); secp256k1_fe_mul(&s1, &s1, &b->z); - s2 = b->y; secp256k1_fe_normalize_weak(&s2); - CHECK(secp256k1_fe_equal_var(&u1, &u2)); - CHECK(secp256k1_fe_equal_var(&s1, &s2)); + rustsecp256k1_v0_1_0_fe_sqr(&z2s, &b->z); + rustsecp256k1_v0_1_0_fe_mul(&u1, &a->x, &z2s); + u2 = b->x; rustsecp256k1_v0_1_0_fe_normalize_weak(&u2); + rustsecp256k1_v0_1_0_fe_mul(&s1, &a->y, &z2s); rustsecp256k1_v0_1_0_fe_mul(&s1, &s1, &b->z); + s2 = b->y; rustsecp256k1_v0_1_0_fe_normalize_weak(&s2); + CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&u1, &u2)); + CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&s1, &s2)); } -void random_fe(secp256k1_fe *x) { +void random_fe(rustsecp256k1_v0_1_0_fe *x) { unsigned char bin[32]; do { - secp256k1_rand256(bin); - if (secp256k1_fe_set_b32(x, bin)) { + rustsecp256k1_v0_1_0_rand256(bin); + if (rustsecp256k1_v0_1_0_fe_set_b32(x, bin)) { return; } } while(1); } /** END stolen from tests.c */ -int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32, +int rustsecp256k1_v0_1_0_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int attempt) { - secp256k1_scalar s; + rustsecp256k1_v0_1_0_scalar s; int *idata = data; (void)msg32; (void)key32; @@ -84,97 +84,97 @@ int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned cha if (attempt > 0) { *idata = (*idata + 1) % EXHAUSTIVE_TEST_ORDER; } - secp256k1_scalar_set_int(&s, *idata); - secp256k1_scalar_get_b32(nonce32, &s); + rustsecp256k1_v0_1_0_scalar_set_int(&s, *idata); + rustsecp256k1_v0_1_0_scalar_get_b32(nonce32, &s); 
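The exhaustive tests that follow operate on a tiny cyclic group (order 13 upstream, unless EXHAUSTIVE_TEST_ORDER is overridden at build time), enumerating every point as $P_i = i \cdot G$ and brute-forcing each property. As a sketch of the identities being checked, with $n$ the test order and $\lambda$ the endomorphism constant:

    $P_i + P_j = P_{(i+j) \bmod n}, \qquad \lambda(P_i) = P_{(\lambda i) \bmod n}$

and a signature $(r, s)$ on message $m$ under key $d$ should verify exactly when some nonce $k$ gives $x(P_k) \equiv r$ and $s\,k \equiv m + r\,d \pmod{n}$, subject to the same high-S rejection rule. The "verify by hand" loops below recompute this relation for every candidate $k$ before comparing the result against rustsecp256k1_v0_1_0_ecdsa_verify.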
return 1; } #ifdef USE_ENDOMORPHISM -void test_exhaustive_endomorphism(const secp256k1_ge *group, int order) { +void test_exhaustive_endomorphism(const rustsecp256k1_v0_1_0_ge *group, int order) { int i; for (i = 0; i < order; i++) { - secp256k1_ge res; - secp256k1_ge_mul_lambda(&res, &group[i]); + rustsecp256k1_v0_1_0_ge res; + rustsecp256k1_v0_1_0_ge_mul_lambda(&res, &group[i]); ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res); } } #endif -void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj, int order) { +void test_exhaustive_addition(const rustsecp256k1_v0_1_0_ge *group, const rustsecp256k1_v0_1_0_gej *groupj, int order) { int i, j; /* Sanity-check (and check infinity functions) */ - CHECK(secp256k1_ge_is_infinity(&group[0])); - CHECK(secp256k1_gej_is_infinity(&groupj[0])); + CHECK(rustsecp256k1_v0_1_0_ge_is_infinity(&group[0])); + CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&groupj[0])); for (i = 1; i < order; i++) { - CHECK(!secp256k1_ge_is_infinity(&group[i])); - CHECK(!secp256k1_gej_is_infinity(&groupj[i])); + CHECK(!rustsecp256k1_v0_1_0_ge_is_infinity(&group[i])); + CHECK(!rustsecp256k1_v0_1_0_gej_is_infinity(&groupj[i])); } /* Check all addition formulae */ for (j = 0; j < order; j++) { - secp256k1_fe fe_inv; - secp256k1_fe_inv(&fe_inv, &groupj[j].z); + rustsecp256k1_v0_1_0_fe fe_inv; + rustsecp256k1_v0_1_0_fe_inv(&fe_inv, &groupj[j].z); for (i = 0; i < order; i++) { - secp256k1_ge zless_gej; - secp256k1_gej tmp; + rustsecp256k1_v0_1_0_ge zless_gej; + rustsecp256k1_v0_1_0_gej tmp; /* add_var */ - secp256k1_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL); + rustsecp256k1_v0_1_0_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL); ge_equals_gej(&group[(i + j) % order], &tmp); /* add_ge */ if (j > 0) { - secp256k1_gej_add_ge(&tmp, &groupj[i], &group[j]); + rustsecp256k1_v0_1_0_gej_add_ge(&tmp, &groupj[i], &group[j]); ge_equals_gej(&group[(i + j) % order], &tmp); } /* add_ge_var */ - secp256k1_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL); + rustsecp256k1_v0_1_0_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL); ge_equals_gej(&group[(i + j) % order], &tmp); /* add_zinv_var */ zless_gej.infinity = groupj[j].infinity; zless_gej.x = groupj[j].x; zless_gej.y = groupj[j].y; - secp256k1_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv); + rustsecp256k1_v0_1_0_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv); ge_equals_gej(&group[(i + j) % order], &tmp); } } /* Check doubling */ for (i = 0; i < order; i++) { - secp256k1_gej tmp; + rustsecp256k1_v0_1_0_gej tmp; if (i > 0) { - secp256k1_gej_double_nonzero(&tmp, &groupj[i], NULL); + rustsecp256k1_v0_1_0_gej_double_nonzero(&tmp, &groupj[i], NULL); ge_equals_gej(&group[(2 * i) % order], &tmp); } - secp256k1_gej_double_var(&tmp, &groupj[i], NULL); + rustsecp256k1_v0_1_0_gej_double_var(&tmp, &groupj[i], NULL); ge_equals_gej(&group[(2 * i) % order], &tmp); } /* Check negation */ for (i = 1; i < order; i++) { - secp256k1_ge tmp; - secp256k1_gej tmpj; - secp256k1_ge_neg(&tmp, &group[i]); + rustsecp256k1_v0_1_0_ge tmp; + rustsecp256k1_v0_1_0_gej tmpj; + rustsecp256k1_v0_1_0_ge_neg(&tmp, &group[i]); ge_equals_ge(&group[order - i], &tmp); - secp256k1_gej_neg(&tmpj, &groupj[i]); + rustsecp256k1_v0_1_0_gej_neg(&tmpj, &groupj[i]); ge_equals_gej(&group[order - i], &tmpj); } } -void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *group, const secp256k1_gej *groupj, int order) { +void test_exhaustive_ecmult(const rustsecp256k1_v0_1_0_context *ctx, const 
rustsecp256k1_v0_1_0_ge *group, const rustsecp256k1_v0_1_0_gej *groupj, int order) { int i, j, r_log; for (r_log = 1; r_log < order; r_log++) { for (j = 0; j < order; j++) { for (i = 0; i < order; i++) { - secp256k1_gej tmp; - secp256k1_scalar na, ng; - secp256k1_scalar_set_int(&na, i); - secp256k1_scalar_set_int(&ng, j); + rustsecp256k1_v0_1_0_gej tmp; + rustsecp256k1_v0_1_0_scalar na, ng; + rustsecp256k1_v0_1_0_scalar_set_int(&na, i); + rustsecp256k1_v0_1_0_scalar_set_int(&ng, j); - secp256k1_ecmult(&ctx->ecmult_ctx, &tmp, &groupj[r_log], &na, &ng); + rustsecp256k1_v0_1_0_ecmult(&ctx->ecmult_ctx, &tmp, &groupj[r_log], &na, &ng); ge_equals_gej(&group[(i * r_log + j) % order], &tmp); if (i > 0) { - secp256k1_ecmult_const(&tmp, &group[i], &ng, 256); + rustsecp256k1_v0_1_0_ecmult_const(&tmp, &group[i], &ng, 256); ge_equals_gej(&group[(i * j) % order], &tmp); } } @@ -183,106 +183,106 @@ void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *gr } typedef struct { - secp256k1_scalar sc[2]; - secp256k1_ge pt[2]; + rustsecp256k1_v0_1_0_scalar sc[2]; + rustsecp256k1_v0_1_0_ge pt[2]; } ecmult_multi_data; -static int ecmult_multi_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) { +static int ecmult_multi_callback(rustsecp256k1_v0_1_0_scalar *sc, rustsecp256k1_v0_1_0_ge *pt, size_t idx, void *cbdata) { ecmult_multi_data *data = (ecmult_multi_data*) cbdata; *sc = data->sc[idx]; *pt = data->pt[idx]; return 1; } -void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_ge *group, int order) { +void test_exhaustive_ecmult_multi(const rustsecp256k1_v0_1_0_context *ctx, const rustsecp256k1_v0_1_0_ge *group, int order) { int i, j, k, x, y; - secp256k1_scratch *scratch = secp256k1_scratch_create(&ctx->error_callback, 4096); + rustsecp256k1_v0_1_0_scratch *scratch = rustsecp256k1_v0_1_0_scratch_create(&ctx->error_callback, 4096); for (i = 0; i < order; i++) { for (j = 0; j < order; j++) { for (k = 0; k < order; k++) { for (x = 0; x < order; x++) { for (y = 0; y < order; y++) { - secp256k1_gej tmp; - secp256k1_scalar g_sc; + rustsecp256k1_v0_1_0_gej tmp; + rustsecp256k1_v0_1_0_scalar g_sc; ecmult_multi_data data; - secp256k1_scalar_set_int(&data.sc[0], i); - secp256k1_scalar_set_int(&data.sc[1], j); - secp256k1_scalar_set_int(&g_sc, k); + rustsecp256k1_v0_1_0_scalar_set_int(&data.sc[0], i); + rustsecp256k1_v0_1_0_scalar_set_int(&data.sc[1], j); + rustsecp256k1_v0_1_0_scalar_set_int(&g_sc, k); data.pt[0] = group[x]; data.pt[1] = group[y]; - secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2); + rustsecp256k1_v0_1_0_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2); ge_equals_gej(&group[(i * x + j * y + k) % order], &tmp); } } } } } - secp256k1_scratch_destroy(&ctx->error_callback, scratch); + rustsecp256k1_v0_1_0_scratch_destroy(&ctx->error_callback, scratch); } -void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k) { - secp256k1_fe x; +void r_from_k(rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_ge *group, int k) { + rustsecp256k1_v0_1_0_fe x; unsigned char x_bin[32]; k %= EXHAUSTIVE_TEST_ORDER; x = group[k].x; - secp256k1_fe_normalize(&x); - secp256k1_fe_get_b32(x_bin, &x); - secp256k1_scalar_set_b32(r, x_bin, NULL); + rustsecp256k1_v0_1_0_fe_normalize(&x); + rustsecp256k1_v0_1_0_fe_get_b32(x_bin, &x); + rustsecp256k1_v0_1_0_scalar_set_b32(r, x_bin, NULL); } -void 
test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) { +void test_exhaustive_verify(const rustsecp256k1_v0_1_0_context *ctx, const rustsecp256k1_v0_1_0_ge *group, int order) { int s, r, msg, key; for (s = 1; s < order; s++) { for (r = 1; r < order; r++) { for (msg = 1; msg < order; msg++) { for (key = 1; key < order; key++) { - secp256k1_ge nonconst_ge; - secp256k1_ecdsa_signature sig; - secp256k1_pubkey pk; - secp256k1_scalar sk_s, msg_s, r_s, s_s; - secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s; + rustsecp256k1_v0_1_0_ge nonconst_ge; + rustsecp256k1_v0_1_0_ecdsa_signature sig; + rustsecp256k1_v0_1_0_pubkey pk; + rustsecp256k1_v0_1_0_scalar sk_s, msg_s, r_s, s_s; + rustsecp256k1_v0_1_0_scalar s_times_k_s, msg_plus_r_times_sk_s; int k, should_verify; unsigned char msg32[32]; - secp256k1_scalar_set_int(&s_s, s); - secp256k1_scalar_set_int(&r_s, r); - secp256k1_scalar_set_int(&msg_s, msg); - secp256k1_scalar_set_int(&sk_s, key); + rustsecp256k1_v0_1_0_scalar_set_int(&s_s, s); + rustsecp256k1_v0_1_0_scalar_set_int(&r_s, r); + rustsecp256k1_v0_1_0_scalar_set_int(&msg_s, msg); + rustsecp256k1_v0_1_0_scalar_set_int(&sk_s, key); /* Verify by hand */ /* Run through every k value that gives us this r and check that *one* works. * Note there could be none, there could be multiple, ECDSA is weird. */ should_verify = 0; for (k = 0; k < order; k++) { - secp256k1_scalar check_x_s; + rustsecp256k1_v0_1_0_scalar check_x_s; r_from_k(&check_x_s, group, k); if (r_s == check_x_s) { - secp256k1_scalar_set_int(&s_times_k_s, k); - secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); - secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); - secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); - should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); + rustsecp256k1_v0_1_0_scalar_set_int(&s_times_k_s, k); + rustsecp256k1_v0_1_0_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); + rustsecp256k1_v0_1_0_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); + rustsecp256k1_v0_1_0_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); + should_verify |= rustsecp256k1_v0_1_0_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); } } /* nb we have a "high s" rule */ - should_verify &= !secp256k1_scalar_is_high(&s_s); + should_verify &= !rustsecp256k1_v0_1_0_scalar_is_high(&s_s); /* Verify by calling verify */ - secp256k1_ecdsa_signature_save(&sig, &r_s, &s_s); + rustsecp256k1_v0_1_0_ecdsa_signature_save(&sig, &r_s, &s_s); memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge)); - secp256k1_pubkey_save(&pk, &nonconst_ge); - secp256k1_scalar_get_b32(msg32, &msg_s); + rustsecp256k1_v0_1_0_pubkey_save(&pk, &nonconst_ge); + rustsecp256k1_v0_1_0_scalar_get_b32(msg32, &msg_s); CHECK(should_verify == - secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk)); + rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pk)); } } } } } -void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) { +void test_exhaustive_sign(const rustsecp256k1_v0_1_0_context *ctx, const rustsecp256k1_v0_1_0_ge *group, int order) { int i, j, k; /* Loop */ @@ -290,17 +290,17 @@ void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *grou for (j = 1; j < order; j++) { /* key */ for (k = 1; k < order; k++) { /* nonce */ const int starting_k = k; - secp256k1_ecdsa_signature sig; - secp256k1_scalar sk, msg, r, s, expected_r; + rustsecp256k1_v0_1_0_ecdsa_signature sig; + rustsecp256k1_v0_1_0_scalar sk, msg, r, s, 
expected_r; unsigned char sk32[32], msg32[32]; - secp256k1_scalar_set_int(&msg, i); - secp256k1_scalar_set_int(&sk, j); - secp256k1_scalar_get_b32(sk32, &sk); - secp256k1_scalar_get_b32(msg32, &msg); + rustsecp256k1_v0_1_0_scalar_set_int(&msg, i); + rustsecp256k1_v0_1_0_scalar_set_int(&sk, j); + rustsecp256k1_v0_1_0_scalar_get_b32(sk32, &sk); + rustsecp256k1_v0_1_0_scalar_get_b32(msg32, &msg); - secp256k1_ecdsa_sign(ctx, &sig, msg32, sk32, secp256k1_nonce_function_smallint, &k); + rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &sig, msg32, sk32, rustsecp256k1_v0_1_0_nonce_function_smallint, &k); - secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig); + rustsecp256k1_v0_1_0_ecdsa_signature_load(ctx, &r, &s, &sig); /* Note that we compute expected_r *after* signing -- this is important * because our nonce-computing function function might change k during * signing. */ @@ -328,7 +328,7 @@ void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *grou } #ifdef ENABLE_MODULE_RECOVERY -void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) { +void test_exhaustive_recovery_sign(const rustsecp256k1_v0_1_0_context *ctx, const rustsecp256k1_v0_1_0_ge *group, int order) { int i, j, k; /* Loop */ @@ -336,44 +336,44 @@ void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1 for (j = 1; j < order; j++) { /* key */ for (k = 1; k < order; k++) { /* nonce */ const int starting_k = k; - secp256k1_fe r_dot_y_normalized; - secp256k1_ecdsa_recoverable_signature rsig; - secp256k1_ecdsa_signature sig; - secp256k1_scalar sk, msg, r, s, expected_r; + rustsecp256k1_v0_1_0_fe r_dot_y_normalized; + rustsecp256k1_v0_1_0_ecdsa_recoverable_signature rsig; + rustsecp256k1_v0_1_0_ecdsa_signature sig; + rustsecp256k1_v0_1_0_scalar sk, msg, r, s, expected_r; unsigned char sk32[32], msg32[32]; int expected_recid; int recid; - secp256k1_scalar_set_int(&msg, i); - secp256k1_scalar_set_int(&sk, j); - secp256k1_scalar_get_b32(sk32, &sk); - secp256k1_scalar_get_b32(msg32, &msg); + rustsecp256k1_v0_1_0_scalar_set_int(&msg, i); + rustsecp256k1_v0_1_0_scalar_set_int(&sk, j); + rustsecp256k1_v0_1_0_scalar_get_b32(sk32, &sk); + rustsecp256k1_v0_1_0_scalar_get_b32(msg32, &msg); - secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, secp256k1_nonce_function_smallint, &k); + rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, rustsecp256k1_v0_1_0_nonce_function_smallint, &k); /* Check directly */ - secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig); + rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig); r_from_k(&expected_r, group, k); CHECK(r == expected_r); CHECK((k * s) % order == (i + r * j) % order || (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order); /* In computing the recid, there is an overflow condition that is disabled in - * scalar_low_impl.h `secp256k1_scalar_set_b32` because almost every r.y value + * scalar_low_impl.h `rustsecp256k1_v0_1_0_scalar_set_b32` because almost every r.y value * will exceed the group order, and our signing code always holds out for r * values that don't overflow, so with a proper overflow check the tests would * loop indefinitely. 
*/ r_dot_y_normalized = group[k].y; - secp256k1_fe_normalize(&r_dot_y_normalized); + rustsecp256k1_v0_1_0_fe_normalize(&r_dot_y_normalized); /* Also the recovery id is flipped depending if we hit the low-s branch */ if ((k * s) % order == (i + r * j) % order) { - expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 1 : 0; + expected_recid = rustsecp256k1_v0_1_0_fe_is_odd(&r_dot_y_normalized) ? 1 : 0; } else { - expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 0 : 1; + expected_recid = rustsecp256k1_v0_1_0_fe_is_odd(&r_dot_y_normalized) ? 0 : 1; } CHECK(recid == expected_recid); /* Convert to a standard sig then check */ - secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig); - secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig); + rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig); + rustsecp256k1_v0_1_0_ecdsa_signature_load(ctx, &r, &s, &sig); /* Note that we compute expected_r *after* signing -- this is important * because our nonce-computing function function might change k during * signing. */ @@ -391,46 +391,46 @@ void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1 } } -void test_exhaustive_recovery_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) { +void test_exhaustive_recovery_verify(const rustsecp256k1_v0_1_0_context *ctx, const rustsecp256k1_v0_1_0_ge *group, int order) { /* This is essentially a copy of test_exhaustive_verify, with recovery added */ int s, r, msg, key; for (s = 1; s < order; s++) { for (r = 1; r < order; r++) { for (msg = 1; msg < order; msg++) { for (key = 1; key < order; key++) { - secp256k1_ge nonconst_ge; - secp256k1_ecdsa_recoverable_signature rsig; - secp256k1_ecdsa_signature sig; - secp256k1_pubkey pk; - secp256k1_scalar sk_s, msg_s, r_s, s_s; - secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s; + rustsecp256k1_v0_1_0_ge nonconst_ge; + rustsecp256k1_v0_1_0_ecdsa_recoverable_signature rsig; + rustsecp256k1_v0_1_0_ecdsa_signature sig; + rustsecp256k1_v0_1_0_pubkey pk; + rustsecp256k1_v0_1_0_scalar sk_s, msg_s, r_s, s_s; + rustsecp256k1_v0_1_0_scalar s_times_k_s, msg_plus_r_times_sk_s; int recid = 0; int k, should_verify; unsigned char msg32[32]; - secp256k1_scalar_set_int(&s_s, s); - secp256k1_scalar_set_int(&r_s, r); - secp256k1_scalar_set_int(&msg_s, msg); - secp256k1_scalar_set_int(&sk_s, key); - secp256k1_scalar_get_b32(msg32, &msg_s); + rustsecp256k1_v0_1_0_scalar_set_int(&s_s, s); + rustsecp256k1_v0_1_0_scalar_set_int(&r_s, r); + rustsecp256k1_v0_1_0_scalar_set_int(&msg_s, msg); + rustsecp256k1_v0_1_0_scalar_set_int(&sk_s, key); + rustsecp256k1_v0_1_0_scalar_get_b32(msg32, &msg_s); /* Verify by hand */ /* Run through every k value that gives us this r and check that *one* works. * Note there could be none, there could be multiple, ECDSA is weird. 
*/ should_verify = 0; for (k = 0; k < order; k++) { - secp256k1_scalar check_x_s; + rustsecp256k1_v0_1_0_scalar check_x_s; r_from_k(&check_x_s, group, k); if (r_s == check_x_s) { - secp256k1_scalar_set_int(&s_times_k_s, k); - secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); - secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); - secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); - should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); + rustsecp256k1_v0_1_0_scalar_set_int(&s_times_k_s, k); + rustsecp256k1_v0_1_0_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); + rustsecp256k1_v0_1_0_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); + rustsecp256k1_v0_1_0_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); + should_verify |= rustsecp256k1_v0_1_0_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); } } /* nb we have a "high s" rule */ - should_verify &= !secp256k1_scalar_is_high(&s_s); + should_verify &= !rustsecp256k1_v0_1_0_scalar_is_high(&s_s); /* We would like to try recovering the pubkey and checking that it matches, * but pubkey recovery is impossible in the exhaustive tests (the reason @@ -438,12 +438,12 @@ void test_exhaustive_recovery_verify(const secp256k1_context *ctx, const secp256 * overlap between the sets, so there are no valid signatures). */ /* Verify by converting to a standard signature and calling verify */ - secp256k1_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid); - secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig); + rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid); + rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig); memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge)); - secp256k1_pubkey_save(&pk, &nonconst_ge); + rustsecp256k1_v0_1_0_pubkey_save(&pk, &nonconst_ge); CHECK(should_verify == - secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk)); + rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pk)); } } } @@ -453,40 +453,40 @@ void test_exhaustive_recovery_verify(const secp256k1_context *ctx, const secp256 int main(void) { int i; - secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER]; - secp256k1_ge group[EXHAUSTIVE_TEST_ORDER]; + rustsecp256k1_v0_1_0_gej groupj[EXHAUSTIVE_TEST_ORDER]; + rustsecp256k1_v0_1_0_ge group[EXHAUSTIVE_TEST_ORDER]; /* Build context */ - secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + rustsecp256k1_v0_1_0_context *ctx = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); /* TODO set z = 1, then do num_tests runs with random z values */ /* Generate the entire group */ - secp256k1_gej_set_infinity(&groupj[0]); - secp256k1_ge_set_gej(&group[0], &groupj[0]); + rustsecp256k1_v0_1_0_gej_set_infinity(&groupj[0]); + rustsecp256k1_v0_1_0_ge_set_gej(&group[0], &groupj[0]); for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { /* Set a different random z-value for each Jacobian point */ - secp256k1_fe z; + rustsecp256k1_v0_1_0_fe z; random_fe(&z); - secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g); - secp256k1_ge_set_gej(&group[i], &groupj[i]); - secp256k1_gej_rescale(&groupj[i], &z); + rustsecp256k1_v0_1_0_gej_add_ge(&groupj[i], &groupj[i - 1], &rustsecp256k1_v0_1_0_ge_const_g); + rustsecp256k1_v0_1_0_ge_set_gej(&group[i], &groupj[i]); + rustsecp256k1_v0_1_0_gej_rescale(&groupj[i], &z); /* Verify against ecmult_gen */ { - secp256k1_scalar scalar_i; - secp256k1_gej generatedj; - secp256k1_ge generated; + 
+            rustsecp256k1_v0_1_0_scalar scalar_i;
+            rustsecp256k1_v0_1_0_gej generatedj;
+            rustsecp256k1_v0_1_0_ge generated;
 
-            secp256k1_scalar_set_int(&scalar_i, i);
-            secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i);
-            secp256k1_ge_set_gej(&generated, &generatedj);
+            rustsecp256k1_v0_1_0_scalar_set_int(&scalar_i, i);
+            rustsecp256k1_v0_1_0_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i);
+            rustsecp256k1_v0_1_0_ge_set_gej(&generated, &generatedj);
 
             CHECK(group[i].infinity == 0);
             CHECK(generated.infinity == 0);
-            CHECK(secp256k1_fe_equal_var(&generated.x, &group[i].x));
-            CHECK(secp256k1_fe_equal_var(&generated.y, &group[i].y));
+            CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&generated.x, &group[i].x));
+            CHECK(rustsecp256k1_v0_1_0_fe_equal_var(&generated.y, &group[i].y));
         }
     }
 
@@ -505,7 +505,7 @@ int main(void) {
     test_exhaustive_recovery_verify(ctx, group, EXHAUSTIVE_TEST_ORDER);
 #endif
 
-    secp256k1_context_destroy(ctx);
+    rustsecp256k1_v0_1_0_context_destroy(ctx);
 
     return 0;
 }
diff --git a/secp256k1-sys/depend/secp256k1/src/util.h b/secp256k1-sys/depend/secp256k1/src/util.h
index 9d750d9..8c58cd9 100644
--- a/secp256k1-sys/depend/secp256k1/src/util.h
+++ b/secp256k1-sys/depend/secp256k1/src/util.h
@@ -18,9 +18,9 @@
 typedef struct {
     void (*fn)(const char *text, void* data);
     const void* data;
-} secp256k1_callback;
+} rustsecp256k1_v0_1_0_callback;
 
-static SECP256K1_INLINE void secp256k1_callback_call(const secp256k1_callback * const cb, const char * const text) {
+static SECP256K1_INLINE void rustsecp256k1_v0_1_0_callback_call(const rustsecp256k1_v0_1_0_callback * const cb, const char * const text) {
     cb->fn(text, (void*)cb->data);
 }
diff --git a/secp256k1-sys/depend/util.h.patch b/secp256k1-sys/depend/util.h.patch
new file mode 100644
index 0000000..92731c7
--- /dev/null
+++ b/secp256k1-sys/depend/util.h.patch
@@ -0,0 +1,17 @@
+71,86d70
+< static SECP256K1_INLINE void *checked_malloc(const secp256k1_callback* cb, size_t size) {
+<     void *ret = malloc(size);
+<     if (ret == NULL) {
+<         secp256k1_callback_call(cb, "Out of memory");
+<     }
+<     return ret;
+< }
+<
+< static SECP256K1_INLINE void *checked_realloc(const secp256k1_callback* cb, void *ptr, size_t size) {
+<     void *ret = realloc(ptr, size);
+<     if (ret == NULL) {
+<         secp256k1_callback_call(cb, "Out of memory");
+<     }
+<     return ret;
+< }
+<
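Note on the lib.rs changes that follow (a sketch, not part of the patch itself): every extern item keeps its historical Rust-side name but is bound to the renamed C symbol through a `#[link_name = ...]` attribute, so Rust callers compile unchanged while the linker only ever sees prefixed symbols. A minimal illustration of the mechanism, using hypothetical names (`my_native_v2_add` and `add` are illustrative, not from this patch):

    extern "C" {
        // Rust code keeps calling `add`, but the linker resolves the
        // versioned symbol `my_native_v2_add` instead.
        #[link_name = "my_native_v2_add"]
        fn add(a: i32, b: i32) -> i32;
    }

    fn double(x: i32) -> i32 {
        unsafe { add(x, x) } // resolved against my_native_v2_add at link time
    }

Because each crate version emits a distinct prefix, two versions of the vendored library can coexist in a single binary without duplicate-symbol errors.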
diff --git a/secp256k1-sys/src/lib.rs b/secp256k1-sys/src/lib.rs
index 7577a85..f51864b 100644
--- a/secp256k1-sys/src/lib.rs
+++ b/secp256k1-sys/src/lib.rs
@@ -157,71 +157,91 @@ impl Default for SharedSecret {
 #[cfg(not(feature = "fuzztarget"))]
 extern "C" {
     /// Default ECDH hash function
+    #[link_name = "rustsecp256k1_v0_1_0_ecdh_hash_function_default"]
     pub static secp256k1_ecdh_hash_function_default: EcdhHashFn;
 
+    #[link_name = "rustsecp256k1_v0_1_0_nonce_function_rfc6979"]
     pub static secp256k1_nonce_function_rfc6979: NonceFn;
 
+    #[link_name = "rustsecp256k1_v0_1_0_nonce_function_default"]
     pub static secp256k1_nonce_function_default: NonceFn;
 
+    #[link_name = "rustsecp256k1_v0_1_0_context_no_precomp"]
     pub static secp256k1_context_no_precomp: *const Context;
 
     // Contexts
+    #[link_name = "rustsecp256k1_v0_1_0_context_preallocated_size"]
     pub fn secp256k1_context_preallocated_size(flags: c_uint) -> usize;
+    #[link_name = "rustsecp256k1_v0_1_0_context_preallocated_create"]
     pub fn secp256k1_context_preallocated_create(prealloc: *mut c_void, flags: c_uint) -> *mut Context;
+    #[link_name = "rustsecp256k1_v0_1_0_context_preallocated_destroy"]
     pub fn secp256k1_context_preallocated_destroy(cx: *mut Context);
+    #[link_name = "rustsecp256k1_v0_1_0_context_preallocated_clone_size"]
     pub fn secp256k1_context_preallocated_clone_size(cx: *const Context) -> usize;
+    #[link_name = "rustsecp256k1_v0_1_0_context_preallocated_clone"]
     pub fn secp256k1_context_preallocated_clone(cx: *const Context, prealloc: *mut c_void) -> *mut Context;
+    #[link_name = "rustsecp256k1_v0_1_0_context_randomize"]
     pub fn secp256k1_context_randomize(cx: *mut Context, seed32: *const c_uchar) -> c_int;
 
     // Pubkeys
+    #[link_name = "rustsecp256k1_v0_1_0_ec_pubkey_parse"]
     pub fn secp256k1_ec_pubkey_parse(cx: *const Context, pk: *mut PublicKey,
                                      input: *const c_uchar, in_len: usize) -> c_int;
+    #[link_name = "rustsecp256k1_v0_1_0_ec_pubkey_serialize"]
     pub fn secp256k1_ec_pubkey_serialize(cx: *const Context, output: *mut c_uchar,
                                          out_len: *mut usize, pk: *const PublicKey,
                                          compressed: c_uint) -> c_int;
 
     // Signatures
+    #[link_name = "rustsecp256k1_v0_1_0_ecdsa_signature_parse_der"]
     pub fn secp256k1_ecdsa_signature_parse_der(cx: *const Context, sig: *mut Signature,
                                                input: *const c_uchar, in_len: usize) -> c_int;
+    #[link_name = "rustsecp256k1_v0_1_0_ecdsa_signature_parse_compact"]
     pub fn secp256k1_ecdsa_signature_parse_compact(cx: *const Context, sig: *mut Signature,
                                                    input64: *const c_uchar) -> c_int;
+    #[link_name = "rustsecp256k1_v0_1_0_ecdsa_signature_parse_der_lax"]
     pub fn ecdsa_signature_parse_der_lax(cx: *const Context, sig: *mut Signature,
                                          input: *const c_uchar, in_len: usize) -> c_int;
+    #[link_name = "rustsecp256k1_v0_1_0_ecdsa_signature_serialize_der"]
     pub fn secp256k1_ecdsa_signature_serialize_der(cx: *const Context, output: *mut c_uchar,
                                                    out_len: *mut usize, sig: *const Signature) -> c_int;
+    #[link_name = "rustsecp256k1_v0_1_0_ecdsa_signature_serialize_compact"]
     pub fn secp256k1_ecdsa_signature_serialize_compact(cx: *const Context, output64: *mut c_uchar,
                                                        sig: *const Signature) -> c_int;
+    #[link_name = "rustsecp256k1_v0_1_0_ecdsa_signature_normalize"]
     pub fn secp256k1_ecdsa_signature_normalize(cx: *const Context, out_sig: *mut Signature,
                                                in_sig: *const Signature) -> c_int;
 
     // ECDSA
+    #[link_name = "rustsecp256k1_v0_1_0_ecdsa_verify"]
     pub fn secp256k1_ecdsa_verify(cx: *const Context,
                                   sig: *const Signature,
                                   msg32: *const c_uchar,
                                   pk: *const PublicKey) -> c_int;
+    #[link_name = "rustsecp256k1_v0_1_0_ecdsa_sign"]
     pub fn secp256k1_ecdsa_sign(cx: *const Context,
                                 sig: *mut Signature,
                                 msg32: *const c_uchar,
@@ -231,41 +251,49 @@ extern "C" {
                                 -> c_int;
 
     // EC
+    #[link_name = "rustsecp256k1_v0_1_0_ec_seckey_verify"]
     pub fn secp256k1_ec_seckey_verify(cx: *const Context, sk: *const c_uchar) -> c_int;
+    #[link_name = "rustsecp256k1_v0_1_0_ec_pubkey_create"]
     pub fn secp256k1_ec_pubkey_create(cx: *const Context, pk: *mut PublicKey, sk: *const c_uchar) -> c_int;
 
 //TODO secp256k1_ec_privkey_export
 //TODO secp256k1_ec_privkey_import
 
+    #[link_name = "rustsecp256k1_v0_1_0_ec_privkey_tweak_add"]
     pub fn secp256k1_ec_privkey_tweak_add(cx: *const Context,
                                           sk: *mut c_uchar,
                                           tweak: *const c_uchar) -> c_int;
+    #[link_name = "rustsecp256k1_v0_1_0_ec_pubkey_tweak_add"]
     pub fn secp256k1_ec_pubkey_tweak_add(cx: *const Context,
                                          pk: *mut PublicKey,
                                          tweak: *const c_uchar) -> c_int;
+    #[link_name = "rustsecp256k1_v0_1_0_ec_privkey_tweak_mul"]
     pub fn secp256k1_ec_privkey_tweak_mul(cx: *const Context,
                                           sk: *mut c_uchar,
                                           tweak: *const c_uchar) -> c_int;
+    #[link_name = "rustsecp256k1_v0_1_0_ec_pubkey_tweak_mul"]
     pub fn secp256k1_ec_pubkey_tweak_mul(cx: *const Context,
                                          pk: *mut PublicKey,
                                          tweak: *const c_uchar) -> c_int;
+    #[link_name = "rustsecp256k1_v0_1_0_ec_pubkey_combine"]
     pub fn secp256k1_ec_pubkey_combine(cx: *const Context,
                                        out: *mut PublicKey,
                                        ins: *const *const PublicKey,
                                        n: c_int) -> c_int;
 
+    #[link_name = "rustsecp256k1_v0_1_0_ecdh"]
     pub fn secp256k1_ecdh(
         cx: *const Context,
         output: *mut SharedSecret,
@@ -287,7 +315,7 @@ extern "C" {
 /// This will create a secp256k1 raw context.
 // Returns: a newly created context object.
 //  In:      flags: which parts of the context to initialize.
-pub unsafe extern "C" fn secp256k1_context_create(flags: c_uint) -> *mut Context {
+pub unsafe extern "C" fn rustsecp256k1_v0_1_0_context_create(flags: c_uint) -> *mut Context {
     use std::mem;
     assert!(mem::align_of::<usize>() >= mem::align_of::<u8>());
     assert_eq!(mem::size_of::<usize>(), mem::size_of::<&usize>());
@@ -303,6 +331,11 @@ pub unsafe extern "C" fn secp256k1_context_create(flags: c_uint) -> *mut Context
     secp256k1_context_preallocated_create(ptr as *mut c_void, flags)
 }
 
+#[cfg(all(feature = "std", not(feature = "dont_replace_c_symbols")))]
+pub unsafe fn secp256k1_context_create(flags: c_uint) -> *mut Context {
+    rustsecp256k1_v0_1_0_context_create(flags)
+}
+
 #[cfg(all(feature = "std", not(feature = "dont_replace_c_symbols")))]
 #[no_mangle]
 /// A reimplementation of the C function `secp256k1_context_destroy` in rust.
@@ -311,7 +344,7 @@ pub unsafe extern "C" fn secp256k1_context_create(flags: c_uint) -> *mut Context
 ///
 /// The pointer shouldn't be used after passing to this function, consider it as passing it to `free()`.
 ///
-pub unsafe extern "C" fn secp256k1_context_destroy(ctx: *mut Context) {
+pub unsafe extern "C" fn rustsecp256k1_v0_1_0_context_destroy(ctx: *mut Context) {
     secp256k1_context_preallocated_destroy(ctx);
 
     let ctx: *mut usize = ctx as *mut usize;
@@ -321,6 +354,11 @@ pub unsafe extern "C" fn secp256k1_context_destroy(ctx: *mut Context) {
     let _ = Box::from_raw(slice as *mut [usize]);
 }
 
+#[cfg(all(feature = "std", not(feature = "dont_replace_c_symbols")))]
+pub unsafe fn secp256k1_context_destroy(ctx: *mut Context) {
+    rustsecp256k1_v0_1_0_context_destroy(ctx)
+}
+
 #[cfg(not(feature = "dont_replace_c_symbols"))]
 #[no_mangle]
@@ -342,7 +380,7 @@ pub unsafe extern "C" fn secp256k1_context_destroy(ctx: *mut Context) {
 ///
 /// See also secp256k1_default_error_callback_fn.
 ///
-pub unsafe extern "C" fn secp256k1_default_illegal_callback_fn(message: *const c_char, _data: *mut c_void) {
+pub unsafe extern "C" fn rustsecp256k1_v0_1_0_default_illegal_callback_fn(message: *const c_char, _data: *mut c_void) {
     use core::str;
     let msg_slice = slice::from_raw_parts(message as *const u8, strlen(message));
     let msg = str::from_utf8_unchecked(msg_slice);
@@ -365,7 +403,7 @@ pub unsafe extern "C" fn secp256k1_default_illegal_callback_fn(message: *const c
 ///
 /// See also secp256k1_default_illegal_callback_fn.
 ///
-pub unsafe extern "C" fn secp256k1_default_error_callback_fn(message: *const c_char, _data: *mut c_void) {
+pub unsafe extern "C" fn rustsecp256k1_v0_1_0_default_error_callback_fn(message: *const c_char, _data: *mut c_void) {
     use core::str;
     let msg_slice = slice::from_raw_parts(message as *const u8, strlen(message));
     let msg = str::from_utf8_unchecked(msg_slice);
diff --git a/secp256k1-sys/src/recovery.rs b/secp256k1-sys/src/recovery.rs
index f64c967..8e65bd3 100644
--- a/secp256k1-sys/src/recovery.rs
+++ b/secp256k1-sys/src/recovery.rs
@@ -41,17 +41,21 @@ impl Default for RecoverableSignature {
 #[cfg(not(feature = "fuzztarget"))]
 extern "C" {
+    #[link_name = "rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact"]
     pub fn secp256k1_ecdsa_recoverable_signature_parse_compact(cx: *const Context, sig: *mut RecoverableSignature,
                                                                input64: *const c_uchar, recid: c_int) -> c_int;
 
+    #[link_name = "rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_serialize_compact"]
     pub fn secp256k1_ecdsa_recoverable_signature_serialize_compact(cx: *const Context, output64: *mut c_uchar,
                                                                    recid: *mut c_int, sig: *const RecoverableSignature) -> c_int;
 
+    #[link_name = "rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert"]
     pub fn secp256k1_ecdsa_recoverable_signature_convert(cx: *const Context, sig: *mut Signature,
                                                          input: *const RecoverableSignature) -> c_int;
 
+    #[link_name = "rustsecp256k1_v0_1_0_ecdsa_sign_recoverable"]
     pub fn secp256k1_ecdsa_sign_recoverable(cx: *const Context,
                                             sig: *mut RecoverableSignature,
                                             msg32: *const c_uchar,
@@ -60,6 +64,7 @@ extern "C" {
                                             noncedata: *const c_void)
                                             -> c_int;
 
+    #[link_name = "rustsecp256k1_v0_1_0_ecdsa_recover"]
     pub fn secp256k1_ecdsa_recover(cx: *const Context,
                                    pk: *mut PublicKey,
                                    sig: *const RecoverableSignature,
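A usage sketch (not part of the patch) of the reimplemented context lifecycle shown in lib.rs above. With the malloc-based C constructors patched out, creation and destruction round-trip through the Rust allocator instead; the flag constants are assumed to be the crate's usual SECP256K1_START_SIGN / SECP256K1_START_VERIFY:

    unsafe {
        // Allocation happens in Rust (a boxed usize slice), not libc malloc,
        // which is what makes no-malloc targets such as WASM viable.
        let ctx = secp256k1_context_create(SECP256K1_START_SIGN | SECP256K1_START_VERIFY);
        assert!(!ctx.is_null());
        // ... use ctx with any of the extern functions declared above ...
        secp256k1_context_destroy(ctx); // reconstructs the Box and frees it
    }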
diff --git a/secp256k1-sys/vendor-libsecp.sh b/secp256k1-sys/vendor-libsecp.sh
new file mode 100755
index 0000000..35e6734
--- /dev/null
+++ b/secp256k1-sys/vendor-libsecp.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+set -e
+
+
+if [ -z "$1" ] || [ -z "$2" ]; then
+    echo "\$1 parameter must be the rust-secp256k1-sys depend directory"
+    echo "\$2 parameter must be the rust-secp256k1-sys version code (M_m_p format)"
+    echo "\$3 parameter (optional) can be the revision to check out"
+    exit 1
+fi
+
+PARENT_DIR=$1
+VERSIONCODE=$2
+REV=$3
+DIR=secp256k1
+ORIGDIR=$(pwd)
+
+while true; do
+    read -r -p "$PARENT_DIR/$DIR will be deleted [yn]: " yn
+    case $yn in
+        [Yy]* ) break;;
+        [Nn]* ) exit;;
+        * ) echo "Please answer yes or no.";;
+    esac
+done
+
+cd "$PARENT_DIR" || exit 1
+rm -rf "$DIR"
+git clone https://github.com/bitcoin-core/secp256k1.git "$DIR"
+cd "$DIR"
+if [ -n "$REV" ]; then
+    git checkout "$REV"
+fi
+HEAD=$(git rev-parse HEAD)
+cd ..
+echo "# This file was automatically created by $0" > ./secp256k1-HEAD-revision.txt
+echo "$HEAD" >> ./secp256k1-HEAD-revision.txt
+
+# We need to make some source changes to the files.
+
+# To support compiling for WASM, we need to remove all methods that use malloc.
+# To compensate, the secp_context_create and _destroy methods are redefined in Rust.
+patch "$DIR/include/secp256k1.h" "./secp256k1.h.patch"
+patch "$DIR/src/secp256k1.c" "./secp256k1.c.patch"
+patch "$DIR/src/scratch_impl.h" "./scratch_impl.h.patch"
+patch "$DIR/src/util.h" "./util.h.patch"
+
+# Prefix all methods with rustsecp and a version prefix
+find "$DIR" -not -path '*/\.*' -type f -print0 | xargs -0 sed -i "/^#include/! s/secp256k1_/rustsecp256k1_v${VERSIONCODE}_/g"
+
+# special rule for a method that is not prefixed in libsecp
+find "$DIR" -not -path '*/\.*' -type f -print0 | xargs -0 sed -i "/^#include/! s/ecdsa_signature_parse_der_lax/rustsecp256k1_v${VERSIONCODE}_ecdsa_signature_parse_der_lax/g"
+
+# TODO: can be removed once 496c5b43b lands in secp-zkp
+find "$DIR" -not -path '*/\.*' -type f -print0 | xargs -0 sed -i 's/^const int CURVE_B/static const int CURVE_B/g'
+
+while true; do
+    read -r -p "Update Rust extern references as well? [yn]: " yn
+    case $yn in
+        [Yy]* ) break;;
+        [Nn]* ) exit;;
+        * ) echo "Please answer yes or no.";;
+    esac
+done
+
+cd "$ORIGDIR"
+find "./src/" -name "*.rs" -type f -print0 | xargs -0 sed -i -r "s/rustsecp256k1_v[0-9]+_[0-9]+_[0-9]+_(.*)([\"\(])/rustsecp256k1_v${VERSIONCODE}_\1\2/g"
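To illustrate the renaming passes above (not part of the patch): with VERSIONCODE=0_1_0, the first sed turns every C identifier such as secp256k1_ec_pubkey_create into rustsecp256k1_v0_1_0_ec_pubkey_create, the second gives ecdsa_signature_parse_der_lax (the one symbol libsecp leaves unprefixed) the same treatment, and the final regex only re-versions prefixes that already exist in the Rust sources, e.g.

    // before re-vendoring with VERSIONCODE=0_2_0
    #[link_name = "rustsecp256k1_v0_1_0_ecdh"]
    // after the script has run
    #[link_name = "rustsecp256k1_v0_2_0_ecdh"]

so the script can be re-run against a new upstream revision whenever the -sys crate version changes.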