Merge rust-bitcoin/rust-secp256k1#653: Update vendored lib secp256k1 to v0.4.0
80b2a8d4aa
Update vendored libsecp to v0.4.0 (Davidson Souza)d2285c929a
ci: Remove MIPS* from CI (Davidson Souza)0d58f50d52
ci: generalize grp in "illegal callback" test (Andrew Poelstra)acf9ac13e9
delete `test_manual_create_destroy` test (Andrew Poelstra)04ce50891b
lib: fix bad unit test (Andrew Poelstra)e4cca901ea
gitignore: remove things that shouldn't be there (Andrew Poelstra) Pull request description: Replaces #645 and #652. Precedes #627. I'm basically using #652 but resolving the linking problems, My local CI is erring on windows cross-test, but I can compile without issue with `cargo build --target x86_64-pc-windows-gnu`. Some MIPS jobs failed before even installing cross, I think those aren't really related to this PR. Any ideas on what can be happening? ACKs for top commit: apoelstra: ACK80b2a8d4aa
Tree-SHA512: 62c2e04348110e3995111fa666f10dcc403b963770d047361f9209cf45b45db8744a7eb6d9ee3278d18007412dab5131ac3e1dd3e3d704963c6a6f232d57199a
This commit is contained in:
commit
da4f67b274
|
@ -27,10 +27,6 @@ jobs:
|
|||
- arm-unknown-linux-gnueabi
|
||||
- arm-unknown-linux-gnueabihf
|
||||
- armv7-unknown-linux-gnueabihf
|
||||
- mips-unknown-linux-gnu
|
||||
- mips64-unknown-linux-gnuabi64
|
||||
- mips64el-unknown-linux-gnuabi64
|
||||
- mipsel-unknown-linux-gnu
|
||||
- powerpc-unknown-linux-gnu
|
||||
# - powerpc64-unknown-linux-gnu # not supported by cross
|
||||
- powerpc64le-unknown-linux-gnu
|
||||
|
|
|
@ -1,9 +1,2 @@
|
|||
target/
|
||||
Cargo.lock
|
||||
|
||||
#IntelliJ project files
|
||||
.idea
|
||||
*.iml
|
||||
|
||||
CMakeLists.txt
|
||||
cmake-build-debug
|
||||
|
|
|
@ -273,7 +273,7 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "secp256k1-sys"
|
||||
version = "0.8.1"
|
||||
version = "0.9.0"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"libc",
|
||||
|
|
|
@ -194,7 +194,7 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "secp256k1-sys"
|
||||
version = "0.8.1"
|
||||
version = "0.9.0"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"libc",
|
||||
|
|
|
@ -35,7 +35,7 @@ global-context = ["std"]
|
|||
global-context-less-secure = ["global-context"]
|
||||
|
||||
[dependencies]
|
||||
secp256k1-sys = { version = "0.8.1", default-features = false, path = "./secp256k1-sys" }
|
||||
secp256k1-sys = { version = "0.9.0", default-features = false, path = "./secp256k1-sys" }
|
||||
serde = { version = "1.0.103", default-features = false, optional = true }
|
||||
|
||||
# You likely only want to enable these if you explicitly do not want to use "std", otherwise enable
|
||||
|
|
|
@ -23,7 +23,7 @@ fi
|
|||
# Test if panic in C code aborts the process (either with a real panic or with SIGILL)
|
||||
cargo test -- --ignored --exact 'tests::test_panic_raw_ctx_should_terminate_abnormally' 2>&1 \
|
||||
| tee /dev/stderr \
|
||||
| grep "SIGILL\\|\[libsecp256k1] illegal argument. !rustsecp256k1_v0_._._fe_is_zero(&ge->x)"
|
||||
| grep "SIGILL\\|\[libsecp256k1] illegal argument. "
|
||||
|
||||
# Make all cargo invocations verbose
|
||||
export CARGO_TERM_VERBOSE=true
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "secp256k1-sys"
|
||||
version = "0.8.1"
|
||||
version = "0.9.0"
|
||||
authors = [ "Dawid Ciężarkiewicz <dpc@ucore.info>",
|
||||
"Andrew Poelstra <apoelstra@wpsoftware.net>",
|
||||
"Steven Roose <steven@stevenroose.org>" ]
|
||||
|
@ -12,7 +12,7 @@ description = "FFI for Pieter Wuille's `libsecp256k1` library."
|
|||
keywords = [ "secp256k1", "libsecp256k1", "ffi" ]
|
||||
readme = "README.md"
|
||||
build = "build.rs"
|
||||
links = "rustsecp256k1_v0_8_1"
|
||||
links = "rustsecp256k1_v0_9_0"
|
||||
edition = "2018"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
10c10,12
|
||||
<
|
||||
---
|
||||
> extern int secp256k1_ecdsa_signature_parse_compact(
|
||||
> const secp256k1_context *ctx,
|
||||
> secp256k1_ecdsa_signature *sig, const unsigned char *input64);
|
|
@ -1,2 +1,2 @@
|
|||
# This file was automatically created by vendor-libsecp.sh
|
||||
21ffe4b22a9683cf24ae0763359e401d1284cc7a
|
||||
199d27cea32203b224b208627533c2e813cd3b21
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
139,149d138
|
||||
140,150d139
|
||||
< secp256k1_context* secp256k1_context_create(unsigned int flags) {
|
||||
< size_t const prealloc_size = secp256k1_context_preallocated_size(flags);
|
||||
< secp256k1_context* ctx = (secp256k1_context*)checked_malloc(&default_error_callback, prealloc_size);
|
||||
|
@ -10,27 +10,34 @@
|
|||
< return ctx;
|
||||
< }
|
||||
<
|
||||
164,174d152
|
||||
162,174d150
|
||||
< secp256k1_context* secp256k1_context_clone(const secp256k1_context* ctx) {
|
||||
< secp256k1_context* ret;
|
||||
< size_t prealloc_size;
|
||||
<
|
||||
< VERIFY_CHECK(ctx != NULL);
|
||||
< ARG_CHECK(secp256k1_context_is_proper(ctx));
|
||||
<
|
||||
< prealloc_size = secp256k1_context_preallocated_clone_size(ctx);
|
||||
< ret = (secp256k1_context*)checked_malloc(&ctx->error_callback, prealloc_size);
|
||||
< ret = secp256k1_context_preallocated_clone(ctx, ret);
|
||||
< return ret;
|
||||
< }
|
||||
<
|
||||
183,189d160
|
||||
186,197d161
|
||||
< void secp256k1_context_destroy(secp256k1_context* ctx) {
|
||||
< if (ctx != NULL) {
|
||||
< ARG_CHECK_VOID(ctx == NULL || secp256k1_context_is_proper(ctx));
|
||||
<
|
||||
< /* Defined as noop */
|
||||
< if (ctx == NULL) {
|
||||
< return;
|
||||
< }
|
||||
<
|
||||
< secp256k1_context_preallocated_destroy(ctx);
|
||||
< free(ctx);
|
||||
< }
|
||||
< }
|
||||
<
|
||||
206,215d176
|
||||
220,229d183
|
||||
< }
|
||||
<
|
||||
< secp256k1_scratch_space* secp256k1_scratch_space_create(const secp256k1_context* ctx, size_t max_size) {
|
||||
|
|
|
@ -1,22 +1,36 @@
|
|||
226,228d225
|
||||
236d235
|
||||
< SECP256K1_API const secp256k1_context *secp256k1_context_static;
|
||||
239,240d237
|
||||
< SECP256K1_API const secp256k1_context *secp256k1_context_no_precomp
|
||||
< SECP256K1_DEPRECATED("Use secp256k1_context_static instead");
|
||||
286,289d282
|
||||
< SECP256K1_API secp256k1_context *secp256k1_context_create(
|
||||
< unsigned int flags
|
||||
< ) SECP256K1_WARN_UNUSED_RESULT;
|
||||
231,233d227
|
||||
<
|
||||
302,305d294
|
||||
< SECP256K1_API secp256k1_context *secp256k1_context_clone(
|
||||
< const secp256k1_context *ctx
|
||||
< ) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT;
|
||||
248,250d241
|
||||
<
|
||||
320,323d308
|
||||
< SECP256K1_API void secp256k1_context_destroy(
|
||||
< secp256k1_context *ctx
|
||||
< ) SECP256K1_ARG_NONNULL(1);
|
||||
327,330d317
|
||||
<
|
||||
402,406d386
|
||||
< SECP256K1_API SECP256K1_WARN_UNUSED_RESULT secp256k1_scratch_space *secp256k1_scratch_space_create(
|
||||
< const secp256k1_context *ctx,
|
||||
< size_t size
|
||||
< ) SECP256K1_ARG_NONNULL(1);
|
||||
338,341d324
|
||||
<
|
||||
413,417d392
|
||||
< SECP256K1_API void secp256k1_scratch_space_destroy(
|
||||
< const secp256k1_context *ctx,
|
||||
< secp256k1_scratch_space *scratch
|
||||
< ) SECP256K1_ARG_NONNULL(1);
|
||||
<
|
||||
636d610
|
||||
< SECP256K1_API const secp256k1_nonce_function secp256k1_nonce_function_rfc6979;
|
||||
639d612
|
||||
< SECP256K1_API const secp256k1_nonce_function secp256k1_nonce_function_default;
|
||||
|
|
|
@ -1,377 +0,0 @@
|
|||
env:
|
||||
### compiler options
|
||||
HOST:
|
||||
# Specific warnings can be disabled with -Wno-error=foo.
|
||||
# -pedantic-errors is not equivalent to -Werror=pedantic and thus not implied by -Werror according to the GCC manual.
|
||||
WERROR_CFLAGS: -Werror -pedantic-errors
|
||||
MAKEFLAGS: -j4
|
||||
BUILD: check
|
||||
### secp256k1 config
|
||||
ECMULTWINDOW: auto
|
||||
ECMULTGENPRECISION: auto
|
||||
ASM: no
|
||||
WIDEMUL: auto
|
||||
WITH_VALGRIND: yes
|
||||
EXTRAFLAGS:
|
||||
### secp256k1 modules
|
||||
EXPERIMENTAL: no
|
||||
ECDH: no
|
||||
RECOVERY: no
|
||||
SCHNORRSIG: no
|
||||
### test options
|
||||
SECP256K1_TEST_ITERS:
|
||||
BENCH: yes
|
||||
SECP256K1_BENCH_ITERS: 2
|
||||
CTIMETEST: yes
|
||||
# Compile and run the tests
|
||||
EXAMPLES: yes
|
||||
|
||||
# https://cirrus-ci.org/pricing/#compute-credits
|
||||
credits_snippet: &CREDITS
|
||||
# Don't use any credits for now.
|
||||
use_compute_credits: false
|
||||
|
||||
cat_logs_snippet: &CAT_LOGS
|
||||
always:
|
||||
cat_tests_log_script:
|
||||
- cat tests.log || true
|
||||
cat_exhaustive_tests_log_script:
|
||||
- cat exhaustive_tests.log || true
|
||||
cat_valgrind_ctime_test_log_script:
|
||||
- cat valgrind_ctime_test.log || true
|
||||
cat_bench_log_script:
|
||||
- cat bench.log || true
|
||||
cat_config_log_script:
|
||||
- cat config.log || true
|
||||
cat_test_env_script:
|
||||
- cat test_env.log || true
|
||||
cat_ci_env_script:
|
||||
- env
|
||||
|
||||
merge_base_script_snippet: &MERGE_BASE
|
||||
merge_base_script:
|
||||
- if [ "$CIRRUS_PR" = "" ]; then exit 0; fi
|
||||
- git fetch $CIRRUS_REPO_CLONE_URL $CIRRUS_BASE_BRANCH
|
||||
- git config --global user.email "ci@ci.ci"
|
||||
- git config --global user.name "ci"
|
||||
- git merge FETCH_HEAD # Merge base to detect silent merge conflicts
|
||||
|
||||
linux_container_snippet: &LINUX_CONTAINER
|
||||
container:
|
||||
dockerfile: ci/linux-debian.Dockerfile
|
||||
# Reduce number of CPUs to be able to do more builds in parallel.
|
||||
cpu: 1
|
||||
# Gives us more CPUs for free if they're available.
|
||||
greedy: true
|
||||
# More than enough for our scripts.
|
||||
memory: 1G
|
||||
|
||||
task:
|
||||
name: "x86_64: Linux (Debian stable)"
|
||||
<< : *LINUX_CONTAINER
|
||||
matrix: &ENV_MATRIX
|
||||
- env: {WIDEMUL: int64, RECOVERY: yes}
|
||||
- env: {WIDEMUL: int64, ECDH: yes, SCHNORRSIG: yes}
|
||||
- env: {WIDEMUL: int128}
|
||||
- env: {WIDEMUL: int128_struct}
|
||||
- env: {WIDEMUL: int128, RECOVERY: yes, SCHNORRSIG: yes}
|
||||
- env: {WIDEMUL: int128, ECDH: yes, SCHNORRSIG: yes}
|
||||
- env: {WIDEMUL: int128, ASM: x86_64}
|
||||
- env: { RECOVERY: yes, SCHNORRSIG: yes}
|
||||
- env: {BUILD: distcheck, WITH_VALGRIND: no, CTIMETEST: no, BENCH: no}
|
||||
- env: {CPPFLAGS: -DDETERMINISTIC}
|
||||
- env: {CFLAGS: -O0, CTIMETEST: no}
|
||||
- env: { ECMULTGENPRECISION: 2, ECMULTWINDOW: 2 }
|
||||
- env: { ECMULTGENPRECISION: 8, ECMULTWINDOW: 4 }
|
||||
matrix:
|
||||
- env:
|
||||
CC: gcc
|
||||
- env:
|
||||
CC: clang
|
||||
<< : *MERGE_BASE
|
||||
test_script:
|
||||
- ./ci/cirrus.sh
|
||||
<< : *CAT_LOGS
|
||||
|
||||
task:
|
||||
name: "i686: Linux (Debian stable)"
|
||||
<< : *LINUX_CONTAINER
|
||||
env:
|
||||
HOST: i686-linux-gnu
|
||||
ECDH: yes
|
||||
RECOVERY: yes
|
||||
SCHNORRSIG: yes
|
||||
matrix:
|
||||
- env:
|
||||
CC: i686-linux-gnu-gcc
|
||||
- env:
|
||||
CC: clang --target=i686-pc-linux-gnu -isystem /usr/i686-linux-gnu/include
|
||||
<< : *MERGE_BASE
|
||||
test_script:
|
||||
- ./ci/cirrus.sh
|
||||
<< : *CAT_LOGS
|
||||
|
||||
task:
|
||||
name: "arm64: macOS Ventura"
|
||||
macos_instance:
|
||||
image: ghcr.io/cirruslabs/macos-ventura-base:latest
|
||||
env:
|
||||
HOMEBREW_NO_AUTO_UPDATE: 1
|
||||
HOMEBREW_NO_INSTALL_CLEANUP: 1
|
||||
# Cirrus gives us a fixed number of 4 virtual CPUs. Not that we even have that many jobs at the moment...
|
||||
MAKEFLAGS: -j5
|
||||
matrix:
|
||||
<< : *ENV_MATRIX
|
||||
env:
|
||||
ASM: no
|
||||
WITH_VALGRIND: no
|
||||
CTIMETEST: no
|
||||
matrix:
|
||||
- env:
|
||||
CC: gcc
|
||||
- env:
|
||||
CC: clang
|
||||
brew_script:
|
||||
- brew install automake libtool gcc
|
||||
<< : *MERGE_BASE
|
||||
test_script:
|
||||
- ./ci/cirrus.sh
|
||||
<< : *CAT_LOGS
|
||||
<< : *CREDITS
|
||||
|
||||
task:
|
||||
name: "s390x (big-endian): Linux (Debian stable, QEMU)"
|
||||
<< : *LINUX_CONTAINER
|
||||
env:
|
||||
WRAPPER_CMD: qemu-s390x
|
||||
SECP256K1_TEST_ITERS: 16
|
||||
HOST: s390x-linux-gnu
|
||||
WITH_VALGRIND: no
|
||||
ECDH: yes
|
||||
RECOVERY: yes
|
||||
SCHNORRSIG: yes
|
||||
CTIMETEST: no
|
||||
<< : *MERGE_BASE
|
||||
test_script:
|
||||
# https://sourceware.org/bugzilla/show_bug.cgi?id=27008
|
||||
- rm /etc/ld.so.cache
|
||||
- ./ci/cirrus.sh
|
||||
<< : *CAT_LOGS
|
||||
|
||||
task:
|
||||
name: "ARM32: Linux (Debian stable, QEMU)"
|
||||
<< : *LINUX_CONTAINER
|
||||
env:
|
||||
WRAPPER_CMD: qemu-arm
|
||||
SECP256K1_TEST_ITERS: 16
|
||||
HOST: arm-linux-gnueabihf
|
||||
WITH_VALGRIND: no
|
||||
ECDH: yes
|
||||
RECOVERY: yes
|
||||
SCHNORRSIG: yes
|
||||
CTIMETEST: no
|
||||
matrix:
|
||||
- env: {}
|
||||
- env: {EXPERIMENTAL: yes, ASM: arm}
|
||||
<< : *MERGE_BASE
|
||||
test_script:
|
||||
- ./ci/cirrus.sh
|
||||
<< : *CAT_LOGS
|
||||
|
||||
task:
|
||||
name: "ARM64: Linux (Debian stable, QEMU)"
|
||||
<< : *LINUX_CONTAINER
|
||||
env:
|
||||
WRAPPER_CMD: qemu-aarch64
|
||||
SECP256K1_TEST_ITERS: 16
|
||||
HOST: aarch64-linux-gnu
|
||||
WITH_VALGRIND: no
|
||||
ECDH: yes
|
||||
RECOVERY: yes
|
||||
SCHNORRSIG: yes
|
||||
CTIMETEST: no
|
||||
<< : *MERGE_BASE
|
||||
test_script:
|
||||
- ./ci/cirrus.sh
|
||||
<< : *CAT_LOGS
|
||||
|
||||
task:
|
||||
name: "ppc64le: Linux (Debian stable, QEMU)"
|
||||
<< : *LINUX_CONTAINER
|
||||
env:
|
||||
WRAPPER_CMD: qemu-ppc64le
|
||||
SECP256K1_TEST_ITERS: 16
|
||||
HOST: powerpc64le-linux-gnu
|
||||
WITH_VALGRIND: no
|
||||
ECDH: yes
|
||||
RECOVERY: yes
|
||||
SCHNORRSIG: yes
|
||||
CTIMETEST: no
|
||||
<< : *MERGE_BASE
|
||||
test_script:
|
||||
- ./ci/cirrus.sh
|
||||
<< : *CAT_LOGS
|
||||
|
||||
task:
|
||||
<< : *LINUX_CONTAINER
|
||||
env:
|
||||
WRAPPER_CMD: wine
|
||||
WITH_VALGRIND: no
|
||||
ECDH: yes
|
||||
RECOVERY: yes
|
||||
SCHNORRSIG: yes
|
||||
CTIMETEST: no
|
||||
matrix:
|
||||
- name: "x86_64 (mingw32-w64): Windows (Debian stable, Wine)"
|
||||
env:
|
||||
HOST: x86_64-w64-mingw32
|
||||
- name: "i686 (mingw32-w64): Windows (Debian stable, Wine)"
|
||||
env:
|
||||
HOST: i686-w64-mingw32
|
||||
<< : *MERGE_BASE
|
||||
test_script:
|
||||
- ./ci/cirrus.sh
|
||||
<< : *CAT_LOGS
|
||||
|
||||
task:
|
||||
<< : *LINUX_CONTAINER
|
||||
env:
|
||||
WRAPPER_CMD: wine
|
||||
WERROR_CFLAGS: -WX
|
||||
WITH_VALGRIND: no
|
||||
ECDH: yes
|
||||
RECOVERY: yes
|
||||
EXPERIMENTAL: yes
|
||||
SCHNORRSIG: yes
|
||||
CTIMETEST: no
|
||||
# Use a MinGW-w64 host to tell ./configure we're building for Windows.
|
||||
# This will detect some MinGW-w64 tools but then make will need only
|
||||
# the MSVC tools CC, AR and NM as specified below.
|
||||
HOST: x86_64-w64-mingw32
|
||||
CC: /opt/msvc/bin/x64/cl
|
||||
AR: /opt/msvc/bin/x64/lib
|
||||
NM: /opt/msvc/bin/x64/dumpbin -symbols -headers
|
||||
# Set non-essential options that affect the CLI messages here.
|
||||
# (They depend on the user's taste, so we don't want to set them automatically in configure.ac.)
|
||||
CFLAGS: -nologo -diagnostics:caret
|
||||
LDFLAGS: -XCClinker -nologo -XCClinker -diagnostics:caret
|
||||
matrix:
|
||||
- name: "x86_64 (MSVC): Windows (Debian stable, Wine)"
|
||||
- name: "x86_64 (MSVC): Windows (Debian stable, Wine, int128_struct)"
|
||||
env:
|
||||
WIDEMUL: int128_struct
|
||||
- name: "x86_64 (MSVC): Windows (Debian stable, Wine, int128_struct with __(u)mulh)"
|
||||
env:
|
||||
WIDEMUL: int128_struct
|
||||
CPPFLAGS: -DSECP256K1_MSVC_MULH_TEST_OVERRIDE
|
||||
- name: "i686 (MSVC): Windows (Debian stable, Wine)"
|
||||
env:
|
||||
HOST: i686-w64-mingw32
|
||||
CC: /opt/msvc/bin/x86/cl
|
||||
AR: /opt/msvc/bin/x86/lib
|
||||
NM: /opt/msvc/bin/x86/dumpbin -symbols -headers
|
||||
<< : *MERGE_BASE
|
||||
test_script:
|
||||
- ./ci/cirrus.sh
|
||||
<< : *CAT_LOGS
|
||||
|
||||
# Sanitizers
|
||||
task:
|
||||
<< : *LINUX_CONTAINER
|
||||
env:
|
||||
ECDH: yes
|
||||
RECOVERY: yes
|
||||
SCHNORRSIG: yes
|
||||
CTIMETEST: no
|
||||
matrix:
|
||||
- name: "Valgrind (memcheck)"
|
||||
container:
|
||||
cpu: 2
|
||||
env:
|
||||
# The `--error-exitcode` is required to make the test fail if valgrind found errors, otherwise it'll return 0 (https://www.valgrind.org/docs/manual/manual-core.html)
|
||||
WRAPPER_CMD: "valgrind --error-exitcode=42"
|
||||
SECP256K1_TEST_ITERS: 2
|
||||
- name: "UBSan, ASan, LSan"
|
||||
container:
|
||||
memory: 2G
|
||||
env:
|
||||
CFLAGS: "-fsanitize=undefined,address -g"
|
||||
UBSAN_OPTIONS: "print_stacktrace=1:halt_on_error=1"
|
||||
ASAN_OPTIONS: "strict_string_checks=1:detect_stack_use_after_return=1:detect_leaks=1"
|
||||
LSAN_OPTIONS: "use_unaligned=1"
|
||||
SECP256K1_TEST_ITERS: 32
|
||||
# Try to cover many configurations with just a tiny matrix.
|
||||
matrix:
|
||||
- env:
|
||||
ASM: auto
|
||||
- env:
|
||||
ASM: no
|
||||
ECMULTGENPRECISION: 2
|
||||
ECMULTWINDOW: 2
|
||||
matrix:
|
||||
- env:
|
||||
CC: clang
|
||||
- env:
|
||||
HOST: i686-linux-gnu
|
||||
CC: i686-linux-gnu-gcc
|
||||
<< : *MERGE_BASE
|
||||
test_script:
|
||||
- ./ci/cirrus.sh
|
||||
<< : *CAT_LOGS
|
||||
|
||||
# Memory sanitizers
|
||||
task:
|
||||
<< : *LINUX_CONTAINER
|
||||
name: "MSan"
|
||||
env:
|
||||
ECDH: yes
|
||||
RECOVERY: yes
|
||||
SCHNORRSIG: yes
|
||||
CTIMETEST: no
|
||||
CC: clang
|
||||
SECP256K1_TEST_ITERS: 32
|
||||
ASM: no
|
||||
container:
|
||||
memory: 2G
|
||||
matrix:
|
||||
- env:
|
||||
CFLAGS: "-fsanitize=memory -g"
|
||||
- env:
|
||||
ECMULTGENPRECISION: 2
|
||||
ECMULTWINDOW: 2
|
||||
CFLAGS: "-fsanitize=memory -g -O3"
|
||||
<< : *MERGE_BASE
|
||||
test_script:
|
||||
- ./ci/cirrus.sh
|
||||
<< : *CAT_LOGS
|
||||
|
||||
task:
|
||||
name: "C++ -fpermissive (entire project)"
|
||||
<< : *LINUX_CONTAINER
|
||||
env:
|
||||
CC: g++
|
||||
CFLAGS: -fpermissive -g
|
||||
CPPFLAGS: -DSECP256K1_CPLUSPLUS_TEST_OVERRIDE
|
||||
WERROR_CFLAGS:
|
||||
ECDH: yes
|
||||
RECOVERY: yes
|
||||
SCHNORRSIG: yes
|
||||
<< : *MERGE_BASE
|
||||
test_script:
|
||||
- ./ci/cirrus.sh
|
||||
<< : *CAT_LOGS
|
||||
|
||||
task:
|
||||
name: "C++ (public headers)"
|
||||
<< : *LINUX_CONTAINER
|
||||
test_script:
|
||||
- g++ -Werror include/*.h
|
||||
- clang -Werror -x c++-header include/*.h
|
||||
- /opt/msvc/bin/x64/cl.exe -c -WX -TP include/*.h
|
||||
|
||||
task:
|
||||
name: "sage prover"
|
||||
<< : *LINUX_CONTAINER
|
||||
test_script:
|
||||
- cd sage
|
||||
- sage prove_group_implementations.sage
|
33
secp256k1-sys/depend/secp256k1/.github/actions/install-homebrew-valgrind/action.yml
vendored
Normal file
33
secp256k1-sys/depend/secp256k1/.github/actions/install-homebrew-valgrind/action.yml
vendored
Normal file
|
@ -0,0 +1,33 @@
|
|||
name: "Install Valgrind"
|
||||
description: "Install Homebrew's Valgrind package and cache it."
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- run: |
|
||||
brew tap LouisBrunner/valgrind
|
||||
brew fetch --HEAD LouisBrunner/valgrind/valgrind
|
||||
echo "CI_HOMEBREW_CELLAR_VALGRIND=$(brew --cellar valgrind)" >> "$GITHUB_ENV"
|
||||
shell: bash
|
||||
|
||||
- run: |
|
||||
sw_vers > valgrind_fingerprint
|
||||
brew --version >> valgrind_fingerprint
|
||||
git -C "$(brew --cache)/valgrind--git" rev-parse HEAD >> valgrind_fingerprint
|
||||
cat valgrind_fingerprint
|
||||
shell: bash
|
||||
|
||||
- uses: actions/cache@v3
|
||||
id: cache
|
||||
with:
|
||||
path: ${{ env.CI_HOMEBREW_CELLAR_VALGRIND }}
|
||||
key: ${{ github.job }}-valgrind-${{ hashFiles('valgrind_fingerprint') }}
|
||||
|
||||
- if: steps.cache.outputs.cache-hit != 'true'
|
||||
run: |
|
||||
brew install --HEAD LouisBrunner/valgrind/valgrind
|
||||
shell: bash
|
||||
|
||||
- if: steps.cache.outputs.cache-hit == 'true'
|
||||
run: |
|
||||
brew link valgrind
|
||||
shell: bash
|
49
secp256k1-sys/depend/secp256k1/.github/actions/run-in-docker-action/action.yml
vendored
Normal file
49
secp256k1-sys/depend/secp256k1/.github/actions/run-in-docker-action/action.yml
vendored
Normal file
|
@ -0,0 +1,49 @@
|
|||
name: 'Run in Docker with environment'
|
||||
description: 'Run a command in a Docker container, while passing explicitly set environment variables into the container.'
|
||||
inputs:
|
||||
dockerfile:
|
||||
description: 'A Dockerfile that defines an image'
|
||||
required: true
|
||||
tag:
|
||||
description: 'A tag of an image'
|
||||
required: true
|
||||
command:
|
||||
description: 'A command to run in a container'
|
||||
required: false
|
||||
default: ./ci/ci.sh
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- uses: docker/setup-buildx-action@v2
|
||||
|
||||
- uses: docker/build-push-action@v4
|
||||
id: main_builder
|
||||
continue-on-error: true
|
||||
with:
|
||||
context: .
|
||||
file: ${{ inputs.dockerfile }}
|
||||
tags: ${{ inputs.tag }}
|
||||
load: true
|
||||
cache-from: type=gha
|
||||
|
||||
- uses: docker/build-push-action@v4
|
||||
id: retry_builder
|
||||
if: steps.main_builder.outcome == 'failure'
|
||||
with:
|
||||
context: .
|
||||
file: ${{ inputs.dockerfile }}
|
||||
tags: ${{ inputs.tag }}
|
||||
load: true
|
||||
cache-from: type=gha
|
||||
|
||||
- # Tell Docker to pass environment variables in `env` into the container.
|
||||
run: >
|
||||
docker run \
|
||||
$(echo '${{ toJSON(env) }}' | jq -r 'keys[] | "--env \(.) "') \
|
||||
--volume ${{ github.workspace }}:${{ github.workspace }} \
|
||||
--workdir ${{ github.workspace }} \
|
||||
${{ inputs.tag }} bash -c "
|
||||
git config --global --add safe.directory ${{ github.workspace }}
|
||||
${{ inputs.command }}
|
||||
"
|
||||
shell: bash
|
|
@ -0,0 +1,806 @@
|
|||
name: CI
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- '**'
|
||||
tags-ignore:
|
||||
- '**'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.event_name != 'pull_request' && github.run_id || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
### compiler options
|
||||
HOST:
|
||||
WRAPPER_CMD:
|
||||
# Specific warnings can be disabled with -Wno-error=foo.
|
||||
# -pedantic-errors is not equivalent to -Werror=pedantic and thus not implied by -Werror according to the GCC manual.
|
||||
WERROR_CFLAGS: '-Werror -pedantic-errors'
|
||||
MAKEFLAGS: '-j4'
|
||||
BUILD: 'check'
|
||||
### secp256k1 config
|
||||
ECMULTWINDOW: 'auto'
|
||||
ECMULTGENPRECISION: 'auto'
|
||||
ASM: 'no'
|
||||
WIDEMUL: 'auto'
|
||||
WITH_VALGRIND: 'yes'
|
||||
EXTRAFLAGS:
|
||||
### secp256k1 modules
|
||||
EXPERIMENTAL: 'no'
|
||||
ECDH: 'no'
|
||||
RECOVERY: 'no'
|
||||
SCHNORRSIG: 'no'
|
||||
ELLSWIFT: 'no'
|
||||
### test options
|
||||
SECP256K1_TEST_ITERS:
|
||||
BENCH: 'yes'
|
||||
SECP256K1_BENCH_ITERS: 2
|
||||
CTIMETESTS: 'yes'
|
||||
# Compile and run the examples.
|
||||
EXAMPLES: 'yes'
|
||||
|
||||
jobs:
|
||||
docker_cache:
|
||||
name: "Build Docker image"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
with:
|
||||
# See: https://github.com/moby/buildkit/issues/3969.
|
||||
driver-opts: |
|
||||
network=host
|
||||
|
||||
- name: Build container
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
file: ./ci/linux-debian.Dockerfile
|
||||
tags: linux-debian-image
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=min
|
||||
|
||||
linux_debian:
|
||||
name: "x86_64: Linux (Debian stable)"
|
||||
runs-on: ubuntu-latest
|
||||
needs: docker_cache
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
configuration:
|
||||
- env_vars: { WIDEMUL: 'int64', RECOVERY: 'yes' }
|
||||
- env_vars: { WIDEMUL: 'int64', ECDH: 'yes', SCHNORRSIG: 'yes', ELLSWIFT: 'yes' }
|
||||
- env_vars: { WIDEMUL: 'int128' }
|
||||
- env_vars: { WIDEMUL: 'int128_struct', ELLSWIFT: 'yes' }
|
||||
- env_vars: { WIDEMUL: 'int128', RECOVERY: 'yes', SCHNORRSIG: 'yes', ELLSWIFT: 'yes' }
|
||||
- env_vars: { WIDEMUL: 'int128', ECDH: 'yes', SCHNORRSIG: 'yes' }
|
||||
- env_vars: { WIDEMUL: 'int128', ASM: 'x86_64', ELLSWIFT: 'yes' }
|
||||
- env_vars: { RECOVERY: 'yes', SCHNORRSIG: 'yes' }
|
||||
- env_vars: { CTIMETESTS: 'no', RECOVERY: 'yes', ECDH: 'yes', SCHNORRSIG: 'yes', CPPFLAGS: '-DVERIFY' }
|
||||
- env_vars: { BUILD: 'distcheck', WITH_VALGRIND: 'no', CTIMETESTS: 'no', BENCH: 'no' }
|
||||
- env_vars: { CPPFLAGS: '-DDETERMINISTIC' }
|
||||
- env_vars: { CFLAGS: '-O0', CTIMETESTS: 'no' }
|
||||
- env_vars: { CFLAGS: '-O1', RECOVERY: 'yes', ECDH: 'yes', SCHNORRSIG: 'yes', ELLSWIFT: 'yes' }
|
||||
- env_vars: { ECMULTGENPRECISION: 2, ECMULTWINDOW: 2 }
|
||||
- env_vars: { ECMULTGENPRECISION: 8, ECMULTWINDOW: 4 }
|
||||
cc:
|
||||
- 'gcc'
|
||||
- 'clang'
|
||||
- 'gcc-snapshot'
|
||||
- 'clang-snapshot'
|
||||
|
||||
env:
|
||||
CC: ${{ matrix.cc }}
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: CI script
|
||||
env: ${{ matrix.configuration.env_vars }}
|
||||
uses: ./.github/actions/run-in-docker-action
|
||||
with:
|
||||
dockerfile: ./ci/linux-debian.Dockerfile
|
||||
tag: linux-debian-image
|
||||
|
||||
- run: cat tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat noverify_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat exhaustive_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat ctime_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat bench.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat config.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat test_env.log || true
|
||||
if: ${{ always() }}
|
||||
- name: CI env
|
||||
run: env
|
||||
if: ${{ always() }}
|
||||
|
||||
i686_debian:
|
||||
name: "i686: Linux (Debian stable)"
|
||||
runs-on: ubuntu-latest
|
||||
needs: docker_cache
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
cc:
|
||||
- 'i686-linux-gnu-gcc'
|
||||
- 'clang --target=i686-pc-linux-gnu -isystem /usr/i686-linux-gnu/include'
|
||||
|
||||
env:
|
||||
HOST: 'i686-linux-gnu'
|
||||
ECDH: 'yes'
|
||||
RECOVERY: 'yes'
|
||||
SCHNORRSIG: 'yes'
|
||||
ELLSWIFT: 'yes'
|
||||
CC: ${{ matrix.cc }}
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: CI script
|
||||
uses: ./.github/actions/run-in-docker-action
|
||||
with:
|
||||
dockerfile: ./ci/linux-debian.Dockerfile
|
||||
tag: linux-debian-image
|
||||
|
||||
- run: cat tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat noverify_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat exhaustive_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat ctime_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat bench.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat config.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat test_env.log || true
|
||||
if: ${{ always() }}
|
||||
- name: CI env
|
||||
run: env
|
||||
if: ${{ always() }}
|
||||
|
||||
s390x_debian:
|
||||
name: "s390x (big-endian): Linux (Debian stable, QEMU)"
|
||||
runs-on: ubuntu-latest
|
||||
needs: docker_cache
|
||||
|
||||
env:
|
||||
WRAPPER_CMD: 'qemu-s390x'
|
||||
SECP256K1_TEST_ITERS: 16
|
||||
HOST: 's390x-linux-gnu'
|
||||
WITH_VALGRIND: 'no'
|
||||
ECDH: 'yes'
|
||||
RECOVERY: 'yes'
|
||||
SCHNORRSIG: 'yes'
|
||||
ELLSWIFT: 'yes'
|
||||
CTIMETESTS: 'no'
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: CI script
|
||||
uses: ./.github/actions/run-in-docker-action
|
||||
with:
|
||||
dockerfile: ./ci/linux-debian.Dockerfile
|
||||
tag: linux-debian-image
|
||||
|
||||
- run: cat tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat noverify_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat exhaustive_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat ctime_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat bench.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat config.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat test_env.log || true
|
||||
if: ${{ always() }}
|
||||
- name: CI env
|
||||
run: env
|
||||
if: ${{ always() }}
|
||||
|
||||
arm32_debian:
|
||||
name: "ARM32: Linux (Debian stable, QEMU)"
|
||||
runs-on: ubuntu-latest
|
||||
needs: docker_cache
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
configuration:
|
||||
- env_vars: {}
|
||||
- env_vars: { EXPERIMENTAL: 'yes', ASM: 'arm32' }
|
||||
|
||||
env:
|
||||
WRAPPER_CMD: 'qemu-arm'
|
||||
SECP256K1_TEST_ITERS: 16
|
||||
HOST: 'arm-linux-gnueabihf'
|
||||
WITH_VALGRIND: 'no'
|
||||
ECDH: 'yes'
|
||||
RECOVERY: 'yes'
|
||||
SCHNORRSIG: 'yes'
|
||||
ELLSWIFT: 'yes'
|
||||
CTIMETESTS: 'no'
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: CI script
|
||||
env: ${{ matrix.configuration.env_vars }}
|
||||
uses: ./.github/actions/run-in-docker-action
|
||||
with:
|
||||
dockerfile: ./ci/linux-debian.Dockerfile
|
||||
tag: linux-debian-image
|
||||
|
||||
- run: cat tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat noverify_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat exhaustive_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat ctime_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat bench.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat config.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat test_env.log || true
|
||||
if: ${{ always() }}
|
||||
- name: CI env
|
||||
run: env
|
||||
if: ${{ always() }}
|
||||
|
||||
arm64_debian:
|
||||
name: "ARM64: Linux (Debian stable, QEMU)"
|
||||
runs-on: ubuntu-latest
|
||||
needs: docker_cache
|
||||
|
||||
env:
|
||||
WRAPPER_CMD: 'qemu-aarch64'
|
||||
SECP256K1_TEST_ITERS: 16
|
||||
HOST: 'aarch64-linux-gnu'
|
||||
WITH_VALGRIND: 'no'
|
||||
ECDH: 'yes'
|
||||
RECOVERY: 'yes'
|
||||
SCHNORRSIG: 'yes'
|
||||
ELLSWIFT: 'yes'
|
||||
CTIMETESTS: 'no'
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
configuration:
|
||||
- env_vars: { } # gcc
|
||||
- env_vars: # clang
|
||||
CC: 'clang --target=aarch64-linux-gnu'
|
||||
- env_vars: # clang-snapshot
|
||||
CC: 'clang-snapshot --target=aarch64-linux-gnu'
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: CI script
|
||||
env: ${{ matrix.configuration.env_vars }}
|
||||
uses: ./.github/actions/run-in-docker-action
|
||||
with:
|
||||
dockerfile: ./ci/linux-debian.Dockerfile
|
||||
tag: linux-debian-image
|
||||
|
||||
- run: cat tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat noverify_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat exhaustive_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat ctime_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat bench.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat config.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat test_env.log || true
|
||||
if: ${{ always() }}
|
||||
- name: CI env
|
||||
run: env
|
||||
if: ${{ always() }}
|
||||
|
||||
ppc64le_debian:
|
||||
name: "ppc64le: Linux (Debian stable, QEMU)"
|
||||
runs-on: ubuntu-latest
|
||||
needs: docker_cache
|
||||
|
||||
env:
|
||||
WRAPPER_CMD: 'qemu-ppc64le'
|
||||
SECP256K1_TEST_ITERS: 16
|
||||
HOST: 'powerpc64le-linux-gnu'
|
||||
WITH_VALGRIND: 'no'
|
||||
ECDH: 'yes'
|
||||
RECOVERY: 'yes'
|
||||
SCHNORRSIG: 'yes'
|
||||
ELLSWIFT: 'yes'
|
||||
CTIMETESTS: 'no'
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: CI script
|
||||
uses: ./.github/actions/run-in-docker-action
|
||||
with:
|
||||
dockerfile: ./ci/linux-debian.Dockerfile
|
||||
tag: linux-debian-image
|
||||
|
||||
- run: cat tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat noverify_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat exhaustive_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat ctime_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat bench.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat config.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat test_env.log || true
|
||||
if: ${{ always() }}
|
||||
- name: CI env
|
||||
run: env
|
||||
if: ${{ always() }}
|
||||
|
||||
valgrind_debian:
|
||||
name: "Valgrind (memcheck)"
|
||||
runs-on: ubuntu-latest
|
||||
needs: docker_cache
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
configuration:
|
||||
- env_vars: { CC: 'clang', ASM: 'auto' }
|
||||
- env_vars: { CC: 'i686-linux-gnu-gcc', HOST: 'i686-linux-gnu', ASM: 'auto' }
|
||||
- env_vars: { CC: 'clang', ASM: 'no', ECMULTGENPRECISION: 2, ECMULTWINDOW: 2 }
|
||||
- env_vars: { CC: 'i686-linux-gnu-gcc', HOST: 'i686-linux-gnu', ASM: 'no', ECMULTGENPRECISION: 2, ECMULTWINDOW: 2 }
|
||||
|
||||
env:
|
||||
# The `--error-exitcode` is required to make the test fail if valgrind found errors,
|
||||
# otherwise it will return 0 (https://www.valgrind.org/docs/manual/manual-core.html).
|
||||
WRAPPER_CMD: 'valgrind --error-exitcode=42'
|
||||
ECDH: 'yes'
|
||||
RECOVERY: 'yes'
|
||||
SCHNORRSIG: 'yes'
|
||||
ELLSWIFT: 'yes'
|
||||
CTIMETESTS: 'no'
|
||||
SECP256K1_TEST_ITERS: 2
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: CI script
|
||||
env: ${{ matrix.configuration.env_vars }}
|
||||
uses: ./.github/actions/run-in-docker-action
|
||||
with:
|
||||
dockerfile: ./ci/linux-debian.Dockerfile
|
||||
tag: linux-debian-image
|
||||
|
||||
- run: cat tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat noverify_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat exhaustive_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat ctime_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat bench.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat config.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat test_env.log || true
|
||||
if: ${{ always() }}
|
||||
- name: CI env
|
||||
run: env
|
||||
if: ${{ always() }}
|
||||
|
||||
sanitizers_debian:
|
||||
name: "UBSan, ASan, LSan"
|
||||
runs-on: ubuntu-latest
|
||||
needs: docker_cache
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
configuration:
|
||||
- env_vars: { CC: 'clang', ASM: 'auto' }
|
||||
- env_vars: { CC: 'i686-linux-gnu-gcc', HOST: 'i686-linux-gnu', ASM: 'auto' }
|
||||
- env_vars: { CC: 'clang', ASM: 'no', ECMULTGENPRECISION: 2, ECMULTWINDOW: 2 }
|
||||
- env_vars: { CC: 'i686-linux-gnu-gcc', HOST: 'i686-linux-gnu', ASM: 'no', ECMULTGENPRECISION: 2, ECMULTWINDOW: 2 }
|
||||
|
||||
env:
|
||||
ECDH: 'yes'
|
||||
RECOVERY: 'yes'
|
||||
SCHNORRSIG: 'yes'
|
||||
ELLSWIFT: 'yes'
|
||||
CTIMETESTS: 'no'
|
||||
CFLAGS: '-fsanitize=undefined,address -g'
|
||||
UBSAN_OPTIONS: 'print_stacktrace=1:halt_on_error=1'
|
||||
ASAN_OPTIONS: 'strict_string_checks=1:detect_stack_use_after_return=1:detect_leaks=1'
|
||||
LSAN_OPTIONS: 'use_unaligned=1'
|
||||
SECP256K1_TEST_ITERS: 32
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: CI script
|
||||
env: ${{ matrix.configuration.env_vars }}
|
||||
uses: ./.github/actions/run-in-docker-action
|
||||
with:
|
||||
dockerfile: ./ci/linux-debian.Dockerfile
|
||||
tag: linux-debian-image
|
||||
|
||||
- run: cat tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat noverify_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat exhaustive_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat ctime_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat bench.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat config.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat test_env.log || true
|
||||
if: ${{ always() }}
|
||||
- name: CI env
|
||||
run: env
|
||||
if: ${{ always() }}
|
||||
|
||||
msan_debian:
|
||||
name: "MSan"
|
||||
runs-on: ubuntu-latest
|
||||
needs: docker_cache
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
configuration:
|
||||
- env_vars:
|
||||
CFLAGS: '-fsanitize=memory -fsanitize-recover=memory -g'
|
||||
- env_vars:
|
||||
ECMULTGENPRECISION: 2
|
||||
ECMULTWINDOW: 2
|
||||
CFLAGS: '-fsanitize=memory -fsanitize-recover=memory -g -O3'
|
||||
|
||||
env:
|
||||
ECDH: 'yes'
|
||||
RECOVERY: 'yes'
|
||||
SCHNORRSIG: 'yes'
|
||||
ELLSWIFT: 'yes'
|
||||
CTIMETESTS: 'yes'
|
||||
CC: 'clang'
|
||||
SECP256K1_TEST_ITERS: 32
|
||||
ASM: 'no'
|
||||
WITH_VALGRIND: 'no'
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: CI script
|
||||
env: ${{ matrix.configuration.env_vars }}
|
||||
uses: ./.github/actions/run-in-docker-action
|
||||
with:
|
||||
dockerfile: ./ci/linux-debian.Dockerfile
|
||||
tag: linux-debian-image
|
||||
|
||||
- run: cat tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat noverify_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat exhaustive_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat ctime_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat bench.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat config.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat test_env.log || true
|
||||
if: ${{ always() }}
|
||||
- name: CI env
|
||||
run: env
|
||||
if: ${{ always() }}
|
||||
|
||||
mingw_debian:
|
||||
name: ${{ matrix.configuration.job_name }}
|
||||
runs-on: ubuntu-latest
|
||||
needs: docker_cache
|
||||
|
||||
env:
|
||||
WRAPPER_CMD: 'wine'
|
||||
WITH_VALGRIND: 'no'
|
||||
ECDH: 'yes'
|
||||
RECOVERY: 'yes'
|
||||
SCHNORRSIG: 'yes'
|
||||
ELLSWIFT: 'yes'
|
||||
CTIMETESTS: 'no'
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
configuration:
|
||||
- job_name: 'x86_64 (mingw32-w64): Windows (Debian stable, Wine)'
|
||||
env_vars:
|
||||
HOST: 'x86_64-w64-mingw32'
|
||||
- job_name: 'i686 (mingw32-w64): Windows (Debian stable, Wine)'
|
||||
env_vars:
|
||||
HOST: 'i686-w64-mingw32'
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: CI script
|
||||
env: ${{ matrix.configuration.env_vars }}
|
||||
uses: ./.github/actions/run-in-docker-action
|
||||
with:
|
||||
dockerfile: ./ci/linux-debian.Dockerfile
|
||||
tag: linux-debian-image
|
||||
|
||||
- run: cat tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat noverify_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat exhaustive_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat ctime_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat bench.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat config.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat test_env.log || true
|
||||
if: ${{ always() }}
|
||||
- name: CI env
|
||||
run: env
|
||||
if: ${{ always() }}
|
||||
|
||||
macos-native:
|
||||
name: "x86_64: macOS Monterey"
|
||||
# See: https://github.com/actions/runner-images#available-images.
|
||||
runs-on: macos-12 # Use M1 once available https://github.com/github/roadmap/issues/528
|
||||
|
||||
env:
|
||||
CC: 'clang'
|
||||
HOMEBREW_NO_AUTO_UPDATE: 1
|
||||
HOMEBREW_NO_INSTALL_CLEANUP: 1
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
env_vars:
|
||||
- { WIDEMUL: 'int64', RECOVERY: 'yes', ECDH: 'yes', SCHNORRSIG: 'yes', ELLSWIFT: 'yes' }
|
||||
- { WIDEMUL: 'int128_struct', ECMULTGENPRECISION: 2, ECMULTWINDOW: 4 }
|
||||
- { WIDEMUL: 'int128', ECDH: 'yes', SCHNORRSIG: 'yes', ELLSWIFT: 'yes' }
|
||||
- { WIDEMUL: 'int128', RECOVERY: 'yes' }
|
||||
- { WIDEMUL: 'int128', RECOVERY: 'yes', ECDH: 'yes', SCHNORRSIG: 'yes', ELLSWIFT: 'yes' }
|
||||
- { WIDEMUL: 'int128', RECOVERY: 'yes', ECDH: 'yes', SCHNORRSIG: 'yes', ELLSWIFT: 'yes', CC: 'gcc' }
|
||||
- { WIDEMUL: 'int128', RECOVERY: 'yes', ECDH: 'yes', SCHNORRSIG: 'yes', ELLSWIFT: 'yes', WRAPPER_CMD: 'valgrind --error-exitcode=42', SECP256K1_TEST_ITERS: 2 }
|
||||
- { WIDEMUL: 'int128', RECOVERY: 'yes', ECDH: 'yes', SCHNORRSIG: 'yes', ELLSWIFT: 'yes', CC: 'gcc', WRAPPER_CMD: 'valgrind --error-exitcode=42', SECP256K1_TEST_ITERS: 2 }
|
||||
- { WIDEMUL: 'int128', RECOVERY: 'yes', ECDH: 'yes', SCHNORRSIG: 'yes', ELLSWIFT: 'yes', CPPFLAGS: '-DVERIFY', CTIMETESTS: 'no' }
|
||||
- BUILD: 'distcheck'
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install Homebrew packages
|
||||
run: |
|
||||
brew install automake libtool gcc
|
||||
ln -s $(brew --prefix gcc)/bin/gcc-?? /usr/local/bin/gcc
|
||||
|
||||
- name: Install and cache Valgrind
|
||||
uses: ./.github/actions/install-homebrew-valgrind
|
||||
|
||||
- name: CI script
|
||||
env: ${{ matrix.env_vars }}
|
||||
run: ./ci/ci.sh
|
||||
|
||||
- run: cat tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat noverify_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat exhaustive_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat ctime_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat bench.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat config.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat test_env.log || true
|
||||
if: ${{ always() }}
|
||||
- name: CI env
|
||||
run: env
|
||||
if: ${{ always() }}
|
||||
|
||||
win64-native:
|
||||
name: ${{ matrix.configuration.job_name }}
|
||||
# See: https://github.com/actions/runner-images#available-images.
|
||||
runs-on: windows-2022
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
configuration:
|
||||
- job_name: 'x64 (MSVC): Windows (VS 2022, shared)'
|
||||
cmake_options: '-A x64 -DBUILD_SHARED_LIBS=ON'
|
||||
- job_name: 'x64 (MSVC): Windows (VS 2022, static)'
|
||||
cmake_options: '-A x64 -DBUILD_SHARED_LIBS=OFF'
|
||||
- job_name: 'x64 (MSVC): Windows (VS 2022, int128_struct)'
|
||||
cmake_options: '-A x64 -DSECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY=int128_struct'
|
||||
- job_name: 'x64 (MSVC): Windows (VS 2022, int128_struct with __(u)mulh)'
|
||||
cmake_options: '-A x64 -DSECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY=int128_struct'
|
||||
cpp_flags: '/DSECP256K1_MSVC_MULH_TEST_OVERRIDE'
|
||||
- job_name: 'x86 (MSVC): Windows (VS 2022)'
|
||||
cmake_options: '-A Win32'
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Generate buildsystem
|
||||
run: cmake -E env CFLAGS="/WX ${{ matrix.configuration.cpp_flags }}" cmake -B build -DSECP256K1_ENABLE_MODULE_RECOVERY=ON -DSECP256K1_BUILD_EXAMPLES=ON ${{ matrix.configuration.cmake_options }}
|
||||
|
||||
- name: Build
|
||||
run: cmake --build build --config RelWithDebInfo -- /p:UseMultiToolTask=true /maxCpuCount
|
||||
|
||||
- name: Binaries info
|
||||
# Use the bash shell included with Git for Windows.
|
||||
shell: bash
|
||||
run: |
|
||||
cd build/src/RelWithDebInfo && file *tests.exe bench*.exe libsecp256k1-*.dll || true
|
||||
|
||||
- name: Check
|
||||
run: |
|
||||
ctest -C RelWithDebInfo --test-dir build -j ([int]$env:NUMBER_OF_PROCESSORS + 1)
|
||||
build\src\RelWithDebInfo\bench_ecmult.exe
|
||||
build\src\RelWithDebInfo\bench_internal.exe
|
||||
build\src\RelWithDebInfo\bench.exe
|
||||
|
||||
win64-native-headers:
|
||||
name: "x64 (MSVC): C++ (public headers)"
|
||||
# See: https://github.com/actions/runner-images#available-images.
|
||||
runs-on: windows-2022
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Add cl.exe to PATH
|
||||
uses: ilammy/msvc-dev-cmd@v1
|
||||
|
||||
- name: C++ (public headers)
|
||||
run: |
|
||||
cl.exe -c -WX -TP include/*.h
|
||||
|
||||
cxx_fpermissive_debian:
|
||||
name: "C++ -fpermissive (entire project)"
|
||||
runs-on: ubuntu-latest
|
||||
needs: docker_cache
|
||||
|
||||
env:
|
||||
CC: 'g++'
|
||||
CFLAGS: '-fpermissive -g'
|
||||
CPPFLAGS: '-DSECP256K1_CPLUSPLUS_TEST_OVERRIDE'
|
||||
WERROR_CFLAGS:
|
||||
ECDH: 'yes'
|
||||
RECOVERY: 'yes'
|
||||
SCHNORRSIG: 'yes'
|
||||
ELLSWIFT: 'yes'
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: CI script
|
||||
uses: ./.github/actions/run-in-docker-action
|
||||
with:
|
||||
dockerfile: ./ci/linux-debian.Dockerfile
|
||||
tag: linux-debian-image
|
||||
|
||||
- run: cat tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat noverify_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat exhaustive_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat ctime_tests.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat bench.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat config.log || true
|
||||
if: ${{ always() }}
|
||||
- run: cat test_env.log || true
|
||||
if: ${{ always() }}
|
||||
- name: CI env
|
||||
run: env
|
||||
if: ${{ always() }}
|
||||
|
||||
cxx_headers_debian:
|
||||
name: "C++ (public headers)"
|
||||
runs-on: ubuntu-latest
|
||||
needs: docker_cache
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: CI script
|
||||
uses: ./.github/actions/run-in-docker-action
|
||||
with:
|
||||
dockerfile: ./ci/linux-debian.Dockerfile
|
||||
tag: linux-debian-image
|
||||
command: |
|
||||
g++ -Werror include/*.h
|
||||
clang -Werror -x c++-header include/*.h
|
||||
|
||||
sage:
|
||||
name: "SageMath prover"
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: sagemath/sagemath:latest
|
||||
options: --user root
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: CI script
|
||||
run: |
|
||||
cd sage
|
||||
sage prove_group_implementations.sage
|
||||
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- run: ./autogen.sh && ./configure --enable-dev-mode && make distcheck
|
||||
|
||||
- name: Check installation with Autotools
|
||||
env:
|
||||
CI_INSTALL: ${{ runner.temp }}/${{ github.run_id }}${{ github.action }}
|
||||
run: |
|
||||
./autogen.sh && ./configure --prefix=${{ env.CI_INSTALL }} && make clean && make install && ls -RlAh ${{ env.CI_INSTALL }}
|
||||
gcc -o ecdsa examples/ecdsa.c $(PKG_CONFIG_PATH=${{ env.CI_INSTALL }}/lib/pkgconfig pkg-config --cflags --libs libsecp256k1) -Wl,-rpath,"${{ env.CI_INSTALL }}/lib" && ./ecdsa
|
||||
|
||||
- name: Check installation with CMake
|
||||
env:
|
||||
CI_BUILD: ${{ runner.temp }}/${{ github.run_id }}${{ github.action }}/build
|
||||
CI_INSTALL: ${{ runner.temp }}/${{ github.run_id }}${{ github.action }}/install
|
||||
run: |
|
||||
cmake -B ${{ env.CI_BUILD }} -DCMAKE_INSTALL_PREFIX=${{ env.CI_INSTALL }} && cmake --build ${{ env.CI_BUILD }} --target install && ls -RlAh ${{ env.CI_INSTALL }}
|
||||
gcc -o ecdsa examples/ecdsa.c -I ${{ env.CI_INSTALL }}/include -L ${{ env.CI_INSTALL }}/lib*/ -l secp256k1 -Wl,-rpath,"${{ env.CI_INSTALL }}/lib",-rpath,"${{ env.CI_INSTALL }}/lib64" && ./ecdsa
|
|
@ -1,11 +1,12 @@
|
|||
bench
|
||||
bench_ecmult
|
||||
bench_internal
|
||||
noverify_tests
|
||||
tests
|
||||
exhaustive_tests
|
||||
precompute_ecmult_gen
|
||||
precompute_ecmult
|
||||
valgrind_ctime_test
|
||||
ctime_tests
|
||||
ecdh_example
|
||||
ecdsa_example
|
||||
schnorr_example
|
||||
|
@ -42,8 +43,6 @@ coverage.*.html
|
|||
*.gcno
|
||||
*.gcov
|
||||
|
||||
src/libsecp256k1-config.h
|
||||
src/libsecp256k1-config.h.in
|
||||
build-aux/ar-lib
|
||||
build-aux/config.guess
|
||||
build-aux/config.sub
|
||||
|
@ -58,5 +57,9 @@ build-aux/m4/ltversion.m4
|
|||
build-aux/missing
|
||||
build-aux/compile
|
||||
build-aux/test-driver
|
||||
src/stamp-h1
|
||||
libsecp256k1.pc
|
||||
|
||||
### CMake
|
||||
/CMakeUserPresets.json
|
||||
# Default CMake build directory.
|
||||
/build
|
||||
|
|
|
@ -1,28 +1,113 @@
|
|||
# Changelog
|
||||
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
## [Unreleased]
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [0.4.0] - 2023-09-04
|
||||
|
||||
#### Added
|
||||
- New module `ellswift` implements ElligatorSwift encoding for public keys and x-only Diffie-Hellman key exchange for them.
|
||||
ElligatorSwift permits representing secp256k1 public keys as 64-byte arrays which cannot be distinguished from uniformly random. See:
|
||||
- Header file `include/secp256k1_ellswift.h` which defines the new API.
|
||||
- Document `doc/ellswift.md` which explains the mathematical background of the scheme.
|
||||
- The [paper](https://eprint.iacr.org/2022/759) on which the scheme is based.
|
||||
- We now test the library with unreleased development snapshots of GCC and Clang. This gives us an early chance to catch miscompilations and constant-time issues introduced by the compiler (such as those that led to the previous two releases).
|
||||
|
||||
#### Fixed
|
||||
- Fixed symbol visibility in Windows DLL builds, where three internal library symbols were wrongly exported.
|
||||
|
||||
#### Changed
|
||||
- When consuming libsecp256k1 as a static library on Windows, the user must now define the `SECP256K1_STATIC` macro before including `secp256k1.h`.
|
||||
|
||||
#### ABI Compatibility
|
||||
This release is backward compatible with the ABI of 0.3.0, 0.3.1, and 0.3.2. Symbol visibility is now believed to be handled properly on supported platforms and is now considered to be part of the ABI. Please report any improperly exported symbols as a bug.
|
||||
|
||||
## [0.3.2] - 2023-05-13
|
||||
We strongly recommend updating to 0.3.2 if you use or plan to use GCC >=13 to compile libsecp256k1. When in doubt, check the GCC version using `gcc -v`.
|
||||
|
||||
#### Security
|
||||
- Module `ecdh`: Fix "constant-timeness" issue with GCC 13.1 (and potentially future versions of GCC) that could leave applications using libsecp256k1's ECDH module vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow during ECDH computations when libsecp256k1 is compiled with GCC 13.1.
|
||||
|
||||
#### Fixed
|
||||
- Fixed an old bug that permitted compilers to potentially output bad assembly code on x86_64. In theory, it could lead to a crash or a read of unrelated memory, but this has never been observed on any compilers so far.
|
||||
|
||||
#### Changed
|
||||
- Various improvements and changes to CMake builds. CMake builds remain experimental.
|
||||
- Made API versioning consistent with GNU Autotools builds.
|
||||
- Switched to `BUILD_SHARED_LIBS` variable for controlling whether to build a static or a shared library.
|
||||
- Added `SECP256K1_INSTALL` variable for the controlling whether to install the build artefacts.
|
||||
- Renamed asm build option `arm` to `arm32`. Use `--with-asm=arm32` instead of `--with-asm=arm` (GNU Autotools), and `-DSECP256K1_ASM=arm32` instead of `-DSECP256K1_ASM=arm` (CMake).
|
||||
|
||||
#### ABI Compatibility
|
||||
The ABI is compatible with versions 0.3.0 and 0.3.1.
|
||||
|
||||
## [0.3.1] - 2023-04-10
|
||||
We strongly recommend updating to 0.3.1 if you use or plan to use Clang >=14 to compile libsecp256k1, e.g., Xcode >=14 on macOS has Clang >=14. When in doubt, check the Clang version using `clang -v`.
|
||||
|
||||
#### Security
|
||||
- Fix "constant-timeness" issue with Clang >=14 that could leave applications using libsecp256k1 vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow and secret-dependent memory accesses in conditional moves of memory objects when libsecp256k1 is compiled with Clang >=14.
|
||||
|
||||
#### Added
|
||||
- Added tests against [Project Wycheproof's](https://github.com/google/wycheproof/) set of ECDSA test vectors (Bitcoin "low-S" variant), a fixed set of test cases designed to trigger various edge cases.
|
||||
|
||||
#### Changed
|
||||
- Increased minimum required CMake version to 3.13. CMake builds remain experimental.
|
||||
|
||||
#### ABI Compatibility
|
||||
The ABI is compatible with version 0.3.0.
|
||||
|
||||
## [0.3.0] - 2023-03-08
|
||||
|
||||
#### Added
|
||||
- Added experimental support for CMake builds. Traditional GNU Autotools builds (`./configure` and `make`) remain fully supported.
|
||||
- Usage examples: Added a recommended method for securely clearing sensitive data, e.g., secret keys, from memory.
|
||||
- Tests: Added a new test binary `noverify_tests`. This binary runs the tests without some additional checks present in the ordinary `tests` binary and is thereby closer to production binaries. The `noverify_tests` binary is automatically run as part of the `make check` target.
|
||||
|
||||
#### Fixed
|
||||
- Fixed declarations of API variables for MSVC (`__declspec(dllimport)`). This fixes MSVC builds of programs which link against a libsecp256k1 DLL dynamically and use API variables (and not only API functions). Unfortunately, the MSVC linker now will emit warning `LNK4217` when trying to link against libsecp256k1 statically. Pass `/ignore:4217` to the linker to suppress this warning.
|
||||
|
||||
#### Changed
|
||||
- Forbade cloning or destroying `secp256k1_context_static`. Create a new context instead of cloning the static context. (If this change breaks your code, your code is probably wrong.)
|
||||
- Forbade randomizing (copies of) `secp256k1_context_static`. Randomizing a copy of `secp256k1_context_static` did not have any effect and did not provide defense-in-depth protection against side-channel attacks. Create a new context if you want to benefit from randomization.
|
||||
|
||||
#### Removed
|
||||
- Removed the configuration header `src/libsecp256k1-config.h`. We recommend passing flags to `./configure` or `cmake` to set configuration options (see `./configure --help` or `cmake -LH`). If you cannot or do not want to use one of the supported build systems, pass configuration flags such as `-DSECP256K1_ENABLE_MODULE_SCHNORRSIG` manually to the compiler (see the file `configure.ac` for supported flags).
|
||||
|
||||
#### ABI Compatibility
|
||||
Due to changes in the API regarding `secp256k1_context_static` described above, the ABI is *not* compatible with previous versions.
|
||||
|
||||
## [0.2.0] - 2022-12-12
|
||||
|
||||
### Added
|
||||
#### Added
|
||||
- Added usage examples for common use cases in a new `examples/` directory.
|
||||
- Added `secp256k1_selftest`, to be used in conjunction with `secp256k1_context_static`.
|
||||
- Added support for 128-bit wide multiplication on MSVC for x86_64 and arm64, giving roughly a 20% speedup on those platforms.
|
||||
|
||||
### Changed
|
||||
- Enabled modules schnorrsig, extrakeys and ECDH by default in `./configure`.
|
||||
#### Changed
|
||||
- Enabled modules `schnorrsig`, `extrakeys` and `ecdh` by default in `./configure`.
|
||||
- The `secp256k1_nonce_function_rfc6979` nonce function, used by default by `secp256k1_ecdsa_sign`, now reduces the message hash modulo the group order to match the specification. This only affects improper use of ECDSA signing API.
|
||||
|
||||
### Deprecated
|
||||
#### Deprecated
|
||||
- Deprecated context flags `SECP256K1_CONTEXT_VERIFY` and `SECP256K1_CONTEXT_SIGN`. Use `SECP256K1_CONTEXT_NONE` instead.
|
||||
- Renamed `secp256k1_context_no_precomp` to `secp256k1_context_static`.
|
||||
- Module `schnorrsig`: renamed `secp256k1_schnorrsig_sign` to `secp256k1_schnorrsig_sign32`.
|
||||
|
||||
### ABI Compatibility
|
||||
|
||||
#### ABI Compatibility
|
||||
Since this is the first release, we do not compare application binary interfaces.
|
||||
However, there are unreleased versions of libsecp256k1 that are *not* ABI compatible with this version.
|
||||
However, there are earlier unreleased versions of libsecp256k1 that are *not* ABI compatible with this version.
|
||||
|
||||
## [0.1.0] - 2013-03-05 to 2021-12-25
|
||||
|
||||
This version was in fact never released.
|
||||
The number was given by the build system since the introduction of autotools in Jan 2014 (ea0fe5a5bf0c04f9cc955b2966b614f5f378c6f6).
|
||||
Therefore, this version number does not uniquely identify a set of source files.
|
||||
|
||||
[unreleased]: https://github.com/bitcoin-core/secp256k1/compare/v0.4.0...HEAD
|
||||
[0.4.0]: https://github.com/bitcoin-core/secp256k1/compare/v0.3.2...v0.4.0
|
||||
[0.3.2]: https://github.com/bitcoin-core/secp256k1/compare/v0.3.1...v0.3.2
|
||||
[0.3.1]: https://github.com/bitcoin-core/secp256k1/compare/v0.3.0...v0.3.1
|
||||
[0.3.0]: https://github.com/bitcoin-core/secp256k1/compare/v0.2.0...v0.3.0
|
||||
[0.2.0]: https://github.com/bitcoin-core/secp256k1/compare/423b6d19d373f1224fd671a982584d7e7900bc93..v0.2.0
|
||||
[0.1.0]: https://github.com/bitcoin-core/secp256k1/commit/423b6d19d373f1224fd671a982584d7e7900bc93
|
||||
|
|
|
@ -0,0 +1,341 @@
|
|||
cmake_minimum_required(VERSION 3.13)
|
||||
|
||||
if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.15)
|
||||
# MSVC runtime library flags are selected by the CMAKE_MSVC_RUNTIME_LIBRARY abstraction.
|
||||
cmake_policy(SET CMP0091 NEW)
|
||||
# MSVC warning flags are not in CMAKE_<LANG>_FLAGS by default.
|
||||
cmake_policy(SET CMP0092 NEW)
|
||||
endif()
|
||||
|
||||
project(libsecp256k1
|
||||
# The package (a.k.a. release) version is based on semantic versioning 2.0.0 of
|
||||
# the API. All changes in experimental modules are treated as
|
||||
# backwards-compatible and therefore at most increase the minor version.
|
||||
VERSION 0.4.0
|
||||
DESCRIPTION "Optimized C library for ECDSA signatures and secret/public key operations on curve secp256k1."
|
||||
HOMEPAGE_URL "https://github.com/bitcoin-core/secp256k1"
|
||||
LANGUAGES C
|
||||
)
|
||||
|
||||
if(CMAKE_VERSION VERSION_LESS 3.21)
|
||||
get_directory_property(parent_directory PARENT_DIRECTORY)
|
||||
if(parent_directory)
|
||||
set(PROJECT_IS_TOP_LEVEL OFF CACHE INTERNAL "Emulates CMake 3.21+ behavior.")
|
||||
set(${PROJECT_NAME}_IS_TOP_LEVEL OFF CACHE INTERNAL "Emulates CMake 3.21+ behavior.")
|
||||
else()
|
||||
set(PROJECT_IS_TOP_LEVEL ON CACHE INTERNAL "Emulates CMake 3.21+ behavior.")
|
||||
set(${PROJECT_NAME}_IS_TOP_LEVEL ON CACHE INTERNAL "Emulates CMake 3.21+ behavior.")
|
||||
endif()
|
||||
unset(parent_directory)
|
||||
endif()
|
||||
|
||||
# The library version is based on libtool versioning of the ABI. The set of
|
||||
# rules for updating the version can be found here:
|
||||
# https://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html
|
||||
# All changes in experimental modules are treated as if they don't affect the
|
||||
# interface and therefore only increase the revision.
|
||||
set(${PROJECT_NAME}_LIB_VERSION_CURRENT 3)
|
||||
set(${PROJECT_NAME}_LIB_VERSION_REVISION 0)
|
||||
set(${PROJECT_NAME}_LIB_VERSION_AGE 1)
|
||||
|
||||
set(CMAKE_C_STANDARD 90)
|
||||
set(CMAKE_C_EXTENSIONS OFF)
|
||||
|
||||
list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
|
||||
|
||||
option(BUILD_SHARED_LIBS "Build shared libraries." ON)
|
||||
option(SECP256K1_DISABLE_SHARED "Disable shared library. Overrides BUILD_SHARED_LIBS." OFF)
|
||||
if(SECP256K1_DISABLE_SHARED)
|
||||
set(BUILD_SHARED_LIBS OFF)
|
||||
endif()
|
||||
|
||||
option(SECP256K1_INSTALL "Enable installation." ${PROJECT_IS_TOP_LEVEL})
|
||||
|
||||
option(SECP256K1_ENABLE_MODULE_ECDH "Enable ECDH module." ON)
|
||||
if(SECP256K1_ENABLE_MODULE_ECDH)
|
||||
add_compile_definitions(ENABLE_MODULE_ECDH=1)
|
||||
endif()
|
||||
|
||||
option(SECP256K1_ENABLE_MODULE_RECOVERY "Enable ECDSA pubkey recovery module." OFF)
|
||||
if(SECP256K1_ENABLE_MODULE_RECOVERY)
|
||||
add_compile_definitions(ENABLE_MODULE_RECOVERY=1)
|
||||
endif()
|
||||
|
||||
option(SECP256K1_ENABLE_MODULE_EXTRAKEYS "Enable extrakeys module." ON)
|
||||
option(SECP256K1_ENABLE_MODULE_SCHNORRSIG "Enable schnorrsig module." ON)
|
||||
if(SECP256K1_ENABLE_MODULE_SCHNORRSIG)
|
||||
set(SECP256K1_ENABLE_MODULE_EXTRAKEYS ON)
|
||||
add_compile_definitions(ENABLE_MODULE_SCHNORRSIG=1)
|
||||
endif()
|
||||
if(SECP256K1_ENABLE_MODULE_EXTRAKEYS)
|
||||
add_compile_definitions(ENABLE_MODULE_EXTRAKEYS=1)
|
||||
endif()
|
||||
|
||||
option(SECP256K1_ENABLE_MODULE_ELLSWIFT "Enable ElligatorSwift module." ON)
|
||||
if(SECP256K1_ENABLE_MODULE_ELLSWIFT)
|
||||
add_compile_definitions(ENABLE_MODULE_ELLSWIFT=1)
|
||||
endif()
|
||||
|
||||
option(SECP256K1_USE_EXTERNAL_DEFAULT_CALLBACKS "Enable external default callback functions." OFF)
|
||||
if(SECP256K1_USE_EXTERNAL_DEFAULT_CALLBACKS)
|
||||
add_compile_definitions(USE_EXTERNAL_DEFAULT_CALLBACKS=1)
|
||||
endif()
|
||||
|
||||
set(SECP256K1_ECMULT_WINDOW_SIZE "AUTO" CACHE STRING "Window size for ecmult precomputation for verification, specified as integer in range [2..24]. \"AUTO\" is a reasonable setting for desktop machines (currently 15). [default=AUTO]")
|
||||
set_property(CACHE SECP256K1_ECMULT_WINDOW_SIZE PROPERTY STRINGS "AUTO" 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24)
|
||||
include(CheckStringOptionValue)
|
||||
check_string_option_value(SECP256K1_ECMULT_WINDOW_SIZE)
|
||||
if(SECP256K1_ECMULT_WINDOW_SIZE STREQUAL "AUTO")
|
||||
set(SECP256K1_ECMULT_WINDOW_SIZE 15)
|
||||
endif()
|
||||
add_compile_definitions(ECMULT_WINDOW_SIZE=${SECP256K1_ECMULT_WINDOW_SIZE})
|
||||
|
||||
set(SECP256K1_ECMULT_GEN_PREC_BITS "AUTO" CACHE STRING "Precision bits to tune the precomputed table size for signing, specified as integer 2, 4 or 8. \"AUTO\" is a reasonable setting for desktop machines (currently 4). [default=AUTO]")
|
||||
set_property(CACHE SECP256K1_ECMULT_GEN_PREC_BITS PROPERTY STRINGS "AUTO" 2 4 8)
|
||||
check_string_option_value(SECP256K1_ECMULT_GEN_PREC_BITS)
|
||||
if(SECP256K1_ECMULT_GEN_PREC_BITS STREQUAL "AUTO")
|
||||
set(SECP256K1_ECMULT_GEN_PREC_BITS 4)
|
||||
endif()
|
||||
add_compile_definitions(ECMULT_GEN_PREC_BITS=${SECP256K1_ECMULT_GEN_PREC_BITS})
|
||||
|
||||
set(SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY "OFF" CACHE STRING "Test-only override of the (autodetected by the C code) \"widemul\" setting. Legal values are: \"OFF\", \"int128_struct\", \"int128\" or \"int64\". [default=OFF]")
|
||||
set_property(CACHE SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY PROPERTY STRINGS "OFF" "int128_struct" "int128" "int64")
|
||||
check_string_option_value(SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY)
|
||||
if(SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY)
|
||||
string(TOUPPER "${SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY}" widemul_upper_value)
|
||||
add_compile_definitions(USE_FORCE_WIDEMUL_${widemul_upper_value}=1)
|
||||
endif()
|
||||
mark_as_advanced(FORCE SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY)
|
||||
|
||||
set(SECP256K1_ASM "AUTO" CACHE STRING "Assembly optimizations to use: \"AUTO\", \"OFF\", \"x86_64\" or \"arm32\" (experimental). [default=AUTO]")
|
||||
set_property(CACHE SECP256K1_ASM PROPERTY STRINGS "AUTO" "OFF" "x86_64" "arm32")
|
||||
check_string_option_value(SECP256K1_ASM)
|
||||
if(SECP256K1_ASM STREQUAL "arm32")
|
||||
enable_language(ASM)
|
||||
include(CheckArm32Assembly)
|
||||
check_arm32_assembly()
|
||||
if(HAVE_ARM32_ASM)
|
||||
add_compile_definitions(USE_EXTERNAL_ASM=1)
|
||||
else()
|
||||
message(FATAL_ERROR "ARM32 assembly optimization requested but not available.")
|
||||
endif()
|
||||
elseif(SECP256K1_ASM)
|
||||
include(CheckX86_64Assembly)
|
||||
check_x86_64_assembly()
|
||||
if(HAVE_X86_64_ASM)
|
||||
set(SECP256K1_ASM "x86_64")
|
||||
add_compile_definitions(USE_ASM_X86_64=1)
|
||||
elseif(SECP256K1_ASM STREQUAL "AUTO")
|
||||
set(SECP256K1_ASM "OFF")
|
||||
else()
|
||||
message(FATAL_ERROR "x86_64 assembly optimization requested but not available.")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
option(SECP256K1_EXPERIMENTAL "Allow experimental configuration options." OFF)
|
||||
if(NOT SECP256K1_EXPERIMENTAL)
|
||||
if(SECP256K1_ASM STREQUAL "arm32")
|
||||
message(FATAL_ERROR "ARM32 assembly optimization is experimental. Use -DSECP256K1_EXPERIMENTAL=ON to allow.")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
set(SECP256K1_VALGRIND "AUTO" CACHE STRING "Build with extra checks for running inside Valgrind. [default=AUTO]")
|
||||
set_property(CACHE SECP256K1_VALGRIND PROPERTY STRINGS "AUTO" "OFF" "ON")
|
||||
check_string_option_value(SECP256K1_VALGRIND)
|
||||
if(SECP256K1_VALGRIND)
|
||||
find_package(Valgrind MODULE)
|
||||
if(Valgrind_FOUND)
|
||||
set(SECP256K1_VALGRIND ON)
|
||||
include_directories(${Valgrind_INCLUDE_DIR})
|
||||
add_compile_definitions(VALGRIND)
|
||||
elseif(SECP256K1_VALGRIND STREQUAL "AUTO")
|
||||
set(SECP256K1_VALGRIND OFF)
|
||||
else()
|
||||
message(FATAL_ERROR "Valgrind support requested but valgrind/memcheck.h header not available.")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
option(SECP256K1_BUILD_BENCHMARK "Build benchmarks." ON)
|
||||
option(SECP256K1_BUILD_TESTS "Build tests." ON)
|
||||
option(SECP256K1_BUILD_EXHAUSTIVE_TESTS "Build exhaustive tests." ON)
|
||||
option(SECP256K1_BUILD_CTIME_TESTS "Build constant-time tests." ${SECP256K1_VALGRIND})
|
||||
option(SECP256K1_BUILD_EXAMPLES "Build examples." OFF)
|
||||
|
||||
# Redefine configuration flags.
|
||||
# We leave assertions on, because they are only used in the examples, and we want them always on there.
|
||||
if(MSVC)
|
||||
string(REGEX REPLACE "/DNDEBUG[ \t\r\n]*" "" CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO}")
|
||||
string(REGEX REPLACE "/DNDEBUG[ \t\r\n]*" "" CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE}")
|
||||
string(REGEX REPLACE "/DNDEBUG[ \t\r\n]*" "" CMAKE_C_FLAGS_MINSIZEREL "${CMAKE_C_FLAGS_MINSIZEREL}")
|
||||
else()
|
||||
string(REGEX REPLACE "-DNDEBUG[ \t\r\n]*" "" CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO}")
|
||||
string(REGEX REPLACE "-DNDEBUG[ \t\r\n]*" "" CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE}")
|
||||
string(REGEX REPLACE "-DNDEBUG[ \t\r\n]*" "" CMAKE_C_FLAGS_MINSIZEREL "${CMAKE_C_FLAGS_MINSIZEREL}")
|
||||
# Prefer -O2 optimization level. (-O3 is CMake's default for Release for many compilers.)
|
||||
string(REGEX REPLACE "-O3[ \t\r\n]*" "-O2" CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE}")
|
||||
endif()
|
||||
|
||||
# Define custom "Coverage" build type.
|
||||
set(CMAKE_C_FLAGS_COVERAGE "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O0 -DCOVERAGE=1 --coverage" CACHE STRING
|
||||
"Flags used by the C compiler during \"Coverage\" builds."
|
||||
FORCE
|
||||
)
|
||||
set(CMAKE_EXE_LINKER_FLAGS_COVERAGE "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} --coverage" CACHE STRING
|
||||
"Flags used for linking binaries during \"Coverage\" builds."
|
||||
FORCE
|
||||
)
|
||||
set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO} --coverage" CACHE STRING
|
||||
"Flags used by the shared libraries linker during \"Coverage\" builds."
|
||||
FORCE
|
||||
)
|
||||
mark_as_advanced(
|
||||
CMAKE_C_FLAGS_COVERAGE
|
||||
CMAKE_EXE_LINKER_FLAGS_COVERAGE
|
||||
CMAKE_SHARED_LINKER_FLAGS_COVERAGE
|
||||
)
|
||||
|
||||
get_property(is_multi_config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
|
||||
set(default_build_type "RelWithDebInfo")
|
||||
if(is_multi_config)
|
||||
set(CMAKE_CONFIGURATION_TYPES "${default_build_type}" "Release" "Debug" "MinSizeRel" "Coverage" CACHE STRING
|
||||
"Supported configuration types."
|
||||
FORCE
|
||||
)
|
||||
else()
|
||||
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY
|
||||
STRINGS "${default_build_type}" "Release" "Debug" "MinSizeRel" "Coverage"
|
||||
)
|
||||
if(NOT CMAKE_BUILD_TYPE)
|
||||
message(STATUS "Setting build type to \"${default_build_type}\" as none was specified")
|
||||
set(CMAKE_BUILD_TYPE "${default_build_type}" CACHE STRING
|
||||
"Choose the type of build."
|
||||
FORCE
|
||||
)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
include(TryAppendCFlags)
|
||||
if(MSVC)
|
||||
# Keep the following commands ordered lexicographically.
|
||||
try_append_c_flags(/W3) # Production quality warning level.
|
||||
try_append_c_flags(/wd4146) # Disable warning C4146 "unary minus operator applied to unsigned type, result still unsigned".
|
||||
try_append_c_flags(/wd4244) # Disable warning C4244 "'conversion' conversion from 'type1' to 'type2', possible loss of data".
|
||||
try_append_c_flags(/wd4267) # Disable warning C4267 "'var' : conversion from 'size_t' to 'type', possible loss of data".
|
||||
# Eliminate deprecation warnings for the older, less secure functions.
|
||||
add_compile_definitions(_CRT_SECURE_NO_WARNINGS)
|
||||
else()
|
||||
# Keep the following commands ordered lexicographically.
|
||||
try_append_c_flags(-pedantic)
|
||||
try_append_c_flags(-Wall) # GCC >= 2.95 and probably many other compilers.
|
||||
try_append_c_flags(-Wcast-align) # GCC >= 2.95.
|
||||
try_append_c_flags(-Wcast-align=strict) # GCC >= 8.0.
|
||||
try_append_c_flags(-Wconditional-uninitialized) # Clang >= 3.0 only.
|
||||
try_append_c_flags(-Wextra) # GCC >= 3.4, this is the newer name of -W, which we don't use because older GCCs will warn about unused functions.
|
||||
try_append_c_flags(-Wnested-externs)
|
||||
try_append_c_flags(-Wno-long-long) # GCC >= 3.0, -Wlong-long is implied by -pedantic.
|
||||
try_append_c_flags(-Wno-overlength-strings) # GCC >= 4.2, -Woverlength-strings is implied by -pedantic.
|
||||
try_append_c_flags(-Wno-unused-function) # GCC >= 3.0, -Wunused-function is implied by -Wall.
|
||||
try_append_c_flags(-Wreserved-identifier) # Clang >= 13.0 only.
|
||||
try_append_c_flags(-Wshadow)
|
||||
try_append_c_flags(-Wstrict-prototypes)
|
||||
try_append_c_flags(-Wundef)
|
||||
endif()
|
||||
|
||||
set(CMAKE_C_VISIBILITY_PRESET hidden)
|
||||
|
||||
# Ask CTest to create a "check" target (e.g., make check) as alias for the "test" target.
|
||||
# CTEST_TEST_TARGET_ALIAS is not documented but supposed to be user-facing.
|
||||
# See: https://gitlab.kitware.com/cmake/cmake/-/commit/816c9d1aa1f2b42d40c81a991b68c96eb12b6d2
|
||||
set(CTEST_TEST_TARGET_ALIAS check)
|
||||
include(CTest)
|
||||
# We do not use CTest's BUILD_TESTING because a single toggle for all tests is too coarse for our needs.
|
||||
mark_as_advanced(BUILD_TESTING)
|
||||
if(SECP256K1_BUILD_BENCHMARK OR SECP256K1_BUILD_TESTS OR SECP256K1_BUILD_EXHAUSTIVE_TESTS OR SECP256K1_BUILD_CTIME_TESTS OR SECP256K1_BUILD_EXAMPLES)
|
||||
enable_testing()
|
||||
endif()
|
||||
|
||||
add_subdirectory(src)
|
||||
if(SECP256K1_BUILD_EXAMPLES)
|
||||
add_subdirectory(examples)
|
||||
endif()
|
||||
|
||||
message("\n")
|
||||
message("secp256k1 configure summary")
|
||||
message("===========================")
|
||||
message("Build artifacts:")
|
||||
if(BUILD_SHARED_LIBS)
|
||||
set(library_type "Shared")
|
||||
else()
|
||||
set(library_type "Static")
|
||||
endif()
|
||||
|
||||
message(" library type ........................ ${library_type}")
|
||||
message("Optional modules:")
|
||||
message(" ECDH ................................ ${SECP256K1_ENABLE_MODULE_ECDH}")
|
||||
message(" ECDSA pubkey recovery ............... ${SECP256K1_ENABLE_MODULE_RECOVERY}")
|
||||
message(" extrakeys ........................... ${SECP256K1_ENABLE_MODULE_EXTRAKEYS}")
|
||||
message(" schnorrsig .......................... ${SECP256K1_ENABLE_MODULE_SCHNORRSIG}")
|
||||
message(" ElligatorSwift ...................... ${SECP256K1_ENABLE_MODULE_ELLSWIFT}")
|
||||
message("Parameters:")
|
||||
message(" ecmult window size .................. ${SECP256K1_ECMULT_WINDOW_SIZE}")
|
||||
message(" ecmult gen precision bits ........... ${SECP256K1_ECMULT_GEN_PREC_BITS}")
|
||||
message("Optional features:")
|
||||
message(" assembly optimization ............... ${SECP256K1_ASM}")
|
||||
message(" external callbacks .................. ${SECP256K1_USE_EXTERNAL_DEFAULT_CALLBACKS}")
|
||||
if(SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY)
|
||||
message(" wide multiplication (test-only) ..... ${SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY}")
|
||||
endif()
|
||||
message("Optional binaries:")
|
||||
message(" benchmark ........................... ${SECP256K1_BUILD_BENCHMARK}")
|
||||
message(" noverify_tests ...................... ${SECP256K1_BUILD_TESTS}")
|
||||
set(tests_status "${SECP256K1_BUILD_TESTS}")
|
||||
if(CMAKE_BUILD_TYPE STREQUAL "Coverage")
|
||||
set(tests_status OFF)
|
||||
endif()
|
||||
message(" tests ............................... ${tests_status}")
|
||||
message(" exhaustive tests .................... ${SECP256K1_BUILD_EXHAUSTIVE_TESTS}")
|
||||
message(" ctime_tests ......................... ${SECP256K1_BUILD_CTIME_TESTS}")
|
||||
message(" examples ............................ ${SECP256K1_BUILD_EXAMPLES}")
|
||||
message("")
|
||||
if(CMAKE_CROSSCOMPILING)
|
||||
set(cross_status "TRUE, for ${CMAKE_SYSTEM_NAME}, ${CMAKE_SYSTEM_PROCESSOR}")
|
||||
else()
|
||||
set(cross_status "FALSE")
|
||||
endif()
|
||||
message("Cross compiling ....................... ${cross_status}")
|
||||
message("Valgrind .............................. ${SECP256K1_VALGRIND}")
|
||||
get_directory_property(definitions COMPILE_DEFINITIONS)
|
||||
string(REPLACE ";" " " definitions "${definitions}")
|
||||
message("Preprocessor defined macros ........... ${definitions}")
|
||||
message("C compiler ............................ ${CMAKE_C_COMPILER}")
|
||||
message("CFLAGS ................................ ${CMAKE_C_FLAGS}")
|
||||
get_directory_property(compile_options COMPILE_OPTIONS)
|
||||
string(REPLACE ";" " " compile_options "${compile_options}")
|
||||
message("Compile options ....................... " ${compile_options})
|
||||
if(NOT is_multi_config)
|
||||
message("Build type:")
|
||||
message(" - CMAKE_BUILD_TYPE ................... ${CMAKE_BUILD_TYPE}")
|
||||
string(TOUPPER "${CMAKE_BUILD_TYPE}" build_type)
|
||||
message(" - CFLAGS ............................. ${CMAKE_C_FLAGS_${build_type}}")
|
||||
message(" - LDFLAGS for executables ............ ${CMAKE_EXE_LINKER_FLAGS_${build_type}}")
|
||||
message(" - LDFLAGS for shared libraries ....... ${CMAKE_SHARED_LINKER_FLAGS_${build_type}}")
|
||||
else()
|
||||
message("Supported configurations .............. ${CMAKE_CONFIGURATION_TYPES}")
|
||||
message("RelWithDebInfo configuration:")
|
||||
message(" - CFLAGS ............................. ${CMAKE_C_FLAGS_RELWITHDEBINFO}")
|
||||
message(" - LDFLAGS for executables ............ ${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO}")
|
||||
message(" - LDFLAGS for shared libraries ....... ${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO}")
|
||||
message("Debug configuration:")
|
||||
message(" - CFLAGS ............................. ${CMAKE_C_FLAGS_DEBUG}")
|
||||
message(" - LDFLAGS for executables ............ ${CMAKE_EXE_LINKER_FLAGS_DEBUG}")
|
||||
message(" - LDFLAGS for shared libraries ....... ${CMAKE_SHARED_LINKER_FLAGS_DEBUG}")
|
||||
endif()
|
||||
message("\n")
|
||||
if(SECP256K1_EXPERIMENTAL)
|
||||
message(
|
||||
" ******\n"
|
||||
" WARNING: experimental build\n"
|
||||
" Experimental features do not have stable APIs or properties, and may not be safe for production use.\n"
|
||||
" ******\n"
|
||||
)
|
||||
endif()
|
|
@ -0,0 +1,19 @@
|
|||
{
|
||||
"cmakeMinimumRequired": {"major": 3, "minor": 21, "patch": 0},
|
||||
"version": 3,
|
||||
"configurePresets": [
|
||||
{
|
||||
"name": "dev-mode",
|
||||
"displayName": "Development mode (intended only for developers of the library)",
|
||||
"cacheVariables": {
|
||||
"SECP256K1_EXPERIMENTAL": "ON",
|
||||
"SECP256K1_ENABLE_MODULE_RECOVERY": "ON",
|
||||
"SECP256K1_BUILD_EXAMPLES": "ON"
|
||||
},
|
||||
"warnings": {
|
||||
"dev": true,
|
||||
"uninitialized": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,5 +1,3 @@
|
|||
.PHONY: clean-precomp precomp
|
||||
|
||||
ACLOCAL_AMFLAGS = -I build-aux/m4
|
||||
|
||||
# AM_CFLAGS will be automatically prepended to CFLAGS by Automake when compiling some foo
|
||||
|
@ -8,7 +6,7 @@ AM_CFLAGS = $(SECP_CFLAGS)
|
|||
|
||||
lib_LTLIBRARIES = libsecp256k1.la
|
||||
include_HEADERS = include/secp256k1.h
|
||||
include_HEADERS += include/rustsecp256k1_v0_8_1_preallocated.h
|
||||
include_HEADERS += include/rustsecp256k1_v0_9_0_preallocated.h
|
||||
noinst_HEADERS =
|
||||
noinst_HEADERS += src/scalar.h
|
||||
noinst_HEADERS += src/scalar_4x64.h
|
||||
|
@ -47,6 +45,7 @@ noinst_HEADERS += src/modinv64_impl.h
|
|||
noinst_HEADERS += src/precomputed_ecmult.h
|
||||
noinst_HEADERS += src/precomputed_ecmult_gen.h
|
||||
noinst_HEADERS += src/assumptions.h
|
||||
noinst_HEADERS += src/checkmem.h
|
||||
noinst_HEADERS += src/util.h
|
||||
noinst_HEADERS += src/int128.h
|
||||
noinst_HEADERS += src/int128_impl.h
|
||||
|
@ -64,19 +63,22 @@ noinst_HEADERS += src/hash_impl.h
|
|||
noinst_HEADERS += src/field.h
|
||||
noinst_HEADERS += src/field_impl.h
|
||||
noinst_HEADERS += src/bench.h
|
||||
noinst_HEADERS += src/wycheproof/ecdsa_rustsecp256k1_v0_9_0_sha256_bitcoin_test.h
|
||||
noinst_HEADERS += contrib/lax_der_parsing.h
|
||||
noinst_HEADERS += contrib/lax_der_parsing.c
|
||||
noinst_HEADERS += contrib/lax_der_privatekey_parsing.h
|
||||
noinst_HEADERS += contrib/lax_der_privatekey_parsing.c
|
||||
noinst_HEADERS += examples/random.h
|
||||
noinst_HEADERS += examples/examples_util.h
|
||||
|
||||
PRECOMPUTED_LIB = librustsecp256k1_v0_8_1_precomputed.la
|
||||
PRECOMPUTED_LIB = librustsecp256k1_v0_9_0_precomputed.la
|
||||
noinst_LTLIBRARIES = $(PRECOMPUTED_LIB)
|
||||
librustsecp256k1_v0_8_1_precomputed_la_SOURCES = src/precomputed_ecmult.c src/precomputed_ecmult_gen.c
|
||||
librustsecp256k1_v0_8_1_precomputed_la_CPPFLAGS = $(SECP_INCLUDES)
|
||||
librustsecp256k1_v0_9_0_precomputed_la_SOURCES = src/precomputed_ecmult.c src/precomputed_ecmult_gen.c
|
||||
# We need `-I$(top_srcdir)/src` in VPATH builds if librustsecp256k1_v0_9_0_precomputed_la_SOURCES have been recreated in the build tree.
|
||||
# This helps users and packagers who insist on recreating the precomputed files (e.g., Gentoo).
|
||||
librustsecp256k1_v0_9_0_precomputed_la_CPPFLAGS = -I$(top_srcdir)/src $(SECP_CONFIG_DEFINES)
|
||||
|
||||
if USE_EXTERNAL_ASM
|
||||
COMMON_LIB = librustsecp256k1_v0_8_1_common.la
|
||||
COMMON_LIB = librustsecp256k1_v0_9_0_common.la
|
||||
else
|
||||
COMMON_LIB =
|
||||
endif
|
||||
|
@ -87,60 +89,63 @@ pkgconfig_DATA = libsecp256k1.pc
|
|||
|
||||
if USE_EXTERNAL_ASM
|
||||
if USE_ASM_ARM
|
||||
librustsecp256k1_v0_8_1_common_la_SOURCES = src/asm/field_10x26_arm.s
|
||||
librustsecp256k1_v0_9_0_common_la_SOURCES = src/asm/field_10x26_arm.s
|
||||
endif
|
||||
endif
|
||||
|
||||
librustsecp256k1_v0_8_1_la_SOURCES = src/secp256k1.c
|
||||
librustsecp256k1_v0_8_1_la_CPPFLAGS = $(SECP_INCLUDES)
|
||||
librustsecp256k1_v0_8_1_la_LIBADD = $(SECP_LIBS) $(COMMON_LIB) $(PRECOMPUTED_LIB)
|
||||
librustsecp256k1_v0_8_1_la_LDFLAGS = -no-undefined -version-info $(LIB_VERSION_CURRENT):$(LIB_VERSION_REVISION):$(LIB_VERSION_AGE)
|
||||
|
||||
if VALGRIND_ENABLED
|
||||
librustsecp256k1_v0_8_1_la_CPPFLAGS += -DVALGRIND
|
||||
endif
|
||||
librustsecp256k1_v0_9_0_la_SOURCES = src/secp256k1.c
|
||||
librustsecp256k1_v0_9_0_la_CPPFLAGS = $(SECP_CONFIG_DEFINES)
|
||||
librustsecp256k1_v0_9_0_la_LIBADD = $(COMMON_LIB) $(PRECOMPUTED_LIB)
|
||||
librustsecp256k1_v0_9_0_la_LDFLAGS = -no-undefined -version-info $(LIB_VERSION_CURRENT):$(LIB_VERSION_REVISION):$(LIB_VERSION_AGE)
|
||||
|
||||
noinst_PROGRAMS =
|
||||
if USE_BENCHMARK
|
||||
noinst_PROGRAMS += bench bench_internal bench_ecmult
|
||||
bench_SOURCES = src/bench.c
|
||||
bench_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB)
|
||||
bench_LDADD = libsecp256k1.la
|
||||
bench_CPPFLAGS = $(SECP_CONFIG_DEFINES)
|
||||
bench_internal_SOURCES = src/bench_internal.c
|
||||
bench_internal_LDADD = $(SECP_LIBS) $(COMMON_LIB) $(PRECOMPUTED_LIB)
|
||||
bench_internal_CPPFLAGS = $(SECP_INCLUDES)
|
||||
bench_internal_LDADD = $(COMMON_LIB) $(PRECOMPUTED_LIB)
|
||||
bench_internal_CPPFLAGS = $(SECP_CONFIG_DEFINES)
|
||||
bench_ecmult_SOURCES = src/bench_ecmult.c
|
||||
bench_ecmult_LDADD = $(SECP_LIBS) $(COMMON_LIB) $(PRECOMPUTED_LIB)
|
||||
bench_ecmult_CPPFLAGS = $(SECP_INCLUDES)
|
||||
bench_ecmult_LDADD = $(COMMON_LIB) $(PRECOMPUTED_LIB)
|
||||
bench_ecmult_CPPFLAGS = $(SECP_CONFIG_DEFINES)
|
||||
endif
|
||||
|
||||
TESTS =
|
||||
if USE_TESTS
|
||||
noinst_PROGRAMS += tests
|
||||
tests_SOURCES = src/tests.c
|
||||
tests_CPPFLAGS = $(SECP_INCLUDES) $(SECP_TEST_INCLUDES)
|
||||
if VALGRIND_ENABLED
|
||||
tests_CPPFLAGS += -DVALGRIND
|
||||
noinst_PROGRAMS += valgrind_ctime_test
|
||||
valgrind_ctime_test_SOURCES = src/valgrind_ctime_test.c
|
||||
valgrind_ctime_test_LDADD = libsecp256k1.la $(SECP_LIBS) $(COMMON_LIB)
|
||||
endif
|
||||
TESTS += noverify_tests
|
||||
noinst_PROGRAMS += noverify_tests
|
||||
noverify_tests_SOURCES = src/tests.c
|
||||
noverify_tests_CPPFLAGS = $(SECP_CONFIG_DEFINES)
|
||||
noverify_tests_LDADD = $(COMMON_LIB) $(PRECOMPUTED_LIB)
|
||||
noverify_tests_LDFLAGS = -static
|
||||
if !ENABLE_COVERAGE
|
||||
tests_CPPFLAGS += -DVERIFY
|
||||
endif
|
||||
tests_LDADD = $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB) $(PRECOMPUTED_LIB)
|
||||
tests_LDFLAGS = -static
|
||||
TESTS += tests
|
||||
noinst_PROGRAMS += tests
|
||||
tests_SOURCES = $(noverify_tests_SOURCES)
|
||||
tests_CPPFLAGS = $(noverify_tests_CPPFLAGS) -DVERIFY
|
||||
tests_LDADD = $(noverify_tests_LDADD)
|
||||
tests_LDFLAGS = $(noverify_tests_LDFLAGS)
|
||||
endif
|
||||
endif
|
||||
|
||||
if USE_CTIME_TESTS
|
||||
noinst_PROGRAMS += ctime_tests
|
||||
ctime_tests_SOURCES = src/ctime_tests.c
|
||||
ctime_tests_LDADD = libsecp256k1.la
|
||||
ctime_tests_CPPFLAGS = $(SECP_CONFIG_DEFINES)
|
||||
endif
|
||||
|
||||
if USE_EXHAUSTIVE_TESTS
|
||||
noinst_PROGRAMS += exhaustive_tests
|
||||
exhaustive_tests_SOURCES = src/tests_exhaustive.c
|
||||
exhaustive_tests_CPPFLAGS = $(SECP_INCLUDES)
|
||||
exhaustive_tests_CPPFLAGS = $(SECP_CONFIG_DEFINES)
|
||||
if !ENABLE_COVERAGE
|
||||
exhaustive_tests_CPPFLAGS += -DVERIFY
|
||||
endif
|
||||
# Note: do not include $(PRECOMPUTED_LIB) in exhaustive_tests (it uses runtime-generated tables).
|
||||
exhaustive_tests_LDADD = $(SECP_LIBS) $(COMMON_LIB)
|
||||
exhaustive_tests_LDADD = $(COMMON_LIB)
|
||||
exhaustive_tests_LDFLAGS = -static
|
||||
TESTS += exhaustive_tests
|
||||
endif
|
||||
|
@ -148,7 +153,7 @@ endif
|
|||
if USE_EXAMPLES
|
||||
noinst_PROGRAMS += ecdsa_example
|
||||
ecdsa_example_SOURCES = examples/ecdsa.c
|
||||
ecdsa_example_CPPFLAGS = -I$(top_srcdir)/include
|
||||
ecdsa_example_CPPFLAGS = -I$(top_srcdir)/include -DSECP256K1_STATIC
|
||||
ecdsa_example_LDADD = libsecp256k1.la
|
||||
ecdsa_example_LDFLAGS = -static
|
||||
if BUILD_WINDOWS
|
||||
|
@ -158,7 +163,7 @@ TESTS += ecdsa_example
|
|||
if ENABLE_MODULE_ECDH
|
||||
noinst_PROGRAMS += ecdh_example
|
||||
ecdh_example_SOURCES = examples/ecdh.c
|
||||
ecdh_example_CPPFLAGS = -I$(top_srcdir)/include
|
||||
ecdh_example_CPPFLAGS = -I$(top_srcdir)/include -DSECP256K1_STATIC
|
||||
ecdh_example_LDADD = libsecp256k1.la
|
||||
ecdh_example_LDFLAGS = -static
|
||||
if BUILD_WINDOWS
|
||||
|
@ -169,7 +174,7 @@ endif
|
|||
if ENABLE_MODULE_SCHNORRSIG
|
||||
noinst_PROGRAMS += schnorr_example
|
||||
schnorr_example_SOURCES = examples/schnorr.c
|
||||
schnorr_example_CPPFLAGS = -I$(top_srcdir)/include
|
||||
schnorr_example_CPPFLAGS = -I$(top_srcdir)/include -DSECP256K1_STATIC
|
||||
schnorr_example_LDADD = libsecp256k1.la
|
||||
schnorr_example_LDFLAGS = -static
|
||||
if BUILD_WINDOWS
|
||||
|
@ -184,19 +189,19 @@ EXTRA_PROGRAMS = precompute_ecmult precompute_ecmult_gen
|
|||
CLEANFILES = $(EXTRA_PROGRAMS)
|
||||
|
||||
precompute_ecmult_SOURCES = src/precompute_ecmult.c
|
||||
precompute_ecmult_CPPFLAGS = $(SECP_INCLUDES)
|
||||
precompute_ecmult_LDADD = $(SECP_LIBS) $(COMMON_LIB)
|
||||
precompute_ecmult_CPPFLAGS = $(SECP_CONFIG_DEFINES) -DVERIFY
|
||||
precompute_ecmult_LDADD = $(COMMON_LIB)
|
||||
|
||||
precompute_ecmult_gen_SOURCES = src/precompute_ecmult_gen.c
|
||||
precompute_ecmult_gen_CPPFLAGS = $(SECP_INCLUDES)
|
||||
precompute_ecmult_gen_LDADD = $(SECP_LIBS) $(COMMON_LIB)
|
||||
precompute_ecmult_gen_CPPFLAGS = $(SECP_CONFIG_DEFINES) -DVERIFY
|
||||
precompute_ecmult_gen_LDADD = $(COMMON_LIB)
|
||||
|
||||
# See Automake manual, Section "Errors with distclean".
|
||||
# We don't list any dependencies for the prebuilt files here because
|
||||
# otherwise make's decision whether to rebuild them (even in the first
|
||||
# build by a normal user) depends on mtimes, and thus is very fragile.
|
||||
# This means that rebuilds of the prebuilt files always need to be
|
||||
# forced by deleting them, e.g., by invoking `make clean-precomp`.
|
||||
# forced by deleting them.
|
||||
src/precomputed_ecmult.c:
|
||||
$(MAKE) $(AM_MAKEFLAGS) precompute_ecmult$(EXEEXT)
|
||||
./precompute_ecmult$(EXEEXT)
|
||||
|
@ -211,11 +216,29 @@ precomp: $(PRECOMP)
|
|||
# e.g., after `make maintainer-clean`).
|
||||
BUILT_SOURCES = $(PRECOMP)
|
||||
|
||||
maintainer-clean-local: clean-precomp
|
||||
|
||||
.PHONY: clean-precomp
|
||||
clean-precomp:
|
||||
rm -f $(PRECOMP)
|
||||
maintainer-clean-local: clean-precomp
|
||||
|
||||
### Pregenerated test vectors
|
||||
### (see the comments in the previous section for detailed rationale)
|
||||
TESTVECTORS = src/wycheproof/ecdsa_rustsecp256k1_v0_9_0_sha256_bitcoin_test.h
|
||||
|
||||
src/wycheproof/ecdsa_rustsecp256k1_v0_9_0_sha256_bitcoin_test.h:
|
||||
mkdir -p $(@D)
|
||||
python3 $(top_srcdir)/tools/tests_wycheproof_generate.py $(top_srcdir)/src/wycheproof/ecdsa_rustsecp256k1_v0_9_0_sha256_bitcoin_test.json > $@
|
||||
|
||||
testvectors: $(TESTVECTORS)
|
||||
|
||||
BUILT_SOURCES += $(TESTVECTORS)
|
||||
|
||||
.PHONY: clean-testvectors
|
||||
clean-testvectors:
|
||||
rm -f $(TESTVECTORS)
|
||||
maintainer-clean-local: clean-testvectors
|
||||
|
||||
### Additional files to distribute
|
||||
EXTRA_DIST = autogen.sh CHANGELOG.md SECURITY.md
|
||||
EXTRA_DIST += doc/release-process.md doc/safegcd_implementation.md
|
||||
EXTRA_DIST += examples/EXAMPLES_COPYING
|
||||
|
@ -223,8 +246,11 @@ EXTRA_DIST += sage/gen_exhaustive_groups.sage
|
|||
EXTRA_DIST += sage/gen_split_lambda_constants.sage
|
||||
EXTRA_DIST += sage/group_prover.sage
|
||||
EXTRA_DIST += sage/prove_group_implementations.sage
|
||||
EXTRA_DIST += sage/rustsecp256k1_v0_8_1_params.sage
|
||||
EXTRA_DIST += sage/rustsecp256k1_v0_9_0_params.sage
|
||||
EXTRA_DIST += sage/weierstrass_prover.sage
|
||||
EXTRA_DIST += src/wycheproof/WYCHEPROOF_COPYING
|
||||
EXTRA_DIST += src/wycheproof/ecdsa_rustsecp256k1_v0_9_0_sha256_bitcoin_test.json
|
||||
EXTRA_DIST += tools/tests_wycheproof_generate.py
|
||||
|
||||
if ENABLE_MODULE_ECDH
|
||||
include src/modules/ecdh/Makefile.am.include
|
||||
|
@ -241,3 +267,7 @@ endif
|
|||
if ENABLE_MODULE_SCHNORRSIG
|
||||
include src/modules/schnorrsig/Makefile.am.include
|
||||
endif
|
||||
|
||||
if ENABLE_MODULE_ELLSWIFT
|
||||
include src/modules/ellswift/Makefile.am.include
|
||||
endif
|
||||
|
|
|
@ -60,10 +60,8 @@ Implementation details
|
|||
* Optional runtime blinding which attempts to frustrate differential power analysis.
|
||||
* The precomputed tables add and eventually subtract points for which no known scalar (secret key) is known, preventing even an attacker with control over the secret key used to control the data internally.
|
||||
|
||||
Build steps
|
||||
-----------
|
||||
|
||||
libsecp256k1 is built using autotools:
|
||||
Building with Autotools
|
||||
-----------------------
|
||||
|
||||
$ ./autogen.sh
|
||||
$ ./configure
|
||||
|
@ -73,6 +71,43 @@ libsecp256k1 is built using autotools:
|
|||
|
||||
To compile optional modules (such as Schnorr signatures), you need to run `./configure` with additional flags (such as `--enable-module-schnorrsig`). Run `./configure --help` to see the full list of available flags.
|
||||
|
||||
Building with CMake (experimental)
|
||||
----------------------------------
|
||||
|
||||
To maintain a pristine source tree, CMake encourages to perform an out-of-source build by using a separate dedicated build tree.
|
||||
|
||||
### Building on POSIX systems
|
||||
|
||||
$ mkdir build && cd build
|
||||
$ cmake ..
|
||||
$ make
|
||||
$ make check # run the test suite
|
||||
$ sudo make install # optional
|
||||
|
||||
To compile optional modules (such as Schnorr signatures), you need to run `cmake` with additional flags (such as `-DSECP256K1_ENABLE_MODULE_SCHNORRSIG=ON`). Run `cmake .. -LH` to see the full list of available flags.
|
||||
|
||||
### Cross compiling
|
||||
|
||||
To alleviate issues with cross compiling, preconfigured toolchain files are available in the `cmake` directory.
|
||||
For example, to cross compile for Windows:
|
||||
|
||||
$ cmake .. -DCMAKE_TOOLCHAIN_FILE=../cmake/x86_64-w64-mingw32.toolchain.cmake
|
||||
|
||||
To cross compile for Android with [NDK](https://developer.android.com/ndk/guides/cmake) (using NDK's toolchain file, and assuming the `ANDROID_NDK_ROOT` environment variable has been set):
|
||||
|
||||
$ cmake .. -DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake" -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=28
|
||||
|
||||
### Building on Windows
|
||||
|
||||
To build on Windows with Visual Studio, a proper [generator](https://cmake.org/cmake/help/latest/manual/cmake-generators.7.html#visual-studio-generators) must be specified for a new build tree.
|
||||
|
||||
The following example assumes using of Visual Studio 2022 and CMake v3.21+.
|
||||
|
||||
In "Developer Command Prompt for VS 2022":
|
||||
|
||||
>cmake -G "Visual Studio 17 2022" -A x64 -S . -B build
|
||||
>cmake --build build --config RelWithDebInfo
|
||||
|
||||
Usage examples
|
||||
-----------
|
||||
Usage examples can be found in the [examples](examples) directory. To compile them you need to configure with `--enable-examples`.
|
||||
|
|
|
@ -1,12 +1,31 @@
|
|||
dnl escape "$0x" below using the m4 quadrigaph @S|@, and escape it again with a \ for the shell.
|
||||
AC_DEFUN([SECP_64BIT_ASM_CHECK],[
|
||||
AC_DEFUN([SECP_X86_64_ASM_CHECK],[
|
||||
AC_MSG_CHECKING(for x86_64 assembly availability)
|
||||
AC_LINK_IFELSE([AC_LANG_PROGRAM([[
|
||||
#include <stdint.h>]],[[
|
||||
uint64_t a = 11, tmp;
|
||||
__asm__ __volatile__("movq \@S|@0x100000000,%1; mulq %%rsi" : "+a"(a) : "S"(tmp) : "cc", "%rdx");
|
||||
]])],[has_64bit_asm=yes],[has_64bit_asm=no])
|
||||
AC_MSG_RESULT([$has_64bit_asm])
|
||||
]])], [has_x86_64_asm=yes], [has_x86_64_asm=no])
|
||||
AC_MSG_RESULT([$has_x86_64_asm])
|
||||
])
|
||||
|
||||
AC_DEFUN([SECP_ARM32_ASM_CHECK], [
|
||||
AC_MSG_CHECKING(for ARM32 assembly availability)
|
||||
SECP_ARM32_ASM_CHECK_CFLAGS_saved_CFLAGS="$CFLAGS"
|
||||
CFLAGS="-x assembler"
|
||||
AC_LINK_IFELSE([AC_LANG_SOURCE([[
|
||||
.syntax unified
|
||||
.eabi_attribute 24, 1
|
||||
.eabi_attribute 25, 1
|
||||
.text
|
||||
.global main
|
||||
main:
|
||||
ldr r0, =0x002A
|
||||
mov r7, #1
|
||||
swi 0
|
||||
]])], [has_arm32_asm=yes], [has_arm32_asm=no])
|
||||
AC_MSG_RESULT([$has_arm32_asm])
|
||||
CFLAGS="$SECP_ARM32_ASM_CHECK_CFLAGS_saved_CFLAGS"
|
||||
])
|
||||
|
||||
AC_DEFUN([SECP_VALGRIND_CHECK],[
|
||||
|
@ -20,7 +39,8 @@ if test x"$has_valgrind" != x"yes"; then
|
|||
#if defined(NVALGRIND)
|
||||
# error "Valgrind does not support this platform."
|
||||
#endif
|
||||
]])], [has_valgrind=yes; AC_DEFINE(HAVE_VALGRIND,1,[Define this symbol if valgrind is installed, and it supports the host platform])])
|
||||
]])], [has_valgrind=yes])
|
||||
CPPFLAGS="$CPPFLAGS_TEMP"
|
||||
fi
|
||||
AC_MSG_RESULT($has_valgrind)
|
||||
])
|
||||
|
|
|
@ -1,42 +1,46 @@
|
|||
#!/bin/sh
|
||||
|
||||
set -e
|
||||
set -x
|
||||
set -eux
|
||||
|
||||
export LC_ALL=C
|
||||
|
||||
# Print relevant CI environment to allow reproducing the job outside of CI.
|
||||
# Print commit and relevant CI environment to allow reproducing the job outside of CI.
|
||||
git show --no-patch
|
||||
print_environment() {
|
||||
# Turn off -x because it messes up the output
|
||||
set +x
|
||||
# There are many ways to print variable names and their content. This one
|
||||
# does not rely on bash.
|
||||
for i in WERROR_CFLAGS MAKEFLAGS BUILD \
|
||||
for var in WERROR_CFLAGS MAKEFLAGS BUILD \
|
||||
ECMULTWINDOW ECMULTGENPRECISION ASM WIDEMUL WITH_VALGRIND EXTRAFLAGS \
|
||||
EXPERIMENTAL ECDH RECOVERY SCHNORRSIG \
|
||||
SECP256K1_TEST_ITERS BENCH SECP256K1_BENCH_ITERS CTIMETEST\
|
||||
EXPERIMENTAL ECDH RECOVERY SCHNORRSIG ELLSWIFT \
|
||||
SECP256K1_TEST_ITERS BENCH SECP256K1_BENCH_ITERS CTIMETESTS\
|
||||
EXAMPLES \
|
||||
WRAPPER_CMD CC AR NM HOST
|
||||
HOST WRAPPER_CMD \
|
||||
CC CFLAGS CPPFLAGS AR NM
|
||||
do
|
||||
eval 'printf "%s %s " "$i=\"${'"$i"'}\""'
|
||||
eval "isset=\${$var+x}"
|
||||
if [ -n "$isset" ]; then
|
||||
eval "val=\${$var}"
|
||||
# shellcheck disable=SC2154
|
||||
printf '%s="%s" ' "$var" "$val"
|
||||
fi
|
||||
done
|
||||
echo "$0"
|
||||
set -x
|
||||
}
|
||||
print_environment
|
||||
|
||||
# Start persistent wineserver if necessary.
|
||||
# This speeds up jobs with many invocations of wine (e.g., ./configure with MSVC) tremendously.
|
||||
case "$WRAPPER_CMD" in
|
||||
*wine*)
|
||||
# This is apparently only reliable when we run a dummy command such as "hh.exe" afterwards.
|
||||
wineserver -p && wine hh.exe
|
||||
env >> test_env.log
|
||||
|
||||
# If gcc is requested, assert that it's in fact gcc (and not some symlinked Apple clang).
|
||||
case "${CC:-undefined}" in
|
||||
*gcc*)
|
||||
$CC -v 2>&1 | grep -q "gcc version" || exit 1;
|
||||
;;
|
||||
esac
|
||||
|
||||
env >> test_env.log
|
||||
|
||||
if [ -n "$CC" ]; then
|
||||
if [ -n "${CC+x}" ]; then
|
||||
# The MSVC compiler "cl" doesn't understand "-v"
|
||||
$CC -v || true
|
||||
fi
|
||||
|
@ -47,6 +51,22 @@ if [ -n "$WRAPPER_CMD" ]; then
|
|||
$WRAPPER_CMD --version
|
||||
fi
|
||||
|
||||
# Workaround for https://bugs.kde.org/show_bug.cgi?id=452758 (fixed in valgrind 3.20.0).
|
||||
case "${CC:-undefined}" in
|
||||
clang*)
|
||||
if [ "$CTIMETESTS" = "yes" ] && [ "$WITH_VALGRIND" = "yes" ]
|
||||
then
|
||||
export CFLAGS="${CFLAGS:+$CFLAGS }-gdwarf-4"
|
||||
else
|
||||
case "$WRAPPER_CMD" in
|
||||
valgrind*)
|
||||
export CFLAGS="${CFLAGS:+$CFLAGS }-gdwarf-4"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
./autogen.sh
|
||||
|
||||
./configure \
|
||||
|
@ -55,8 +75,10 @@ fi
|
|||
--with-ecmult-window="$ECMULTWINDOW" \
|
||||
--with-ecmult-gen-precision="$ECMULTGENPRECISION" \
|
||||
--enable-module-ecdh="$ECDH" --enable-module-recovery="$RECOVERY" \
|
||||
--enable-module-ellswift="$ELLSWIFT" \
|
||||
--enable-module-schnorrsig="$SCHNORRSIG" \
|
||||
--enable-examples="$EXAMPLES" \
|
||||
--enable-ctime-tests="$CTIMETESTS" \
|
||||
--with-valgrind="$WITH_VALGRIND" \
|
||||
--host="$HOST" $EXTRAFLAGS
|
||||
|
||||
|
@ -73,14 +95,15 @@ export LOG_COMPILER="$WRAPPER_CMD"
|
|||
|
||||
make "$BUILD"
|
||||
|
||||
if [ "$BENCH" = "yes" ]
|
||||
then
|
||||
# Using the local `libtool` because on macOS the system's libtool has nothing to do with GNU libtool
|
||||
EXEC='./libtool --mode=execute'
|
||||
if [ -n "$WRAPPER_CMD" ]
|
||||
then
|
||||
EXEC="$EXEC $WRAPPER_CMD"
|
||||
fi
|
||||
|
||||
if [ "$BENCH" = "yes" ]
|
||||
then
|
||||
{
|
||||
$EXEC ./bench_ecmult
|
||||
$EXEC ./bench_internal
|
||||
|
@ -88,21 +111,22 @@ then
|
|||
} >> bench.log 2>&1
|
||||
fi
|
||||
|
||||
if [ "$CTIMETEST" = "yes" ]
|
||||
if [ "$CTIMETESTS" = "yes" ]
|
||||
then
|
||||
./libtool --mode=execute valgrind --error-exitcode=42 ./valgrind_ctime_test > valgrind_ctime_test.log 2>&1
|
||||
if [ "$WITH_VALGRIND" = "yes" ]; then
|
||||
./libtool --mode=execute valgrind --error-exitcode=42 ./ctime_tests > ctime_tests.log 2>&1
|
||||
else
|
||||
$EXEC ./ctime_tests > ctime_tests.log 2>&1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Rebuild precomputed files (if not cross-compiling).
|
||||
if [ -z "$HOST" ]
|
||||
then
|
||||
make clean-precomp
|
||||
make precomp
|
||||
make clean-precomp clean-testvectors
|
||||
make precomp testvectors
|
||||
fi
|
||||
|
||||
# Shutdown wineserver again
|
||||
wineserver -k || true
|
||||
|
||||
# Check that no repo files have been modified by the build.
|
||||
# (This fails for example if the precomp files need to be updated in the repo.)
|
||||
git diff --exit-code
|
|
@ -1,4 +1,17 @@
|
|||
FROM debian:stable
|
||||
FROM debian:stable-slim
|
||||
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
|
||||
WORKDIR /root
|
||||
|
||||
# A too high maximum number of file descriptors (with the default value
|
||||
# inherited from the docker host) can cause issues with some of our tools:
|
||||
# - sanitizers hanging: https://github.com/google/sanitizers/issues/1662
|
||||
# - valgrind crashing: https://stackoverflow.com/a/75293014
|
||||
# This is not be a problem on our CI hosts, but developers who run the image
|
||||
# on their machines may run into this (e.g., on Arch Linux), so warn them.
|
||||
# (Note that .bashrc is only executed in interactive bash shells.)
|
||||
RUN echo 'if [[ $(ulimit -n) -gt 200000 ]]; then echo "WARNING: Very high value reported by \"ulimit -n\". Consider passing \"--ulimit nofile=32768\" to \"docker run\"."; fi' >> /root/.bashrc
|
||||
|
||||
RUN dpkg --add-architecture i386 && \
|
||||
dpkg --add-architecture s390x && \
|
||||
|
@ -11,27 +24,52 @@ RUN dpkg --add-architecture i386 && \
|
|||
RUN apt-get update && apt-get install --no-install-recommends -y \
|
||||
git ca-certificates \
|
||||
make automake libtool pkg-config dpkg-dev valgrind qemu-user \
|
||||
gcc clang llvm libc6-dbg \
|
||||
gcc clang llvm libclang-rt-dev libc6-dbg \
|
||||
g++ \
|
||||
gcc-i686-linux-gnu libc6-dev-i386-cross libc6-dbg:i386 libubsan1:i386 libasan6:i386 \
|
||||
gcc-i686-linux-gnu libc6-dev-i386-cross libc6-dbg:i386 libubsan1:i386 libasan8:i386 \
|
||||
gcc-s390x-linux-gnu libc6-dev-s390x-cross libc6-dbg:s390x \
|
||||
gcc-arm-linux-gnueabihf libc6-dev-armhf-cross libc6-dbg:armhf \
|
||||
gcc-aarch64-linux-gnu libc6-dev-arm64-cross libc6-dbg:arm64 \
|
||||
gcc-powerpc64le-linux-gnu libc6-dev-ppc64el-cross libc6-dbg:ppc64el \
|
||||
gcc-mingw-w64-x86-64-win32 wine64 wine \
|
||||
gcc-mingw-w64-i686-win32 wine32 \
|
||||
sagemath
|
||||
python3
|
||||
|
||||
WORKDIR /root
|
||||
# The "wine" package provides a convience wrapper that we need
|
||||
RUN apt-get update && apt-get install --no-install-recommends -y \
|
||||
git ca-certificates wine64 wine python3-simplejson python3-six msitools winbind procps && \
|
||||
git clone https://github.com/mstorsjo/msvc-wine && \
|
||||
mkdir /opt/msvc && \
|
||||
python3 msvc-wine/vsdownload.py --accept-license --dest /opt/msvc Microsoft.VisualStudio.Workload.VCTools && \
|
||||
msvc-wine/install.sh /opt/msvc
|
||||
# Build and install gcc snapshot
|
||||
ARG GCC_SNAPSHOT_MAJOR=14
|
||||
RUN apt-get update && apt-get install --no-install-recommends -y wget libgmp-dev libmpfr-dev libmpc-dev flex && \
|
||||
mkdir gcc && cd gcc && \
|
||||
wget --progress=dot:giga --https-only --recursive --accept '*.tar.xz' --level 1 --no-directories "https://gcc.gnu.org/pub/gcc/snapshots/LATEST-${GCC_SNAPSHOT_MAJOR}" && \
|
||||
wget "https://gcc.gnu.org/pub/gcc/snapshots/LATEST-${GCC_SNAPSHOT_MAJOR}/sha512.sum" && \
|
||||
sha512sum --check --ignore-missing sha512.sum && \
|
||||
# We should have downloaded exactly one tar.xz file
|
||||
ls && \
|
||||
[[ $(ls *.tar.xz | wc -l) -eq "1" ]] && \
|
||||
tar xf *.tar.xz && \
|
||||
mkdir gcc-build && cd gcc-build && \
|
||||
../*/configure --prefix=/opt/gcc-snapshot --enable-languages=c --disable-bootstrap --disable-multilib --without-isl && \
|
||||
make -j $(nproc) && \
|
||||
make install && \
|
||||
cd ../.. && rm -rf gcc && \
|
||||
ln -s /opt/gcc-snapshot/bin/gcc /usr/bin/gcc-snapshot && \
|
||||
apt-get autoremove -y wget libgmp-dev libmpfr-dev libmpc-dev flex && \
|
||||
apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install clang snapshot, see https://apt.llvm.org/
|
||||
RUN \
|
||||
# Setup GPG keys of LLVM repository
|
||||
apt-get update && apt-get install --no-install-recommends -y wget && \
|
||||
wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc && \
|
||||
# Add repository for this Debian release
|
||||
. /etc/os-release && echo "deb http://apt.llvm.org/${VERSION_CODENAME} llvm-toolchain-${VERSION_CODENAME} main" >> /etc/apt/sources.list && \
|
||||
apt-get update && \
|
||||
# Determine the version number of the LLVM development branch
|
||||
LLVM_VERSION=$(apt-cache search --names-only '^clang-[0-9]+$' | sort -V | tail -1 | cut -f1 -d" " | cut -f2 -d"-" ) && \
|
||||
# Install
|
||||
apt-get install --no-install-recommends -y "clang-${LLVM_VERSION}" && \
|
||||
# Create symlink
|
||||
ln -s "/usr/bin/clang-${LLVM_VERSION}" /usr/bin/clang-snapshot && \
|
||||
# Clean up
|
||||
apt-get autoremove -y wget && \
|
||||
apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Initialize the wine environment. Wait until the wineserver process has
|
||||
# exited before closing the session, to avoid corrupting the wine prefix.
|
||||
RUN wine64 wineboot --init && \
|
||||
while (ps -A | grep wineserver) > /dev/null; do sleep 1; done
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
function(check_arm32_assembly)
|
||||
try_compile(HAVE_ARM32_ASM
|
||||
${CMAKE_BINARY_DIR}/check_arm32_assembly
|
||||
SOURCES ${CMAKE_SOURCE_DIR}/cmake/source_arm32.s
|
||||
)
|
||||
endfunction()
|
|
@ -0,0 +1,10 @@
|
|||
function(check_string_option_value option)
|
||||
get_property(expected_values CACHE ${option} PROPERTY STRINGS)
|
||||
if(expected_values)
|
||||
if(${option} IN_LIST expected_values)
|
||||
return()
|
||||
endif()
|
||||
message(FATAL_ERROR "${option} value is \"${${option}}\", but must be one of ${expected_values}.")
|
||||
endif()
|
||||
message(AUTHOR_WARNING "The STRINGS property must be set before invoking `check_string_option_value' function.")
|
||||
endfunction()
|
|
@ -0,0 +1,14 @@
|
|||
include(CheckCSourceCompiles)
|
||||
|
||||
function(check_x86_64_assembly)
|
||||
check_c_source_compiles("
|
||||
#include <stdint.h>
|
||||
|
||||
int main()
|
||||
{
|
||||
uint64_t a = 11, tmp;
|
||||
__asm__ __volatile__(\"movq $0x100000000,%1; mulq %%rsi\" : \"+a\"(a) : \"S\"(tmp) : \"cc\", \"%rdx\");
|
||||
}
|
||||
" HAVE_X86_64_ASM)
|
||||
set(HAVE_X86_64_ASM ${HAVE_X86_64_ASM} PARENT_SCOPE)
|
||||
endfunction()
|
|
@ -0,0 +1,41 @@
|
|||
if(CMAKE_HOST_APPLE)
|
||||
find_program(BREW_COMMAND brew)
|
||||
execute_process(
|
||||
COMMAND ${BREW_COMMAND} --prefix valgrind
|
||||
OUTPUT_VARIABLE valgrind_brew_prefix
|
||||
ERROR_QUIET
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||
)
|
||||
endif()
|
||||
|
||||
set(hints_paths)
|
||||
if(valgrind_brew_prefix)
|
||||
set(hints_paths ${valgrind_brew_prefix}/include)
|
||||
endif()
|
||||
|
||||
find_path(Valgrind_INCLUDE_DIR
|
||||
NAMES valgrind/memcheck.h
|
||||
HINTS ${hints_paths}
|
||||
)
|
||||
|
||||
if(Valgrind_INCLUDE_DIR)
|
||||
include(CheckCSourceCompiles)
|
||||
set(CMAKE_REQUIRED_INCLUDES ${Valgrind_INCLUDE_DIR})
|
||||
check_c_source_compiles("
|
||||
#include <valgrind/memcheck.h>
|
||||
#if defined(NVALGRIND)
|
||||
# error \"Valgrind does not support this platform.\"
|
||||
#endif
|
||||
|
||||
int main() {}
|
||||
" Valgrind_WORKS)
|
||||
endif()
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(Valgrind
|
||||
REQUIRED_VARS Valgrind_INCLUDE_DIR Valgrind_WORKS
|
||||
)
|
||||
|
||||
mark_as_advanced(
|
||||
Valgrind_INCLUDE_DIR
|
||||
)
|
|
@ -0,0 +1,24 @@
|
|||
include(CheckCCompilerFlag)
|
||||
|
||||
function(rustsecp256k1_v0_9_0_check_c_flags_internal flags output)
|
||||
string(MAKE_C_IDENTIFIER "${flags}" result)
|
||||
string(TOUPPER "${result}" result)
|
||||
set(result "C_SUPPORTS_${result}")
|
||||
if(NOT MSVC)
|
||||
set(CMAKE_REQUIRED_FLAGS "-Werror")
|
||||
endif()
|
||||
|
||||
# This avoids running a linker.
|
||||
set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
|
||||
check_c_compiler_flag("${flags}" ${result})
|
||||
|
||||
set(${output} ${${result}} PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
# Append flags to the COMPILE_OPTIONS directory property if CC accepts them.
|
||||
macro(try_append_c_flags)
|
||||
rustsecp256k1_v0_9_0_check_c_flags_internal("${ARGV}" result)
|
||||
if(result)
|
||||
add_compile_options(${ARGV})
|
||||
endif()
|
||||
endmacro()
|
|
@ -0,0 +1,3 @@
|
|||
set(CMAKE_SYSTEM_NAME Linux)
|
||||
set(CMAKE_SYSTEM_PROCESSOR arm)
|
||||
set(CMAKE_C_COMPILER arm-linux-gnueabihf-gcc)
|
|
@ -0,0 +1,5 @@
|
|||
@PACKAGE_INIT@
|
||||
|
||||
include("${CMAKE_CURRENT_LIST_DIR}/@PROJECT_NAME@-targets.cmake")
|
||||
|
||||
check_required_components(@PROJECT_NAME@)
|
|
@ -0,0 +1,9 @@
|
|||
.syntax unified
|
||||
.eabi_attribute 24, 1
|
||||
.eabi_attribute 25, 1
|
||||
.text
|
||||
.global main
|
||||
main:
|
||||
ldr r0, =0x002A
|
||||
mov r7, #1
|
||||
swi 0
|
|
@ -0,0 +1,3 @@
|
|||
set(CMAKE_SYSTEM_NAME Windows)
|
||||
set(CMAKE_SYSTEM_PROCESSOR x86_64)
|
||||
set(CMAKE_C_COMPILER x86_64-w64-mingw32-gcc)
|
|
@ -4,7 +4,7 @@ AC_PREREQ([2.60])
|
|||
# the API. All changes in experimental modules are treated as
|
||||
# backwards-compatible and therefore at most increase the minor version.
|
||||
define(_PKG_VERSION_MAJOR, 0)
|
||||
define(_PKG_VERSION_MINOR, 2)
|
||||
define(_PKG_VERSION_MINOR, 4)
|
||||
define(_PKG_VERSION_PATCH, 0)
|
||||
define(_PKG_VERSION_IS_RELEASE, true)
|
||||
|
||||
|
@ -13,18 +13,15 @@ define(_PKG_VERSION_IS_RELEASE, true)
|
|||
# https://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html
|
||||
# All changes in experimental modules are treated as if they don't affect the
|
||||
# interface and therefore only increase the revision.
|
||||
define(_LIB_VERSION_CURRENT, 1)
|
||||
define(_LIB_VERSION_CURRENT, 3)
|
||||
define(_LIB_VERSION_REVISION, 0)
|
||||
define(_LIB_VERSION_AGE, 0)
|
||||
define(_LIB_VERSION_AGE, 1)
|
||||
|
||||
AC_INIT([libsecp256k1],m4_join([.], _PKG_VERSION_MAJOR, _PKG_VERSION_MINOR, _PKG_VERSION_PATCH)m4_if(_PKG_VERSION_IS_RELEASE, [true], [], [-dev]),[https://github.com/bitcoin-core/secp256k1/issues],[libsecp256k1],[https://github.com/bitcoin-core/secp256k1])
|
||||
|
||||
AC_CONFIG_AUX_DIR([build-aux])
|
||||
AC_CONFIG_MACRO_DIR([build-aux/m4])
|
||||
AC_CANONICAL_HOST
|
||||
AH_TOP([#ifndef LIBSECP256K1_CONFIG_H])
|
||||
AH_TOP([#define LIBSECP256K1_CONFIG_H])
|
||||
AH_BOTTOM([#endif /*LIBSECP256K1_CONFIG_H*/])
|
||||
|
||||
# Require Automake 1.11.2 for AM_PROG_AR
|
||||
AM_INIT_AUTOMAKE([1.11.2 foreign subdir-objects])
|
||||
|
@ -32,6 +29,11 @@ AM_INIT_AUTOMAKE([1.11.2 foreign subdir-objects])
|
|||
# Make the compilation flags quiet unless V=1 is used.
|
||||
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
|
||||
|
||||
if test "${CFLAGS+set}" = "set"; then
|
||||
CFLAGS_overridden=yes
|
||||
else
|
||||
CFLAGS_overridden=no
|
||||
fi
|
||||
AC_PROG_CC
|
||||
AM_PROG_AS
|
||||
AM_PROG_AR
|
||||
|
@ -91,11 +93,14 @@ esac
|
|||
AC_DEFUN([SECP_TRY_APPEND_DEFAULT_CFLAGS], [
|
||||
# GCC and compatible (incl. clang)
|
||||
if test "x$GCC" = "xyes"; then
|
||||
# Try to append -Werror=unknown-warning-option to CFLAGS temporarily. Otherwise clang will
|
||||
# not error out if it gets unknown warning flags and the checks here will always succeed
|
||||
# no matter if clang knows the flag or not.
|
||||
# Try to append -Werror to CFLAGS temporarily. Otherwise checks for some unsupported
|
||||
# flags will succeed.
|
||||
# Note that failure to append -Werror does not necessarily mean that -Werror is not
|
||||
# supported. The compiler may already be warning about something unrelated, for example
|
||||
# about some path issue. If that is the case, -Werror cannot be used because all
|
||||
# of those warnings would be turned into errors.
|
||||
SECP_TRY_APPEND_DEFAULT_CFLAGS_saved_CFLAGS="$CFLAGS"
|
||||
SECP_TRY_APPEND_CFLAGS([-Werror=unknown-warning-option], CFLAGS)
|
||||
SECP_TRY_APPEND_CFLAGS([-Werror], CFLAGS)
|
||||
|
||||
SECP_TRY_APPEND_CFLAGS([-std=c89 -pedantic -Wno-long-long -Wnested-externs -Wshadow -Wstrict-prototypes -Wundef], $1) # GCC >= 3.0, -Wlong-long is implied by -pedantic.
|
||||
SECP_TRY_APPEND_CFLAGS([-Wno-overlength-strings], $1) # GCC >= 4.2, -Woverlength-strings is implied by -pedantic.
|
||||
|
@ -105,6 +110,7 @@ AC_DEFUN([SECP_TRY_APPEND_DEFAULT_CFLAGS], [
|
|||
SECP_TRY_APPEND_CFLAGS([-Wcast-align], $1) # GCC >= 2.95
|
||||
SECP_TRY_APPEND_CFLAGS([-Wcast-align=strict], $1) # GCC >= 8.0
|
||||
SECP_TRY_APPEND_CFLAGS([-Wconditional-uninitialized], $1) # Clang >= 3.0 only
|
||||
SECP_TRY_APPEND_CFLAGS([-Wreserved-identifier], $1) # Clang >= 13.0 only
|
||||
SECP_TRY_APPEND_CFLAGS([-fvisibility=hidden], $1) # GCC >= 4.0
|
||||
|
||||
CFLAGS="$SECP_TRY_APPEND_DEFAULT_CFLAGS_saved_CFLAGS"
|
||||
|
@ -115,8 +121,12 @@ AC_DEFUN([SECP_TRY_APPEND_DEFAULT_CFLAGS], [
|
|||
# libtool makes the same assumption internally.
|
||||
# Note that "/opt" and "-opt" are equivalent for MSVC; we use "-opt" because "/opt" looks like a path.
|
||||
if test x"$GCC" != x"yes" && test x"$build_windows" = x"yes"; then
|
||||
SECP_TRY_APPEND_CFLAGS([-W2 -wd4146], $1) # Moderate warning level, disable warning C4146 "unary minus operator applied to unsigned type, result still unsigned"
|
||||
SECP_TRY_APPEND_CFLAGS([-external:anglebrackets -external:W0], $1) # Suppress warnings from #include <...> files
|
||||
SECP_TRY_APPEND_CFLAGS([-W3], $1) # Production quality warning level.
|
||||
SECP_TRY_APPEND_CFLAGS([-wd4146], $1) # Disable warning C4146 "unary minus operator applied to unsigned type, result still unsigned".
|
||||
SECP_TRY_APPEND_CFLAGS([-wd4244], $1) # Disable warning C4244 "'conversion' conversion from 'type1' to 'type2', possible loss of data".
|
||||
SECP_TRY_APPEND_CFLAGS([-wd4267], $1) # Disable warning C4267 "'var' : conversion from 'size_t' to 'type', possible loss of data".
|
||||
# Eliminate deprecation warnings for the older, less secure functions.
|
||||
CPPFLAGS="-D_CRT_SECURE_NO_WARNINGS $CPPFLAGS"
|
||||
fi
|
||||
])
|
||||
SECP_TRY_APPEND_DEFAULT_CFLAGS(SECP_CFLAGS)
|
||||
|
@ -142,6 +152,10 @@ AC_ARG_ENABLE(tests,
|
|||
AS_HELP_STRING([--enable-tests],[compile tests [default=yes]]), [],
|
||||
[SECP_SET_DEFAULT([enable_tests], [yes], [yes])])
|
||||
|
||||
AC_ARG_ENABLE(ctime_tests,
|
||||
AS_HELP_STRING([--enable-ctime-tests],[compile constant-time tests [default=yes if valgrind enabled]]), [],
|
||||
[SECP_SET_DEFAULT([enable_ctime_tests], [auto], [auto])])
|
||||
|
||||
AC_ARG_ENABLE(experimental,
|
||||
AS_HELP_STRING([--enable-experimental],[allow experimental configure options [default=no]]), [],
|
||||
[SECP_SET_DEFAULT([enable_experimental], [no], [yes])])
|
||||
|
@ -170,6 +184,10 @@ AC_ARG_ENABLE(module_schnorrsig,
|
|||
AS_HELP_STRING([--enable-module-schnorrsig],[enable schnorrsig module [default=yes]]), [],
|
||||
[SECP_SET_DEFAULT([enable_module_schnorrsig], [yes], [yes])])
|
||||
|
||||
AC_ARG_ENABLE(module_ellswift,
|
||||
AS_HELP_STRING([--enable-module-ellswift],[enable ElligatorSwift module [default=yes]]), [],
|
||||
[SECP_SET_DEFAULT([enable_module_ellswift], [yes], [yes])])
|
||||
|
||||
AC_ARG_ENABLE(external_default_callbacks,
|
||||
AS_HELP_STRING([--enable-external-default-callbacks],[enable external default callback functions [default=no]]), [],
|
||||
[SECP_SET_DEFAULT([enable_external_default_callbacks], [no], [no])])
|
||||
|
@ -182,8 +200,8 @@ AC_ARG_ENABLE(external_default_callbacks,
|
|||
# * and auto (the default).
|
||||
AC_ARG_WITH([test-override-wide-multiply], [] ,[set_widemul=$withval], [set_widemul=auto])
|
||||
|
||||
AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto],
|
||||
[assembly optimizations to use (experimental: arm) [default=auto]])],[req_asm=$withval], [req_asm=auto])
|
||||
AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm32|no|auto],
|
||||
[assembly optimizations to use (experimental: arm32) [default=auto]])],[req_asm=$withval], [req_asm=auto])
|
||||
|
||||
AC_ARG_WITH([ecmult-window], [AS_HELP_STRING([--with-ecmult-window=SIZE|auto],
|
||||
[window size for ecmult precomputation for verification, specified as integer in range [2..24].]
|
||||
|
@ -225,11 +243,20 @@ else
|
|||
enable_valgrind=yes
|
||||
fi
|
||||
fi
|
||||
AM_CONDITIONAL([VALGRIND_ENABLED],[test "$enable_valgrind" = "yes"])
|
||||
|
||||
if test x"$enable_ctime_tests" = x"auto"; then
|
||||
enable_ctime_tests=$enable_valgrind
|
||||
fi
|
||||
|
||||
if test x"$enable_coverage" = x"yes"; then
|
||||
AC_DEFINE(COVERAGE, 1, [Define this symbol to compile out all VERIFY code])
|
||||
SECP_CONFIG_DEFINES="$SECP_CONFIG_DEFINES -DCOVERAGE=1"
|
||||
SECP_CFLAGS="-O0 --coverage $SECP_CFLAGS"
|
||||
# If coverage is enabled, and the user has not overridden CFLAGS,
|
||||
# override Autoconf's value "-g -O2" with "-g". Otherwise we'd end up
|
||||
# with "-O0 --coverage -g -O2".
|
||||
if test "$CFLAGS_overridden" = "no"; then
|
||||
CFLAGS="-g"
|
||||
fi
|
||||
LDFLAGS="--coverage $LDFLAGS"
|
||||
else
|
||||
# Most likely the CFLAGS already contain -O2 because that is autoconf's default.
|
||||
|
@ -239,8 +266,8 @@ else
|
|||
fi
|
||||
|
||||
if test x"$req_asm" = x"auto"; then
|
||||
SECP_64BIT_ASM_CHECK
|
||||
if test x"$has_64bit_asm" = x"yes"; then
|
||||
SECP_X86_64_ASM_CHECK
|
||||
if test x"$has_x86_64_asm" = x"yes"; then
|
||||
set_asm=x86_64
|
||||
fi
|
||||
if test x"$set_asm" = x; then
|
||||
|
@ -250,12 +277,16 @@ else
|
|||
set_asm=$req_asm
|
||||
case $set_asm in
|
||||
x86_64)
|
||||
SECP_64BIT_ASM_CHECK
|
||||
if test x"$has_64bit_asm" != x"yes"; then
|
||||
SECP_X86_64_ASM_CHECK
|
||||
if test x"$has_x86_64_asm" != x"yes"; then
|
||||
AC_MSG_ERROR([x86_64 assembly optimization requested but not available])
|
||||
fi
|
||||
;;
|
||||
arm)
|
||||
arm32)
|
||||
SECP_ARM32_ASM_CHECK
|
||||
if test x"$has_arm32_asm" != x"yes"; then
|
||||
AC_MSG_ERROR([ARM32 assembly optimization requested but not available])
|
||||
fi
|
||||
;;
|
||||
no)
|
||||
;;
|
||||
|
@ -270,9 +301,9 @@ enable_external_asm=no
|
|||
|
||||
case $set_asm in
|
||||
x86_64)
|
||||
AC_DEFINE(USE_ASM_X86_64, 1, [Define this symbol to enable x86_64 assembly optimizations])
|
||||
SECP_CONFIG_DEFINES="$SECP_CONFIG_DEFINES -DUSE_ASM_X86_64=1"
|
||||
;;
|
||||
arm)
|
||||
arm32)
|
||||
enable_external_asm=yes
|
||||
;;
|
||||
no)
|
||||
|
@ -283,20 +314,20 @@ no)
|
|||
esac
|
||||
|
||||
if test x"$enable_external_asm" = x"yes"; then
|
||||
AC_DEFINE(USE_EXTERNAL_ASM, 1, [Define this symbol if an external (non-inline) assembly implementation is used])
|
||||
SECP_CONFIG_DEFINES="$SECP_CONFIG_DEFINES -DUSE_EXTERNAL_ASM=1"
|
||||
fi
|
||||
|
||||
|
||||
# Select wide multiplication implementation
|
||||
case $set_widemul in
|
||||
int128_struct)
|
||||
AC_DEFINE(USE_FORCE_WIDEMUL_INT128_STRUCT, 1, [Define this symbol to force the use of the structure for simulating (unsigned) int128 based wide multiplication])
|
||||
SECP_CONFIG_DEFINES="$SECP_CONFIG_DEFINES -DUSE_FORCE_WIDEMUL_INT128_STRUCT=1"
|
||||
;;
|
||||
int128)
|
||||
AC_DEFINE(USE_FORCE_WIDEMUL_INT128, 1, [Define this symbol to force the use of the (unsigned) __int128 based wide multiplication implementation])
|
||||
SECP_CONFIG_DEFINES="$SECP_CONFIG_DEFINES -DUSE_FORCE_WIDEMUL_INT128=1"
|
||||
;;
|
||||
int64)
|
||||
AC_DEFINE(USE_FORCE_WIDEMUL_INT64, 1, [Define this symbol to force the use of the (u)int64_t based wide multiplication implementation])
|
||||
SECP_CONFIG_DEFINES="$SECP_CONFIG_DEFINES -DUSE_FORCE_WIDEMUL_INT64=1"
|
||||
;;
|
||||
auto)
|
||||
;;
|
||||
|
@ -323,7 +354,7 @@ case $set_ecmult_window in
|
|||
# not in range
|
||||
AC_MSG_ERROR($error_window_size)
|
||||
fi
|
||||
AC_DEFINE_UNQUOTED(ECMULT_WINDOW_SIZE, $set_ecmult_window, [Set window size for ecmult precomputation])
|
||||
SECP_CONFIG_DEFINES="$SECP_CONFIG_DEFINES -DECMULT_WINDOW_SIZE=$set_ecmult_window"
|
||||
;;
|
||||
esac
|
||||
|
||||
|
@ -336,7 +367,7 @@ fi
|
|||
|
||||
case $set_ecmult_gen_precision in
|
||||
2|4|8)
|
||||
AC_DEFINE_UNQUOTED(ECMULT_GEN_PREC_BITS, $set_ecmult_gen_precision, [Set ecmult gen precision bits])
|
||||
SECP_CONFIG_DEFINES="$SECP_CONFIG_DEFINES -DECMULT_GEN_PREC_BITS=$set_ecmult_gen_precision"
|
||||
;;
|
||||
*)
|
||||
AC_MSG_ERROR(['ecmult gen precision not 2, 4, 8 or "auto"'])
|
||||
|
@ -344,7 +375,7 @@ case $set_ecmult_gen_precision in
|
|||
esac
|
||||
|
||||
if test x"$enable_valgrind" = x"yes"; then
|
||||
SECP_INCLUDES="$SECP_INCLUDES $VALGRIND_CPPFLAGS"
|
||||
SECP_CONFIG_DEFINES="$SECP_CONFIG_DEFINES $VALGRIND_CPPFLAGS -DVALGRIND"
|
||||
fi
|
||||
|
||||
# Add -Werror and similar flags passed from the outside (for testing, e.g., in CI).
|
||||
|
@ -357,26 +388,30 @@ SECP_CFLAGS="$SECP_CFLAGS $WERROR_CFLAGS"
|
|||
###
|
||||
|
||||
if test x"$enable_module_ecdh" = x"yes"; then
|
||||
AC_DEFINE(ENABLE_MODULE_ECDH, 1, [Define this symbol to enable the ECDH module])
|
||||
SECP_CONFIG_DEFINES="$SECP_CONFIG_DEFINES -DENABLE_MODULE_ECDH=1"
|
||||
fi
|
||||
|
||||
if test x"$enable_module_recovery" = x"yes"; then
|
||||
AC_DEFINE(ENABLE_MODULE_RECOVERY, 1, [Define this symbol to enable the ECDSA pubkey recovery module])
|
||||
SECP_CONFIG_DEFINES="$SECP_CONFIG_DEFINES -DENABLE_MODULE_RECOVERY=1"
|
||||
fi
|
||||
|
||||
if test x"$enable_module_schnorrsig" = x"yes"; then
|
||||
AC_DEFINE(ENABLE_MODULE_SCHNORRSIG, 1, [Define this symbol to enable the schnorrsig module])
|
||||
SECP_CONFIG_DEFINES="$SECP_CONFIG_DEFINES -DENABLE_MODULE_SCHNORRSIG=1"
|
||||
enable_module_extrakeys=yes
|
||||
fi
|
||||
|
||||
if test x"$enable_module_ellswift" = x"yes"; then
|
||||
SECP_CONFIG_DEFINES="$SECP_CONFIG_DEFINES -DENABLE_MODULE_ELLSWIFT=1"
|
||||
fi
|
||||
|
||||
# Test if extrakeys is set after the schnorrsig module to allow the schnorrsig
|
||||
# module to set enable_module_extrakeys=yes
|
||||
if test x"$enable_module_extrakeys" = x"yes"; then
|
||||
AC_DEFINE(ENABLE_MODULE_EXTRAKEYS, 1, [Define this symbol to enable the extrakeys module])
|
||||
SECP_CONFIG_DEFINES="$SECP_CONFIG_DEFINES -DENABLE_MODULE_EXTRAKEYS=1"
|
||||
fi
|
||||
|
||||
if test x"$enable_external_default_callbacks" = x"yes"; then
|
||||
AC_DEFINE(USE_EXTERNAL_DEFAULT_CALLBACKS, 1, [Define this symbol if an external implementation of the default callbacks is used])
|
||||
SECP_CONFIG_DEFINES="$SECP_CONFIG_DEFINES -DUSE_EXTERNAL_DEFAULT_CALLBACKS=1"
|
||||
fi
|
||||
|
||||
###
|
||||
|
@ -389,8 +424,8 @@ if test x"$enable_experimental" = x"yes"; then
|
|||
AC_MSG_NOTICE([Experimental features do not have stable APIs or properties, and may not be safe for production use.])
|
||||
AC_MSG_NOTICE([******])
|
||||
else
|
||||
if test x"$set_asm" = x"arm"; then
|
||||
AC_MSG_ERROR([ARM assembly optimization is experimental. Use --enable-experimental to allow.])
|
||||
if test x"$set_asm" = x"arm32"; then
|
||||
AC_MSG_ERROR([ARM32 assembly optimization is experimental. Use --enable-experimental to allow.])
|
||||
fi
|
||||
fi
|
||||
|
||||
|
@ -398,15 +433,12 @@ fi
|
|||
### Generate output
|
||||
###
|
||||
|
||||
AC_CONFIG_HEADERS([src/libsecp256k1-config.h])
|
||||
AC_CONFIG_FILES([Makefile libsecp256k1.pc])
|
||||
AC_SUBST(SECP_INCLUDES)
|
||||
AC_SUBST(SECP_LIBS)
|
||||
AC_SUBST(SECP_TEST_LIBS)
|
||||
AC_SUBST(SECP_TEST_INCLUDES)
|
||||
AC_SUBST(SECP_CFLAGS)
|
||||
AC_SUBST(SECP_CONFIG_DEFINES)
|
||||
AM_CONDITIONAL([ENABLE_COVERAGE], [test x"$enable_coverage" = x"yes"])
|
||||
AM_CONDITIONAL([USE_TESTS], [test x"$enable_tests" != x"no"])
|
||||
AM_CONDITIONAL([USE_CTIME_TESTS], [test x"$enable_ctime_tests" = x"yes"])
|
||||
AM_CONDITIONAL([USE_EXHAUSTIVE_TESTS], [test x"$enable_exhaustive_tests" != x"no"])
|
||||
AM_CONDITIONAL([USE_EXAMPLES], [test x"$enable_examples" != x"no"])
|
||||
AM_CONDITIONAL([USE_BENCHMARK], [test x"$enable_benchmark" = x"yes"])
|
||||
|
@ -414,8 +446,9 @@ AM_CONDITIONAL([ENABLE_MODULE_ECDH], [test x"$enable_module_ecdh" = x"yes"])
|
|||
AM_CONDITIONAL([ENABLE_MODULE_RECOVERY], [test x"$enable_module_recovery" = x"yes"])
|
||||
AM_CONDITIONAL([ENABLE_MODULE_EXTRAKEYS], [test x"$enable_module_extrakeys" = x"yes"])
|
||||
AM_CONDITIONAL([ENABLE_MODULE_SCHNORRSIG], [test x"$enable_module_schnorrsig" = x"yes"])
|
||||
AM_CONDITIONAL([ENABLE_MODULE_ELLSWIFT], [test x"$enable_module_ellswift" = x"yes"])
|
||||
AM_CONDITIONAL([USE_EXTERNAL_ASM], [test x"$enable_external_asm" = x"yes"])
|
||||
AM_CONDITIONAL([USE_ASM_ARM], [test x"$set_asm" = x"arm"])
|
||||
AM_CONDITIONAL([USE_ASM_ARM], [test x"$set_asm" = x"arm32"])
|
||||
AM_CONDITIONAL([BUILD_WINDOWS], [test "$build_windows" = "yes"])
|
||||
AC_SUBST(LIB_VERSION_CURRENT, _LIB_VERSION_CURRENT)
|
||||
AC_SUBST(LIB_VERSION_REVISION, _LIB_VERSION_REVISION)
|
||||
|
@ -428,12 +461,14 @@ echo "Build Options:"
|
|||
echo " with external callbacks = $enable_external_default_callbacks"
|
||||
echo " with benchmarks = $enable_benchmark"
|
||||
echo " with tests = $enable_tests"
|
||||
echo " with ctime tests = $enable_ctime_tests"
|
||||
echo " with coverage = $enable_coverage"
|
||||
echo " with examples = $enable_examples"
|
||||
echo " module ecdh = $enable_module_ecdh"
|
||||
echo " module recovery = $enable_module_recovery"
|
||||
echo " module extrakeys = $enable_module_extrakeys"
|
||||
echo " module schnorrsig = $enable_module_schnorrsig"
|
||||
echo " module ellswift = $enable_module_ellswift"
|
||||
echo
|
||||
echo " asm = $set_asm"
|
||||
echo " ecmult window size = $set_ecmult_window"
|
||||
|
|
|
@ -7,8 +7,10 @@
|
|||
#include <string.h>
|
||||
|
||||
#include "lax_der_parsing.h"
|
||||
|
||||
int rustsecp256k1_v0_8_1_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_8_1_context* ctx, rustsecp256k1_v0_8_1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) {
|
||||
extern int rustsecp256k1_v0_9_0_ecdsa_signature_parse_compact(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_ecdsa_signature *sig, const unsigned char *input64);
|
||||
int rustsecp256k1_v0_9_0_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_9_0_context* ctx, rustsecp256k1_v0_9_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) {
|
||||
size_t rpos, rlen, spos, slen;
|
||||
size_t pos = 0;
|
||||
size_t lenbyte;
|
||||
|
@ -16,7 +18,7 @@ int rustsecp256k1_v0_8_1_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_8_
|
|||
int overflow = 0;
|
||||
|
||||
/* Hack to initialize sig with a correctly-parsed but invalid signature. */
|
||||
rustsecp256k1_v0_8_1_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
|
||||
rustsecp256k1_v0_9_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
|
||||
|
||||
/* Sequence tag byte */
|
||||
if (pos == inputlen || input[pos] != 0x30) {
|
||||
|
@ -137,11 +139,11 @@ int rustsecp256k1_v0_8_1_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_8_
|
|||
}
|
||||
|
||||
if (!overflow) {
|
||||
overflow = !rustsecp256k1_v0_8_1_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
|
||||
overflow = !rustsecp256k1_v0_9_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
|
||||
}
|
||||
if (overflow) {
|
||||
memset(tmpsig, 0, 64);
|
||||
rustsecp256k1_v0_8_1_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
|
||||
rustsecp256k1_v0_9_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
|
|
@ -26,8 +26,8 @@
|
|||
* certain violations are easily supported. You may need to adapt it.
|
||||
*
|
||||
* Do not use this for new systems. Use well-defined DER or compact signatures
|
||||
* instead if you have the choice (see rustsecp256k1_v0_8_1_ecdsa_signature_parse_der and
|
||||
* rustsecp256k1_v0_8_1_ecdsa_signature_parse_compact).
|
||||
* instead if you have the choice (see rustsecp256k1_v0_9_0_ecdsa_signature_parse_der and
|
||||
* rustsecp256k1_v0_9_0_ecdsa_signature_parse_compact).
|
||||
*
|
||||
* The supported violations are:
|
||||
* - All numbers are parsed as nonnegative integers, even though X.609-0207
|
||||
|
@ -83,9 +83,9 @@ extern "C" {
|
|||
* encoded numbers are out of range, signature validation with it is
|
||||
* guaranteed to fail for every message and public key.
|
||||
*/
|
||||
int rustsecp256k1_v0_8_1_ecdsa_signature_parse_der_lax(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_ecdsa_signature* sig,
|
||||
int rustsecp256k1_v0_9_0_ecdsa_signature_parse_der_lax(
|
||||
const rustsecp256k1_v0_9_0_context* ctx,
|
||||
rustsecp256k1_v0_9_0_ecdsa_signature* sig,
|
||||
const unsigned char *input,
|
||||
size_t inputlen
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
|
||||
#include "lax_der_privatekey_parsing.h"
|
||||
|
||||
int ec_privkey_import_der(const rustsecp256k1_v0_8_1_context* ctx, unsigned char *out32, const unsigned char *privkey, size_t privkeylen) {
|
||||
int ec_privkey_import_der(const rustsecp256k1_v0_9_0_context* ctx, unsigned char *out32, const unsigned char *privkey, size_t privkeylen) {
|
||||
const unsigned char *end = privkey + privkeylen;
|
||||
int lenb = 0;
|
||||
int len = 0;
|
||||
|
@ -45,17 +45,17 @@ int ec_privkey_import_der(const rustsecp256k1_v0_8_1_context* ctx, unsigned char
|
|||
return 0;
|
||||
}
|
||||
if (privkey[1]) memcpy(out32 + 32 - privkey[1], privkey + 2, privkey[1]);
|
||||
if (!rustsecp256k1_v0_8_1_ec_seckey_verify(ctx, out32)) {
|
||||
if (!rustsecp256k1_v0_9_0_ec_seckey_verify(ctx, out32)) {
|
||||
memset(out32, 0, 32);
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
int ec_privkey_export_der(const rustsecp256k1_v0_8_1_context *ctx, unsigned char *privkey, size_t *privkeylen, const unsigned char *key32, int compressed) {
|
||||
rustsecp256k1_v0_8_1_pubkey pubkey;
|
||||
int ec_privkey_export_der(const rustsecp256k1_v0_9_0_context *ctx, unsigned char *privkey, size_t *privkeylen, const unsigned char *key32, int compressed) {
|
||||
rustsecp256k1_v0_9_0_pubkey pubkey;
|
||||
size_t pubkeylen = 0;
|
||||
if (!rustsecp256k1_v0_8_1_ec_pubkey_create(ctx, &pubkey, key32)) {
|
||||
if (!rustsecp256k1_v0_9_0_ec_pubkey_create(ctx, &pubkey, key32)) {
|
||||
*privkeylen = 0;
|
||||
return 0;
|
||||
}
|
||||
|
@ -79,7 +79,7 @@ int ec_privkey_export_der(const rustsecp256k1_v0_8_1_context *ctx, unsigned char
|
|||
memcpy(ptr, key32, 32); ptr += 32;
|
||||
memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle);
|
||||
pubkeylen = 33;
|
||||
rustsecp256k1_v0_8_1_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED);
|
||||
rustsecp256k1_v0_9_0_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED);
|
||||
ptr += pubkeylen;
|
||||
*privkeylen = ptr - privkey;
|
||||
} else {
|
||||
|
@ -104,7 +104,7 @@ int ec_privkey_export_der(const rustsecp256k1_v0_8_1_context *ctx, unsigned char
|
|||
memcpy(ptr, key32, 32); ptr += 32;
|
||||
memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle);
|
||||
pubkeylen = 65;
|
||||
rustsecp256k1_v0_8_1_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_UNCOMPRESSED);
|
||||
rustsecp256k1_v0_9_0_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_UNCOMPRESSED);
|
||||
ptr += pubkeylen;
|
||||
*privkeylen = ptr - privkey;
|
||||
}
|
||||
|
|
|
@ -43,7 +43,7 @@ extern "C" {
|
|||
/** Export a private key in DER format.
|
||||
*
|
||||
* Returns: 1 if the private key was valid.
|
||||
* Args: ctx: pointer to a context object (not rustsecp256k1_v0_8_1_context_static).
|
||||
* Args: ctx: pointer to a context object (not rustsecp256k1_v0_9_0_context_static).
|
||||
* Out: privkey: pointer to an array for storing the private key in BER.
|
||||
* Should have space for 279 bytes, and cannot be NULL.
|
||||
* privkeylen: Pointer to an int where the length of the private key in
|
||||
|
@ -57,10 +57,10 @@ extern "C" {
|
|||
* simple 32-byte private keys are sufficient.
|
||||
*
|
||||
* Note that this function does not guarantee correct DER output. It is
|
||||
* guaranteed to be parsable by rustsecp256k1_v0_8_1_ec_privkey_import_der
|
||||
* guaranteed to be parsable by rustsecp256k1_v0_9_0_ec_privkey_import_der
|
||||
*/
|
||||
SECP256K1_WARN_UNUSED_RESULT int ec_privkey_export_der(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
const rustsecp256k1_v0_9_0_context* ctx,
|
||||
unsigned char *privkey,
|
||||
size_t *privkeylen,
|
||||
const unsigned char *seckey,
|
||||
|
@ -82,7 +82,7 @@ SECP256K1_WARN_UNUSED_RESULT int ec_privkey_export_der(
|
|||
* key.
|
||||
*/
|
||||
SECP256K1_WARN_UNUSED_RESULT int ec_privkey_import_der(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
const rustsecp256k1_v0_9_0_context* ctx,
|
||||
unsigned char *seckey,
|
||||
const unsigned char *privkey,
|
||||
size_t privkeylen
|
||||
|
|
|
@ -0,0 +1,483 @@
|
|||
# ElligatorSwift for secp256k1 explained
|
||||
|
||||
In this document we explain how the `ellswift` module implementation is related to the
|
||||
construction in the
|
||||
["SwiftEC: Shallue–van de Woestijne Indifferentiable Function To Elliptic Curves"](https://eprint.iacr.org/2022/759)
|
||||
paper by Jorge Chávez-Saab, Francisco Rodríguez-Henríquez, and Mehdi Tibouchi.
|
||||
|
||||
* [1. Introduction](#1-introduction)
|
||||
* [2. The decoding function](#2-the-decoding-function)
|
||||
+ [2.1 Decoding for `secp256k1`](#21-decoding-for-secp256k1)
|
||||
* [3. The encoding function](#3-the-encoding-function)
|
||||
+ [3.1 Switching to *v, w* coordinates](#31-switching-to-v-w-coordinates)
|
||||
+ [3.2 Avoiding computing all inverses](#32-avoiding-computing-all-inverses)
|
||||
+ [3.3 Finding the inverse](#33-finding-the-inverse)
|
||||
+ [3.4 Dealing with special cases](#34-dealing-with-special-cases)
|
||||
+ [3.5 Encoding for `secp256k1`](#35-encoding-for-secp256k1)
|
||||
* [4. Encoding and decoding full *(x, y)* coordinates](#4-encoding-and-decoding-full-x-y-coordinates)
|
||||
+ [4.1 Full *(x, y)* coordinates for `secp256k1`](#41-full-x-y-coordinates-for-secp256k1)
|
||||
|
||||
## 1. Introduction
|
||||
|
||||
The `ellswift` module effectively introduces a new 64-byte public key format, with the property
|
||||
that (uniformly random) public keys can be encoded as 64-byte arrays which are computationally
|
||||
indistinguishable from uniform byte arrays. The module provides functions to convert public keys
|
||||
from and to this format, as well as convenience functions for key generation and ECDH that operate
|
||||
directly on ellswift-encoded keys.
|
||||
|
||||
The encoding consists of the concatenation of two (32-byte big endian) encoded field elements $u$
|
||||
and $t.$ Together they encode an x-coordinate on the curve $x$, or (see further) a full point $(x, y)$ on
|
||||
the curve.
|
||||
|
||||
**Decoding** consists of decoding the field elements $u$ and $t$ (values above the field size $p$
|
||||
are taken modulo $p$), and then evaluating $F_u(t)$, which for every $u$ and $t$ results in a valid
|
||||
x-coordinate on the curve. The functions $F_u$ will be defined in [Section 2](#2-the-decoding-function).
|
||||
|
||||
**Encoding** a given $x$ coordinate is conceptually done as follows:
|
||||
* Loop:
|
||||
* Pick a uniformly random field element $u.$
|
||||
* Compute the set $L = F_u^{-1}(x)$ of $t$ values for which $F_u(t) = x$, which may have up to *8* elements.
|
||||
* With probability $1 - \dfrac{\\#L}{8}$, restart the loop.
|
||||
* Select a uniformly random $t \in L$ and return $(u, t).$
|
||||
|
||||
This is the *ElligatorSwift* algorithm, here given for just x-coordinates. An extension to full
|
||||
$(x, y)$ points will be given in [Section 4](#4-encoding-and-decoding-full-x-y-coordinates).
|
||||
The algorithm finds a uniformly random $(u, t)$ among (almost all) those
|
||||
for which $F_u(t) = x.$ Section 3.2 in the paper proves that the number of such encodings for
|
||||
almost all x-coordinates on the curve (all but at most 39) is close to two times the field size
|
||||
(specifically, it lies in the range $2q \pm (22\sqrt{q} + O(1))$, where $q$ is the size of the field).
|
||||
|
||||
## 2. The decoding function
|
||||
|
||||
First some definitions:
|
||||
* $\mathbb{F}$ is the finite field of size $q$, of characteristic 5 or more, and $q \equiv 1 \mod 3.$
|
||||
* For `secp256k1`, $q = 2^{256} - 2^{32} - 977$, which satisfies that requirement.
|
||||
* Let $E$ be the elliptic curve of points $(x, y) \in \mathbb{F}^2$ for which $y^2 = x^3 + ax + b$, with $a$ and $b$
|
||||
public constants, for which $\Delta_E = -16(4a^3 + 27b^2)$ is a square, and at least one of $(-b \pm \sqrt{-3 \Delta_E} / 36)/2$ is a square.
|
||||
This implies that the order of $E$ is either odd, or a multiple of *4*.
|
||||
If $a=0$, this condition is always fulfilled.
|
||||
* For `secp256k1`, $a=0$ and $b=7.$
|
||||
* Let the function $g(x) = x^3 + ax + b$, so the $E$ curve equation is also $y^2 = g(x).$
|
||||
* Let the function $h(x) = 3x^2 + 4a.$
|
||||
* Define $V$ as the set of solutions $(x_1, x_2, x_3, z)$ to $z^2 = g(x_1)g(x_2)g(x_3).$
|
||||
* Define $S_u$ as the set of solutions $(X, Y)$ to $X^2 + h(u)Y^2 = -g(u)$ and $Y \neq 0.$
|
||||
* $P_u$ is a function from $\mathbb{F}$ to $S_u$ that will be defined below.
|
||||
* $\psi_u$ is a function from $S_u$ to $V$ that will be defined below.
|
||||
|
||||
**Note**: In the paper:
|
||||
* $F_u$ corresponds to $F_{0,u}$ there.
|
||||
* $P_u(t)$ is called $P$ there.
|
||||
* All $S_u$ sets together correspond to $S$ there.
|
||||
* All $\psi_u$ functions together (operating on elements of $S$) correspond to $\psi$ there.
|
||||
|
||||
Note that for $V$, the left hand side of the equation $z^2$ is square, and thus the right
|
||||
hand must also be square. As multiplying non-squares results in a square in $\mathbb{F}$,
|
||||
out of the three right-hand side factors an even number must be non-squares.
|
||||
This implies that exactly *1* or exactly *3* out of
|
||||
$\\{g(x_1), g(x_2), g(x_3)\\}$ must be square, and thus that for any $(x_1,x_2,x_3,z) \in V$,
|
||||
at least one of $\\{x_1, x_2, x_3\\}$ must be a valid x-coordinate on $E.$ There is one exception
|
||||
to this, namely when $z=0$, but even then one of the three values is a valid x-coordinate.
|
||||
|
||||
**Define** the decoding function $F_u(t)$ as:
|
||||
* Let $(x_1, x_2, x_3, z) = \psi_u(P_u(t)).$
|
||||
* Return the first element $x$ of $(x_3, x_2, x_1)$ which is a valid x-coordinate on $E$ (i.e., $g(x)$ is square).
|
||||
|
||||
$P_u(t) = (X(u, t), Y(u, t))$, where:
|
||||
|
||||
$$
|
||||
\begin{array}{lcl}
|
||||
X(u, t) & = & \left\\{\begin{array}{ll}
|
||||
\dfrac{g(u) - t^2}{2t} & a = 0 \\
|
||||
\dfrac{g(u) + h(u)(Y_0(u) - X_0(u)t)^2}{X_0(u)(1 + h(u)t^2)} & a \neq 0
|
||||
\end{array}\right. \\
|
||||
Y(u, t) & = & \left\\{\begin{array}{ll}
|
||||
\dfrac{X(u, t) + t}{u \sqrt{-3}} = \dfrac{g(u) + t^2}{2tu\sqrt{-3}} & a = 0 \\
|
||||
Y_0(u) + t(X(u, t) - X_0(u)) & a \neq 0
|
||||
\end{array}\right.
|
||||
\end{array}
|
||||
$$
|
||||
|
||||
$P_u(t)$ is defined:
|
||||
* For $a=0$, unless:
|
||||
* $u = 0$ or $t = 0$ (division by zero)
|
||||
* $g(u) = -t^2$ (would give $Y=0$).
|
||||
* For $a \neq 0$, unless:
|
||||
* $X_0(u) = 0$ or $h(u)t^2 = -1$ (division by zero)
|
||||
* $Y_0(u) (1 - h(u)t^2) = 2X_0(u)t$ (would give $Y=0$).
|
||||
|
||||
The functions $X_0(u)$ and $Y_0(u)$ are defined in Appendix A of the paper, and depend on various properties of $E.$
|
||||
|
||||
The function $\psi_u$ is the same for all curves: $\psi_u(X, Y) = (x_1, x_2, x_3, z)$, where:
|
||||
|
||||
$$
|
||||
\begin{array}{lcl}
|
||||
x_1 & = & \dfrac{X}{2Y} - \dfrac{u}{2} && \\
|
||||
x_2 & = & -\dfrac{X}{2Y} - \dfrac{u}{2} && \\
|
||||
x_3 & = & u + 4Y^2 && \\
|
||||
z & = & \dfrac{g(x_3)}{2Y}(u^2 + ux_1 + x_1^2 + a) = \dfrac{-g(u)g(x_3)}{8Y^3}
|
||||
\end{array}
|
||||
$$
|
||||
|
||||
### 2.1 Decoding for `secp256k1`
|
||||
|
||||
Put together and specialized for $a=0$ curves, decoding $(u, t)$ to an x-coordinate is:
|
||||
|
||||
**Define** $F_u(t)$ as:
|
||||
* Let $X = \dfrac{u^3 + b - t^2}{2t}.$
|
||||
* Let $Y = \dfrac{X + t}{u\sqrt{-3}}.$
|
||||
* Return the first $x$ in $(u + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u}{2}, \dfrac{X}{2Y} - \dfrac{u}{2})$ for which $g(x)$ is square.
|
||||
|
||||
To make sure that every input decodes to a valid x-coordinate, we remap the inputs in case
|
||||
$P_u$ is not defined (when $u=0$, $t=0$, or $g(u) = -t^2$):
|
||||
|
||||
**Define** $F_u(t)$ as:
|
||||
* Let $u'=u$ if $u \neq 0$; $1$ otherwise (guaranteeing $u' \neq 0$).
|
||||
* Let $t'=t$ if $t \neq 0$; $1$ otherwise (guaranteeing $t' \neq 0$).
|
||||
* Let $t''=t'$ if $g(u') \neq -t'^2$; $2t'$ otherwise (guaranteeing $t'' \neq 0$ and $g(u') \neq -t''^2$).
|
||||
* Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$
|
||||
* Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$
|
||||
* Return the first $x$ in $(u' + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u'}{2}, \dfrac{X}{2Y} - \dfrac{u'}{2})$ for which $x^3 + b$ is square.
|
||||
|
||||
The choices here are not strictly necessary. Just returning a fixed constant in any of the undefined cases would suffice,
|
||||
but the approach here is simple enough and gives fairly uniform output even in these cases.
|
||||
|
||||
**Note**: in the paper these conditions result in $\infty$ as output, due to the use of projective coordinates there.
|
||||
We wish to avoid the need for callers to deal with this special case.
|
||||
|
||||
This is implemented in `rustsecp256k1_v0_9_0_ellswift_xswiftec_frac_var` (which decodes to an x-coordinate represented as a fraction), and
|
||||
in `rustsecp256k1_v0_9_0_ellswift_xswiftec_var` (which outputs the actual x-coordinate).
|
||||
|
||||
## 3. The encoding function
|
||||
|
||||
To implement $F_u^{-1}(x)$, the function to find the set of inverses $t$ for which $F_u(t) = x$, we have to reverse the process:
|
||||
* Find all the $(X, Y) \in S_u$ that could have given rise to $x$, through the $x_1$, $x_2$, or $x_3$ formulas in $\psi_u.$
|
||||
* Map those $(X, Y)$ solutions to $t$ values using $P_u^{-1}(X, Y).$
|
||||
* For each of the found $t$ values, verify that $F_u(t) = x.$
|
||||
* Return the remaining $t$ values.
|
||||
|
||||
The function $P_u^{-1}$, which finds $t$ given $(X, Y) \in S_u$, is significantly simpler than $P_u:$
|
||||
|
||||
$$
|
||||
P_u^{-1}(X, Y) = \left\\{\begin{array}{ll}
|
||||
Yu\sqrt{-3} - X & a = 0 \\
|
||||
\dfrac{Y-Y_0(u)}{X-X_0(u)} & a \neq 0 \land X \neq X_0(u) \\
|
||||
\dfrac{-X_0(u)}{h(u)Y_0(u)} & a \neq 0 \land X = X_0(u) \land Y = Y_0(u)
|
||||
\end{array}\right.
|
||||
$$
|
||||
|
||||
The third step above, verifying that $F_u(t) = x$, is necessary because for the $(X, Y)$ values found through the $x_1$ and $x_2$ expressions,
|
||||
it is possible that decoding through $\psi_u(X, Y)$ yields a valid $x_3$ on the curve, which would take precedence over the
|
||||
$x_1$ or $x_2$ decoding. These $(X, Y)$ solutions must be rejected.
|
||||
|
||||
Since we know that exactly one or exactly three out of $\\{x_1, x_2, x_3\\}$ are valid x-coordinates for any $t$,
|
||||
the case where either $x_1$ or $x_2$ is valid and in addition also $x_3$ is valid must mean that all three are valid.
|
||||
This means that instead of checking whether $x_3$ is on the curve, it is also possible to check whether the other one out of
|
||||
$x_1$ and $x_2$ is on the curve. This is significantly simpler, as it turns out.
|
||||
|
||||
Observe that $\psi_u$ guarantees that $x_1 + x_2 = -u.$ So given either $x = x_1$ or $x = x_2$, the other one of the two can be computed as
|
||||
$-u - x.$ Thus, when encoding $x$ through the $x_1$ or $x_2$ expressions, one can simply check whether $g(-u-x)$ is a square,
|
||||
and if so, not include the corresponding $t$ values in the returned set. As this does not need $X$, $Y$, or $t$, this condition can be determined
|
||||
before those values are computed.
|
||||
|
||||
It is not possible that an encoding found through the $x_1$ expression decodes to a different valid x-coordinate using $x_2$ (which would
|
||||
take precedence), for the same reason: if both $x_1$ and $x_2$ decodings were valid, $x_3$ would be valid as well, and thus take
|
||||
precedence over both. Because of this, the $g(-u-x)$ being square test for $x_1$ and $x_2$ is the only test necessary to guarantee the found $t$
|
||||
values round-trip back to the input $x$ correctly. This is the reason for choosing the $(x_3, x_2, x_1)$ precedence order in the decoder;
|
||||
any order which does not place $x_3$ first requires more complicated round-trip checks in the encoder.
|
||||
|
||||
### 3.1 Switching to *v, w* coordinates
|
||||
|
||||
Before working out the formulas for all this, we switch to different variables for $S_u.$ Let $v = (X/Y - u)/2$, and
|
||||
$w = 2Y.$ Or in the other direction, $X = w(u/2 + v)$ and $Y = w/2:$
|
||||
* $S_u'$ becomes the set of $(v, w)$ for which $w^2 (u^2 + uv + v^2 + a) = -g(u)$ and $w \neq 0.$
|
||||
* For $a=0$ curves, $P_u^{-1}$ can be stated for $(v,w)$ as $P_u^{'-1}(v, w) = w\left(\frac{\sqrt{-3}-1}{2}u - v\right).$
|
||||
* $\psi_u$ can be stated for $(v, w)$ as $\psi_u'(v, w) = (x_1, x_2, x_3, z)$, where
|
||||
|
||||
$$
|
||||
\begin{array}{lcl}
|
||||
x_1 & = & v \\
|
||||
x_2 & = & -u - v \\
|
||||
x_3 & = & u + w^2 \\
|
||||
z & = & \dfrac{g(x_3)}{w}(u^2 + uv + v^2 + a) = \dfrac{-g(u)g(x_3)}{w^3}
|
||||
\end{array}
|
||||
$$
|
||||
|
||||
We can now write the expressions for finding $(v, w)$ given $x$ explicitly, by solving each of the $\\{x_1, x_2, x_3\\}$
|
||||
expressions for $v$ or $w$, and using the $S_u'$ equation to find the other variable:
|
||||
* Assuming $x = x_1$, we find $v = x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions).
|
||||
* Assuming $x = x_2$, we find $v = -u-x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions).
|
||||
* Assuming $x = x_3$, we find $w = \pm\sqrt{x-u}$ and $v = -u/2 \pm \sqrt{-w^2(4g(u) + w^2h(u))}/(2w^2)$ (four solutions).
|
||||
|
||||
### 3.2 Avoiding computing all inverses
|
||||
|
||||
The *ElligatorSwift* algorithm as stated in Section 1 requires the computation of $L = F_u^{-1}(x)$ (the
|
||||
set of all $t$ such that $(u, t)$ decode to $x$) in full. This is unnecessary.
|
||||
|
||||
Observe that the procedure of restarting with probability $(1 - \frac{\\#L}{8})$ and otherwise returning a
|
||||
uniformly random element from $L$ is actually equivalent to always padding $L$ with $\bot$ values up to length 8,
|
||||
picking a uniformly random element from that, restarting whenever $\bot$ is picked:
|
||||
|
||||
**Define** *ElligatorSwift(x)* as:
|
||||
* Loop:
|
||||
* Pick a uniformly random field element $u.$
|
||||
* Compute the set $L = F_u^{-1}(x).$
|
||||
* Let $T$ be the 8-element vector consisting of the elements of $L$, plus $8 - \\#L$ times $\\{\bot\\}.$
|
||||
* Select a uniformly random $t \in T.$
|
||||
* If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
|
||||
|
||||
Now notice that the order of elements in $T$ does not matter, as all we do is pick a uniformly
|
||||
random element in it, so we do not need to have all $\bot$ values at the end.
|
||||
As we have 8 distinct formulas for finding $(v, w)$ (taking the variants due to $\pm$ into account),
|
||||
we can associate every index in $T$ with exactly one of those formulas, making sure that:
|
||||
* Formulas that yield no solutions (due to division by zero or non-existing square roots) or invalid solutions are made to return $\bot.$
|
||||
* For the $x_1$ and $x_2$ cases, if $g(-u-x)$ is a square, $\bot$ is returned instead (the round-trip check).
|
||||
* In case multiple formulas would return the same non- $\bot$ result, all but one of those must be turned into $\bot$ to avoid biasing those.
|
||||
|
||||
The last condition above only occurs with negligible probability for cryptographically-sized curves, but is interesting
|
||||
to take into account as it allows exhaustive testing in small groups. See [Section 3.4](#34-dealing-with-special-cases)
|
||||
for an analysis of all the negligible cases.
|
||||
|
||||
If we define $T = (G_{0,u}(x), G_{1,u}(x), \ldots, G_{7,u}(x))$, with each $G_{i,u}$ matching one of the formulas,
|
||||
the loop can be simplified to only compute one of the inverses instead of all of them:
|
||||
|
||||
**Define** *ElligatorSwift(x)* as:
|
||||
* Loop:
|
||||
* Pick a uniformly random field element $u.$
|
||||
* Pick a uniformly random integer $c$ in $[0,8).$
|
||||
* Let $t = G_{c,u}(x).$
|
||||
* If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
|
||||
|
||||
This is implemented in `rustsecp256k1_v0_9_0_ellswift_xelligatorswift_var`.
|
||||
|
||||
### 3.3 Finding the inverse
|
||||
|
||||
To implement $G_{c,u}$, we map $c=0$ to the $x_1$ formula, $c=1$ to the $x_2$ formula, and $c=2$ and $c=3$ to the $x_3$ formula.
|
||||
Those are then repeated as $c=4$ through $c=7$ for the other sign of $w$ (noting that in each formula, $w$ is a square root of some expression).
|
||||
Ignoring the negligible cases, we get:
|
||||
|
||||
**Define** $G_{c,u}(x)$ as:
|
||||
* If $c \in \\{0, 1, 4, 5\\}$ (for $x_1$ and $x_2$ formulas):
|
||||
* If $g(-u-x)$ is square, return $\bot$ (as $x_3$ would be valid and take precedence).
|
||||
* If $c \in \\{0, 4\\}$ (the $x_1$ formula) let $v = x$, otherwise let $v = -u-x$ (the $x_2$ formula)
|
||||
* Let $s = -g(u)/(u^2 + uv + v^2 + a)$ (using $s = w^2$ in what follows).
|
||||
* Otherwise, when $c \in \\{2, 3, 6, 7\\}$ (for $x_3$ formulas):
|
||||
* Let $s = x-u.$
|
||||
* Let $r = \sqrt{-s(4g(u) + sh(u))}.$
|
||||
* Let $v = (r/s - u)/2$ if $c \in \\{3, 7\\}$; $(-r/s - u)/2$ otherwise.
|
||||
* Let $w = \sqrt{s}.$
|
||||
* Depending on $c:$
|
||||
* If $c \in \\{0, 1, 2, 3\\}:$ return $P_u^{'-1}(v, w).$
|
||||
* If $c \in \\{4, 5, 6, 7\\}:$ return $P_u^{'-1}(v, -w).$
|
||||
|
||||
Whenever a square root of a non-square is taken, $\bot$ is returned; for both square roots this happens with roughly
|
||||
50% on random inputs. Similarly, when a division by 0 would occur, $\bot$ is returned as well; this will only happen
|
||||
with negligible probability. A division by 0 in the first branch in fact cannot occur at all, because $u^2 + uv + v^2 + a = 0$
|
||||
implies $g(-u-x) = g(x)$ which would mean the $g(-u-x)$ is square condition has triggered
|
||||
and $\bot$ would have been returned already.
|
||||
|
||||
**Note**: In the paper, the $case$ variable corresponds roughly to the $c$ above, but only takes on 4 possible values (1 to 4).
|
||||
The conditional negation of $w$ at the end is done randomly, which is equivalent, but makes testing harder. We choose to
|
||||
have the $G_{c,u}$ be deterministic, and capture all choices in $c.$
|
||||
|
||||
Now observe that the $c \in \\{1, 5\\}$ and $c \in \\{3, 7\\}$ conditions effectively perform the same $v \rightarrow -u-v$
|
||||
transformation. Furthermore, that transformation has no effect on $s$ in the first branch
|
||||
as $u^2 + ux + x^2 + a = u^2 + u(-u-x) + (-u-x)^2 + a.$ Thus we can extract it out and move it down:
|
||||
|
||||
**Define** $G_{c,u}(x)$ as:
|
||||
* If $c \in \\{0, 1, 4, 5\\}:$
|
||||
* If $g(-u-x)$ is square, return $\bot.$
|
||||
* Let $s = -g(u)/(u^2 + ux + x^2 + a).$
|
||||
* Let $v = x.$
|
||||
* Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
|
||||
* Let $s = x-u.$
|
||||
* Let $r = \sqrt{-s(4g(u) + sh(u))}.$
|
||||
* Let $v = (r/s - u)/2.$
|
||||
* Let $w = \sqrt{s}.$
|
||||
* Depending on $c:$
|
||||
* If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w).$
|
||||
* If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w).$
|
||||
* If $c \in \\{4, 6\\}:$ return $P_u^{'-1}(v, -w).$
|
||||
* If $c \in \\{5, 7\\}:$ return $P_u^{'-1}(-u-v, -w).$
|
||||
|
||||
This shows there will always be exactly 0, 4, or 8 $t$ values for a given $(u, x)$ input.
|
||||
There can be 0, 1, or 2 $(v, w)$ pairs before invoking $P_u^{'-1}$, and each results in 4 distinct $t$ values.
|
||||
|
||||
### 3.4 Dealing with special cases
|
||||
|
||||
As mentioned before there are a few cases to deal with which only happen in a negligibly small subset of inputs.
|
||||
For cryptographically sized fields, if only random inputs are going to be considered, it is unnecessary to deal with these. Still, for completeness
|
||||
we analyse them here. They generally fall into two categories: cases in which the encoder would produce $t$ values that
|
||||
do not decode back to $x$ (or at least cannot guarantee that they do), and cases in which the encoder might produce the same
|
||||
$t$ value for multiple $c$ inputs (thereby biasing that encoding):
|
||||
|
||||
* In the branch for $x_1$ and $x_2$ (where $c \in \\{0, 1, 4, 5\\}$):
|
||||
* When $g(u) = 0$, we would have $s=w=Y=0$, which is not on $S_u.$ This is only possible on even-ordered curves.
|
||||
Excluding this also removes the one condition under which the simplified check for $x_3$ on the curve
|
||||
fails (namely when $g(x_1)=g(x_2)=0$ but $g(x_3)$ is not square).
|
||||
This does exclude some valid encodings: when both $g(u)=0$ and $u^2+ux+x^2+a=0$ (also implying $g(x)=0$),
|
||||
the $S_u'$ equation degenerates to $0 = 0$, and many valid $t$ values may exist. Yet, these cannot be targeted uniformly by the
|
||||
encoder anyway as there will generally be more than 8.
|
||||
* When $g(x) = 0$, the same $t$ would be produced as in the $x_3$ branch (where $c \in \\{2, 3, 6, 7\\}$) which we give precedence
|
||||
as it can deal with $g(u)=0$.
|
||||
This is again only possible on even-ordered curves.
|
||||
* In the branch for $x_3$ (where $c \in \\{2, 3, 6, 7\\}$):
|
||||
* When $s=0$, a division by zero would occur.
|
||||
* When $v = -u-v$ and $c \in \\{3, 7\\}$, the same $t$ would be returned as in the $c \in \\{2, 6\\}$ cases.
|
||||
It is equivalent to checking whether $r=0$.
|
||||
This cannot occur in the $x_1$ or $x_2$ branches, as it would trigger the $g(-u-x)$ is square condition.
|
||||
A similar concern for $w = -w$ does not exist, as $w=0$ is already impossible in both branches: in the first
|
||||
it requires $g(u)=0$ which is already outlawed on even-ordered curves and impossible on others; in the second it would trigger division by zero.
|
||||
* Curve-specific special cases also exist that need to be rejected, because they result in $(u,t)$ which is invalid to the decoder, or because of division by zero in the encoder:
|
||||
* For $a=0$ curves, when $u=0$ or when $t=0$. The latter can only be reached by the encoder when $g(u)=0$, which requires an even-ordered curve.
|
||||
* For $a \neq 0$ curves, when $X_0(u)=0$, when $h(u)t^2 = -1$, or when $w(u + 2v) = 2X_0(u)$ while also either $w \neq 2Y_0(u)$ or $h(u)=0$.
|
||||
|
||||
**Define** a version of $G_{c,u}(x)$ which deals with all these cases:
|
||||
* If $a=0$ and $u=0$, return $\bot.$
|
||||
* If $a \neq 0$ and $X_0(u)=0$, return $\bot.$
|
||||
* If $c \in \\{0, 1, 4, 5\\}:$
|
||||
* If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only).
|
||||
* If $g(-u-x)$ is square, return $\bot.$
|
||||
* Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero).
|
||||
* Let $v = x.$
|
||||
* Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
|
||||
* Let $s = x-u.$
|
||||
* Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square.
|
||||
* If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$
|
||||
* If $s = 0$, return $\bot.$
|
||||
* Let $v = (r/s - u)/2.$
|
||||
* Let $w = \sqrt{s}$; return $\bot$ if not square.
|
||||
* If $a \neq 0$ and $w(u+2v) = 2X_0(u)$ and either $w \neq 2Y_0(u)$ or $h(u) = 0$, return $\bot.$
|
||||
* Depending on $c:$
|
||||
* If $c \in \\{0, 2\\}$, let $t = P_u^{'-1}(v, w).$
|
||||
* If $c \in \\{1, 3\\}$, let $t = P_u^{'-1}(-u-v, w).$
|
||||
* If $c \in \\{4, 6\\}$, let $t = P_u^{'-1}(v, -w).$
|
||||
* If $c \in \\{5, 7\\}$, let $t = P_u^{'-1}(-u-v, -w).$
|
||||
* If $a=0$ and $t=0$, return $\bot$ (even curves only).
|
||||
* If $a \neq 0$ and $h(u)t^2 = -1$, return $\bot.$
|
||||
* Return $t.$
|
||||
|
||||
Given any $u$, using this algorithm over all $x$ and $c$ values, every $t$ value will be reached exactly once,
|
||||
for an $x$ for which $F_u(t) = x$ holds, except for these cases that will not be reached:
|
||||
* All cases where $P_u(t)$ is not defined:
|
||||
* For $a=0$ curves, when $u=0$, $t=0$, or $g(u) = -t^2.$
|
||||
* For $a \neq 0$ curves, when $h(u)t^2 = -1$, $X_0(u) = 0$, or $Y_0(u) (1 - h(u) t^2) = 2X_0(u)t.$
|
||||
* When $g(u)=0$, the potentially many $t$ values that decode to an $x$ satisfying $g(x)=0$ using the $x_2$ formula. These were excluded by the $g(u)=0$ condition in the $c \in \\{0, 1, 4, 5\\}$ branch.
|
||||
|
||||
These cases form a negligible subset of all $(u, t)$ for cryptographically sized curves.
|
||||
|
||||
### 3.5 Encoding for `secp256k1`
|
||||
|
||||
Specialized for odd-ordered $a=0$ curves:
|
||||
|
||||
**Define** $G_{c,u}(x)$ as:
|
||||
* If $u=0$, return $\bot.$
|
||||
* If $c \in \\{0, 1, 4, 5\\}:$
|
||||
* If $(-u-x)^3 + b$ is square, return $\bot.$
|
||||
* Let $s = -(u^3 + b)/(u^2 + ux + x^2)$ (cannot cause division by 0).
|
||||
* Let $v = x.$
|
||||
* Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
|
||||
* Let $s = x-u.$
|
||||
* Let $r = \sqrt{-s(4(u^3 + b) + 3su^2)}$; return $\bot$ if not square.
|
||||
* If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$
|
||||
* If $s = 0$, return $\bot.$
|
||||
* Let $v = (r/s - u)/2.$
|
||||
* Let $w = \sqrt{s}$; return $\bot$ if not square.
|
||||
* Depending on $c:$
|
||||
* If $c \in \\{0, 2\\}:$ return $w(\frac{\sqrt{-3}-1}{2}u - v).$
|
||||
* If $c \in \\{1, 3\\}:$ return $w(\frac{\sqrt{-3}+1}{2}u + v).$
|
||||
* If $c \in \\{4, 6\\}:$ return $w(\frac{-\sqrt{-3}+1}{2}u + v).$
|
||||
* If $c \in \\{5, 7\\}:$ return $w(\frac{-\sqrt{-3}-1}{2}u - v).$
|
||||
|
||||
This is implemented in `rustsecp256k1_v0_9_0_ellswift_xswiftec_inv_var`.
|
||||
|
||||
And the x-only ElligatorSwift encoding algorithm is still:
|
||||
|
||||
**Define** *ElligatorSwift(x)* as:
|
||||
* Loop:
|
||||
* Pick a uniformly random field element $u.$
|
||||
* Pick a uniformly random integer $c$ in $[0,8).$
|
||||
* Let $t = G_{c,u}(x).$
|
||||
* If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
|
||||
|
||||
Note that this logic does not take the remapped $u=0$, $t=0$, and $g(u) = -t^2$ cases into account; it just avoids them.
|
||||
While it is not impossible to make the encoder target them, this would increase the maximum number of $t$ values for a given $(u, x)$
|
||||
combination beyond 8, and thereby slow down the ElligatorSwift loop proportionally, for a negligible gain in uniformity.
|
||||
|
||||
## 4. Encoding and decoding full *(x, y)* coordinates
|
||||
|
||||
So far we have only addressed encoding and decoding x-coordinates, but in some cases an encoding
|
||||
for full points with $(x, y)$ coordinates is desirable. It is possible to encode this information
|
||||
in $t$ as well.
|
||||
|
||||
Note that for any $(X, Y) \in S_u$, $(\pm X, \pm Y)$ are all on $S_u.$ Moreover, all of these are
|
||||
mapped to the same x-coordinate. Negating $X$ or negating $Y$ just results in $x_1$ and $x_2$
|
||||
being swapped, and does not affect $x_3.$ This will not change the outcome x-coordinate as the order
|
||||
of $x_1$ and $x_2$ only matters if both were to be valid, and in that case $x_3$ would be used instead.
|
||||
|
||||
Still, these four $(X, Y)$ combinations all correspond to distinct $t$ values, so we can encode
|
||||
the sign of the y-coordinate in the sign of $X$ or the sign of $Y.$ They correspond to the
|
||||
four distinct $P_u^{'-1}$ calls in the definition of $G_{u,c}.$
|
||||
|
||||
**Note**: In the paper, the sign of the y coordinate is encoded in a separately-coded bit.
|
||||
|
||||
To encode the sign of $y$ in the sign of $Y:$
|
||||
|
||||
**Define** *Decode(u, t)* for full $(x, y)$ as:
|
||||
* Let $(X, Y) = P_u(t).$
|
||||
* Let $x$ be the first value in $(u + 4Y^2, \frac{-X}{2Y} - \frac{u}{2}, \frac{X}{2Y} - \frac{u}{2})$ for which $g(x)$ is square.
|
||||
* Let $y = \sqrt{g(x)}.$
|
||||
* If $sign(y) = sign(Y)$, return $(x, y)$; otherwise return $(x, -y).$
|
||||
|
||||
And encoding would be done using a $G_{c,u}(x, y)$ function defined as:
|
||||
|
||||
**Define** $G_{c,u}(x, y)$ as:
|
||||
* If $c \in \\{0, 1\\}:$
|
||||
* If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only).
|
||||
* If $g(-u-x)$ is square, return $\bot.$
|
||||
* Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero).
|
||||
* Let $v = x.$
|
||||
* Otherwise, when $c \in \\{2, 3\\}:$
|
||||
* Let $s = x-u.$
|
||||
* Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square.
|
||||
* If $c = 3$ and $r = 0$, return $\bot.$
|
||||
* Let $v = (r/s - u)/2.$
|
||||
* Let $w = \sqrt{s}$; return $\bot$ if not square.
|
||||
* Let $w' = w$ if $sign(w/2) = sign(y)$; $-w$ otherwise.
|
||||
* Depending on $c:$
|
||||
* If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w').$
|
||||
* If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w').$
|
||||
|
||||
Note that $c$ now only ranges $[0,4)$, as the sign of $w'$ is decided based on that of $y$, rather than on $c.$
|
||||
This change makes some valid encodings unreachable: when $y = 0$ and $sign(Y) \neq sign(0)$.
|
||||
|
||||
In the above logic, $sign$ can be implemented in several ways, such as parity of the integer representation
|
||||
of the input field element (for prime-sized fields) or the quadratic residuosity (for fields where
|
||||
$-1$ is not square). The choice does not matter, as long as it only takes on two possible values, and for $x \neq 0$ it holds that $sign(x) \neq sign(-x)$.
|
||||
|
||||
### 4.1 Full *(x, y)* coordinates for `secp256k1`
|
||||
|
||||
For $a=0$ curves, there is another option. Note that for those,
|
||||
the $P_u(t)$ function translates negations of $t$ to negations of (both) $X$ and $Y.$ Thus, we can use $sign(t)$ to
|
||||
encode the y-coordinate directly. Combined with the earlier remapping to guarantee all inputs land on the curve, we get
|
||||
as decoder:
|
||||
|
||||
**Define** *Decode(u, t)* as:
|
||||
* Let $u'=u$ if $u \neq 0$; $1$ otherwise.
|
||||
* Let $t'=t$ if $t \neq 0$; $1$ otherwise.
|
||||
* Let $t''=t'$ if $u'^3 + b + t'^2 \neq 0$; $2t'$ otherwise.
|
||||
* Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$
|
||||
* Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$
|
||||
* Let $x$ be the first element of $(u' + 4Y^2, \frac{-X}{2Y} - \frac{u'}{2}, \frac{X}{2Y} - \frac{u'}{2})$ for which $g(x)$ is square.
|
||||
* Let $y = \sqrt{g(x)}.$
|
||||
* Return $(x, y)$ if $sign(y) = sign(t)$; $(x, -y)$ otherwise.
|
||||
|
||||
This is implemented in `rustsecp256k1_v0_9_0_ellswift_swiftec_var`. The used $sign(x)$ function is the parity of $x$ when represented as an integer in $[0,q).$
|
||||
|
||||
The corresponding encoder would invoke the x-only one, but negating the output $t$ if $sign(t) \neq sign(y).$
|
||||
|
||||
This is implemented in `rustsecp256k1_v0_9_0_ellswift_elligatorswift_var`.
|
||||
|
||||
Note that this is only intended for encoding points where both the x-coordinate and y-coordinate are unpredictable. When encoding x-only points
|
||||
where the y-coordinate is implicitly even (or implicitly square, or implicitly in $[0,q/2]$), the encoder in
|
||||
[Section 3.5](#35-encoding-for-secp256k1) must be used, or a bias is reintroduced that undoes all the benefit of using ElligatorSwift
|
||||
in the first place.
|
|
@ -12,33 +12,69 @@ It is best if the maintainers are present during the release, so they can help e
|
|||
|
||||
This process also assumes that there will be no minor releases for old major releases.
|
||||
|
||||
We aim to cut a regular release every 3-4 months, approximately twice as frequent as major Bitcoin Core releases. Every second release should be published one month before the feature freeze of the next major Bitcoin Core release, allowing sufficient time to update the library in Core.
|
||||
|
||||
## Sanity Checks
|
||||
Perform these checks before creating a release:
|
||||
|
||||
1. Ensure `make distcheck` doesn't fail.
|
||||
```shell
|
||||
./autogen.sh && ./configure --enable-dev-mode && make distcheck
|
||||
```
|
||||
2. Check installation with autotools:
|
||||
```shell
|
||||
dir=$(mktemp -d)
|
||||
./autogen.sh && ./configure --prefix=$dir && make clean && make install && ls -l $dir/include $dir/lib
|
||||
gcc -o ecdsa examples/ecdsa.c $(PKG_CONFIG_PATH=$dir/lib/pkgconfig pkg-config --cflags --libs libsecp256k1) -Wl,-rpath,"$dir/lib" && ./ecdsa
|
||||
```
|
||||
3. Check installation with CMake:
|
||||
```shell
|
||||
dir=$(mktemp -d)
|
||||
build=$(mktemp -d)
|
||||
cmake -B $build -DCMAKE_INSTALL_PREFIX=$dir && cmake --build $build --target install && ls -l $dir/include $dir/lib*
|
||||
gcc -o ecdsa examples/ecdsa.c -I $dir/include -L $dir/lib*/ -l secp256k1 -Wl,-rpath,"$dir/lib",-rpath,"$dir/lib64" && ./ecdsa
|
||||
```
|
||||
|
||||
## Regular release
|
||||
|
||||
1. Open a PR to the master branch with a commit (using message `"release: prepare for $MAJOR.$MINOR.$PATCH"`, for example) that
|
||||
* finalizes the release notes in [CHANGELOG.md](../CHANGELOG.md) (make sure to include an entry for `### ABI Compatibility`) and
|
||||
* updates `_PKG_VERSION_*`, `_LIB_VERSION_*`, and sets `_PKG_VERSION_IS_RELEASE` to `true` in `configure.ac`.
|
||||
* finalizes the release notes in [CHANGELOG.md](../CHANGELOG.md) by
|
||||
* adding a section for the release (make sure that the version number is a link to a diff between the previous and new version),
|
||||
* removing the `[Unreleased]` section header, and
|
||||
* including an entry for `### ABI Compatibility` if it doesn't exist that mentions the library soname of the release,
|
||||
* sets `_PKG_VERSION_IS_RELEASE` to `true` in `configure.ac`, and
|
||||
* if this is not a patch release
|
||||
* updates `_PKG_VERSION_*` and `_LIB_VERSION_*` in `configure.ac` and
|
||||
* updates `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_*` in `CMakeLists.txt`.
|
||||
2. After the PR is merged, tag the commit and push it:
|
||||
```
|
||||
RELEASE_COMMIT=<merge commit of step 1>
|
||||
git tag -s v$MAJOR.$MINOR.$PATCH -m "libsecp256k1 $MAJOR.$MINOR.$PATCH" $RELEASE_COMMIT
|
||||
git push git@github.com:bitcoin-core/secp256k1.git v$MAJOR.$MINOR.$PATCH
|
||||
```
|
||||
3. Open a PR to the master branch with a commit (using message `"release: bump version after $MAJOR.$MINOR.$PATCH"`, for example) that sets `_PKG_VERSION_IS_RELEASE` to `false` and `_PKG_VERSION_PATCH` to `$PATCH + 1` and increases `_LIB_VERSION_REVISION`. If other maintainers are not present to approve the PR, it can be merged without ACKs.
|
||||
3. Open a PR to the master branch with a commit (using message `"release cleanup: bump version after $MAJOR.$MINOR.$PATCH"`, for example) that
|
||||
* sets `_PKG_VERSION_IS_RELEASE` to `false` and increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac`,
|
||||
* increments the `$PATCH` component of `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_REVISION` in `CMakeLists.txt`, and
|
||||
* adds an `[Unreleased]` section header to the [CHANGELOG.md](../CHANGELOG.md).
|
||||
|
||||
If other maintainers are not present to approve the PR, it can be merged without ACKs.
|
||||
4. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md).
|
||||
|
||||
## Maintenance release
|
||||
|
||||
Note that bugfixes only need to be backported to releases for which no compatible release without the bug exists.
|
||||
|
||||
1. If `$PATCH = 1`, create maintenance branch `$MAJOR.$MINOR`:
|
||||
1. If there's no maintenance branch `$MAJOR.$MINOR`, create one:
|
||||
```
|
||||
git checkout -b $MAJOR.$MINOR v$MAJOR.$MINOR.0
|
||||
git checkout -b $MAJOR.$MINOR v$MAJOR.$MINOR.$((PATCH - 1))
|
||||
git push git@github.com:bitcoin-core/secp256k1.git $MAJOR.$MINOR
|
||||
```
|
||||
2. Open a pull request to the `$MAJOR.$MINOR` branch that
|
||||
* includes the bugfixes,
|
||||
* finalizes the release notes,
|
||||
* bumps `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac` (with commit message `"release: update PKG_ and LIB_VERSION for $MAJOR.$MINOR.$PATCH"`, for example).
|
||||
* finalizes the release notes similar to a regular release,
|
||||
* increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac`
|
||||
and the `$PATCH` component of `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_REVISION` in `CMakeLists.txt`
|
||||
(with commit message `"release: bump versions for $MAJOR.$MINOR.$PATCH"`, for example).
|
||||
3. After the PRs are merged, update the release branch and tag the commit:
|
||||
```
|
||||
git checkout $MAJOR.$MINOR && git pull
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
# The safegcd implementation in libsecp256k1 explained
|
||||
|
||||
This document explains the modular inverse implementation in the `src/modinv*.h` files. It is based
|
||||
on the paper
|
||||
This document explains the modular inverse and Jacobi symbol implementations in the `src/modinv*.h` files.
|
||||
It is based on the paper
|
||||
["Fast constant-time gcd computation and modular inversion"](https://gcd.cr.yp.to/papers.html#safegcd)
|
||||
by Daniel J. Bernstein and Bo-Yin Yang. The references below are for the Date: 2019.04.13 version.
|
||||
|
||||
|
@ -410,7 +410,7 @@ sufficient even. Given that every loop iteration performs *N* divsteps, it will
|
|||
|
||||
To deal with the branches in `divsteps_n_matrix` we will replace them with constant-time bitwise
|
||||
operations (and hope the C compiler isn't smart enough to turn them back into branches; see
|
||||
`valgrind_ctime_test.c` for automated tests that this isn't the case). To do so, observe that a
|
||||
`ctime_tests.c` for automated tests that this isn't the case). To do so, observe that a
|
||||
divstep can be written instead as (compare to the inner loop of `gcd` in section 1).
|
||||
|
||||
```python
|
||||
|
@ -769,3 +769,51 @@ def modinv_var(M, Mi, x):
|
|||
d, e = update_de(d, e, t, M, Mi)
|
||||
return normalize(f, d, Mi)
|
||||
```
|
||||
|
||||
## 8. From GCDs to Jacobi symbol
|
||||
|
||||
We can also use a similar approach to calculate Jacobi symbol *(x | M)* by keeping track of an
|
||||
extra variable *j*, for which at every step *(x | M) = j (g | f)*. As we update *f* and *g*, we
|
||||
make corresponding updates to *j* using
|
||||
[properties of the Jacobi symbol](https://en.wikipedia.org/wiki/Jacobi_symbol#Properties):
|
||||
* *((g/2) | f)* is either *(g | f)* or *-(g | f)*, depending on the value of *f mod 8* (negating if it's *3* or *5*).
|
||||
* *(f | g)* is either *(g | f)* or *-(g | f)*, depending on *f mod 4* and *g mod 4* (negating if both are *3*).
|
||||
|
||||
These updates depend only on the values of *f* and *g* modulo *4* or *8*, and can thus be applied
|
||||
very quickly, as long as we keep track of a few additional bits of *f* and *g*. Overall, this
|
||||
calculation is slightly simpler than the one for the modular inverse because we no longer need to
|
||||
keep track of *d* and *e*.
|
||||
|
||||
However, one difficulty of this approach is that the Jacobi symbol *(a | n)* is only defined for
|
||||
positive odd integers *n*, whereas in the original safegcd algorithm, *f, g* can take negative
|
||||
values. We resolve this by using the following modified steps:
|
||||
|
||||
```python
|
||||
# Before
|
||||
if delta > 0 and g & 1:
|
||||
delta, f, g = 1 - delta, g, (g - f) // 2
|
||||
|
||||
# After
|
||||
if delta > 0 and g & 1:
|
||||
delta, f, g = 1 - delta, g, (g + f) // 2
|
||||
```
|
||||
|
||||
The algorithm is still correct, since the changed divstep, called a "posdivstep" (see section 8.4
|
||||
and E.5 in the paper) preserves *gcd(f, g)*. However, there's no proof that the modified algorithm
|
||||
will converge. The justification for posdivsteps is completely empirical: in practice, it appears
|
||||
that the vast majority of nonzero inputs converge to *f=g=gcd(f<sub>0</sub>, g<sub>0</sub>)* in a
|
||||
number of steps proportional to their logarithm.
|
||||
|
||||
Note that:
|
||||
- We require inputs to satisfy *gcd(x, M) = 1*, as otherwise *f=1* is not reached.
|
||||
- We require inputs *x &ne; 0*, because applying posdivstep with *g=0* has no effect.
|
||||
- We need to update the termination condition from *g=0* to *f=1*.
|
||||
|
||||
We account for the possibility of nonconvergence by only performing a bounded number of
|
||||
posdivsteps, and then falling back to square-root based Jacobi calculation if a solution has not
|
||||
yet been found.
|
||||
|
||||
The optimizations in sections 3-7 above are described in the context of the original divsteps, but
|
||||
in the C implementation we also adapt most of them (not including "avoiding modulus operations",
|
||||
since it's not necessary to track *d, e*, and "constant-time operation", since we never calculate
|
||||
Jacobi symbols for secret data) to the posdivsteps version.
|
||||
|
|
|
@ -0,0 +1,30 @@
|
|||
function(add_example name)
|
||||
set(target_name ${name}_example)
|
||||
add_executable(${target_name} ${name}.c)
|
||||
target_include_directories(${target_name} PRIVATE
|
||||
${PROJECT_SOURCE_DIR}/include
|
||||
)
|
||||
target_link_libraries(${target_name}
|
||||
secp256k1
|
||||
$<$<PLATFORM_ID:Windows>:bcrypt>
|
||||
)
|
||||
set(test_name ${name}_example)
|
||||
add_test(NAME ${test_name} COMMAND ${target_name})
|
||||
if(BUILD_SHARED_LIBS AND MSVC)
|
||||
# The DLL must reside either in the same folder where the executable is
|
||||
# or somewhere in PATH. Using the latter option.
|
||||
set_tests_properties(${test_name} PROPERTIES
|
||||
ENVIRONMENT "PATH=$<TARGET_FILE_DIR:secp256k1>;$ENV{PATH}"
|
||||
)
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
add_example(ecdsa)
|
||||
|
||||
if(SECP256K1_ENABLE_MODULE_ECDH)
|
||||
add_example(ecdh)
|
||||
endif()
|
||||
|
||||
if(SECP256K1_ENABLE_MODULE_SCHNORRSIG)
|
||||
add_example(schnorr)
|
||||
endif()
|
|
@ -14,8 +14,7 @@
|
|||
#include <secp256k1.h>
|
||||
#include <secp256k1_ecdh.h>
|
||||
|
||||
#include "random.h"
|
||||
|
||||
#include "examples_util.h"
|
||||
|
||||
int main(void) {
|
||||
unsigned char seckey1[32];
|
||||
|
@ -27,19 +26,19 @@ int main(void) {
|
|||
unsigned char randomize[32];
|
||||
int return_val;
|
||||
size_t len;
|
||||
rustsecp256k1_v0_8_1_pubkey pubkey1;
|
||||
rustsecp256k1_v0_8_1_pubkey pubkey2;
|
||||
rustsecp256k1_v0_9_0_pubkey pubkey1;
|
||||
rustsecp256k1_v0_9_0_pubkey pubkey2;
|
||||
|
||||
/* Before we can call actual API functions, we need to create a "context". */
|
||||
rustsecp256k1_v0_8_1_context* ctx = rustsecp256k1_v0_8_1_context_create(SECP256K1_CONTEXT_NONE);
|
||||
rustsecp256k1_v0_9_0_context* ctx = rustsecp256k1_v0_9_0_context_create(SECP256K1_CONTEXT_NONE);
|
||||
if (!fill_random(randomize, sizeof(randomize))) {
|
||||
printf("Failed to generate randomness\n");
|
||||
return 1;
|
||||
}
|
||||
/* Randomizing the context is recommended to protect against side-channel
|
||||
* leakage See `rustsecp256k1_v0_8_1_context_randomize` in secp256k1.h for more
|
||||
* leakage See `rustsecp256k1_v0_9_0_context_randomize` in secp256k1.h for more
|
||||
* information about it. This should never fail. */
|
||||
return_val = rustsecp256k1_v0_8_1_context_randomize(ctx, randomize);
|
||||
return_val = rustsecp256k1_v0_9_0_context_randomize(ctx, randomize);
|
||||
assert(return_val);
|
||||
|
||||
/*** Key Generation ***/
|
||||
|
@ -52,27 +51,27 @@ int main(void) {
|
|||
printf("Failed to generate randomness\n");
|
||||
return 1;
|
||||
}
|
||||
if (rustsecp256k1_v0_8_1_ec_seckey_verify(ctx, seckey1) && rustsecp256k1_v0_8_1_ec_seckey_verify(ctx, seckey2)) {
|
||||
if (rustsecp256k1_v0_9_0_ec_seckey_verify(ctx, seckey1) && rustsecp256k1_v0_9_0_ec_seckey_verify(ctx, seckey2)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Public key creation using a valid context with a verified secret key should never fail */
|
||||
return_val = rustsecp256k1_v0_8_1_ec_pubkey_create(ctx, &pubkey1, seckey1);
|
||||
return_val = rustsecp256k1_v0_9_0_ec_pubkey_create(ctx, &pubkey1, seckey1);
|
||||
assert(return_val);
|
||||
return_val = rustsecp256k1_v0_8_1_ec_pubkey_create(ctx, &pubkey2, seckey2);
|
||||
return_val = rustsecp256k1_v0_9_0_ec_pubkey_create(ctx, &pubkey2, seckey2);
|
||||
assert(return_val);
|
||||
|
||||
/* Serialize pubkey1 in a compressed form (33 bytes), should always return 1 */
|
||||
len = sizeof(compressed_pubkey1);
|
||||
return_val = rustsecp256k1_v0_8_1_ec_pubkey_serialize(ctx, compressed_pubkey1, &len, &pubkey1, SECP256K1_EC_COMPRESSED);
|
||||
return_val = rustsecp256k1_v0_9_0_ec_pubkey_serialize(ctx, compressed_pubkey1, &len, &pubkey1, SECP256K1_EC_COMPRESSED);
|
||||
assert(return_val);
|
||||
/* Should be the same size as the size of the output, because we passed a 33 byte array. */
|
||||
assert(len == sizeof(compressed_pubkey1));
|
||||
|
||||
/* Serialize pubkey2 in a compressed form (33 bytes) */
|
||||
len = sizeof(compressed_pubkey2);
|
||||
return_val = rustsecp256k1_v0_8_1_ec_pubkey_serialize(ctx, compressed_pubkey2, &len, &pubkey2, SECP256K1_EC_COMPRESSED);
|
||||
return_val = rustsecp256k1_v0_9_0_ec_pubkey_serialize(ctx, compressed_pubkey2, &len, &pubkey2, SECP256K1_EC_COMPRESSED);
|
||||
assert(return_val);
|
||||
/* Should be the same size as the size of the output, because we passed a 33 byte array. */
|
||||
assert(len == sizeof(compressed_pubkey2));
|
||||
|
@ -81,12 +80,12 @@ int main(void) {
|
|||
|
||||
/* Perform ECDH with seckey1 and pubkey2. Should never fail with a verified
|
||||
* seckey and valid pubkey */
|
||||
return_val = rustsecp256k1_v0_8_1_ecdh(ctx, shared_secret1, &pubkey2, seckey1, NULL, NULL);
|
||||
return_val = rustsecp256k1_v0_9_0_ecdh(ctx, shared_secret1, &pubkey2, seckey1, NULL, NULL);
|
||||
assert(return_val);
|
||||
|
||||
/* Perform ECDH with seckey2 and pubkey1. Should never fail with a verified
|
||||
* seckey and valid pubkey */
|
||||
return_val = rustsecp256k1_v0_8_1_ecdh(ctx, shared_secret2, &pubkey1, seckey2, NULL, NULL);
|
||||
return_val = rustsecp256k1_v0_9_0_ecdh(ctx, shared_secret2, &pubkey1, seckey2, NULL, NULL);
|
||||
assert(return_val);
|
||||
|
||||
/* Both parties should end up with the same shared secret */
|
||||
|
@ -105,19 +104,19 @@ int main(void) {
|
|||
print_hex(shared_secret1, sizeof(shared_secret1));
|
||||
|
||||
/* This will clear everything from the context and free the memory */
|
||||
rustsecp256k1_v0_8_1_context_destroy(ctx);
|
||||
rustsecp256k1_v0_9_0_context_destroy(ctx);
|
||||
|
||||
/* It's best practice to try to clear secrets from memory after using them.
|
||||
* This is done because some bugs can allow an attacker to leak memory, for
|
||||
     * example through "out of bounds" array access (see Heartbleed), or the OS
|
||||
* swapping them to disk. Hence, we overwrite the secret key buffer with zeros.
|
||||
*
|
||||
* TODO: Prevent these writes from being optimized out, as any good compiler
|
||||
* Here we are preventing these writes from being optimized out, as any good compiler
|
||||
* will remove any writes that aren't used. */
|
||||
memset(seckey1, 0, sizeof(seckey1));
|
||||
memset(seckey2, 0, sizeof(seckey2));
|
||||
memset(shared_secret1, 0, sizeof(shared_secret1));
|
||||
memset(shared_secret2, 0, sizeof(shared_secret2));
|
||||
secure_erase(seckey1, sizeof(seckey1));
|
||||
secure_erase(seckey2, sizeof(seckey2));
|
||||
secure_erase(shared_secret1, sizeof(shared_secret1));
|
||||
secure_erase(shared_secret2, sizeof(shared_secret2));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -13,9 +13,7 @@
|
|||
|
||||
#include <secp256k1.h>
|
||||
|
||||
#include "random.h"
|
||||
|
||||
|
||||
#include "examples_util.h"
|
||||
|
||||
int main(void) {
|
||||
/* Instead of signing the message directly, we must sign a 32-byte hash.
|
||||
|
@ -34,20 +32,20 @@ int main(void) {
|
|||
unsigned char compressed_pubkey[33];
|
||||
unsigned char serialized_signature[64];
|
||||
size_t len;
|
||||
int is_signature_valid;
|
||||
int is_signature_valid, is_signature_valid2;
|
||||
int return_val;
|
||||
rustsecp256k1_v0_8_1_pubkey pubkey;
|
||||
rustsecp256k1_v0_8_1_ecdsa_signature sig;
|
||||
rustsecp256k1_v0_9_0_pubkey pubkey;
|
||||
rustsecp256k1_v0_9_0_ecdsa_signature sig;
|
||||
/* Before we can call actual API functions, we need to create a "context". */
|
||||
rustsecp256k1_v0_8_1_context* ctx = rustsecp256k1_v0_8_1_context_create(SECP256K1_CONTEXT_NONE);
|
||||
rustsecp256k1_v0_9_0_context* ctx = rustsecp256k1_v0_9_0_context_create(SECP256K1_CONTEXT_NONE);
|
||||
if (!fill_random(randomize, sizeof(randomize))) {
|
||||
printf("Failed to generate randomness\n");
|
||||
return 1;
|
||||
}
|
||||
/* Randomizing the context is recommended to protect against side-channel
|
||||
* leakage See `rustsecp256k1_v0_8_1_context_randomize` in secp256k1.h for more
|
||||
* leakage See `rustsecp256k1_v0_9_0_context_randomize` in secp256k1.h for more
|
||||
* information about it. This should never fail. */
|
||||
return_val = rustsecp256k1_v0_8_1_context_randomize(ctx, randomize);
|
||||
return_val = rustsecp256k1_v0_9_0_context_randomize(ctx, randomize);
|
||||
assert(return_val);
|
||||
|
||||
/*** Key Generation ***/
|
||||
|
@ -60,18 +58,18 @@ int main(void) {
|
|||
printf("Failed to generate randomness\n");
|
||||
return 1;
|
||||
}
|
||||
if (rustsecp256k1_v0_8_1_ec_seckey_verify(ctx, seckey)) {
|
||||
if (rustsecp256k1_v0_9_0_ec_seckey_verify(ctx, seckey)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Public key creation using a valid context with a verified secret key should never fail */
|
||||
return_val = rustsecp256k1_v0_8_1_ec_pubkey_create(ctx, &pubkey, seckey);
|
||||
return_val = rustsecp256k1_v0_9_0_ec_pubkey_create(ctx, &pubkey, seckey);
|
||||
assert(return_val);
|
||||
|
||||
/* Serialize the pubkey in a compressed form(33 bytes). Should always return 1. */
|
||||
len = sizeof(compressed_pubkey);
|
||||
return_val = rustsecp256k1_v0_8_1_ec_pubkey_serialize(ctx, compressed_pubkey, &len, &pubkey, SECP256K1_EC_COMPRESSED);
|
||||
return_val = rustsecp256k1_v0_9_0_ec_pubkey_serialize(ctx, compressed_pubkey, &len, &pubkey, SECP256K1_EC_COMPRESSED);
|
||||
assert(return_val);
|
||||
/* Should be the same size as the size of the output, because we passed a 33 byte array. */
|
||||
assert(len == sizeof(compressed_pubkey));
|
||||
|
@ -82,31 +80,31 @@ int main(void) {
|
|||
* custom nonce function, passing `NULL` will use the RFC-6979 safe default.
|
||||
* Signing with a valid context, verified secret key
|
||||
* and the default nonce function should never fail. */
|
||||
return_val = rustsecp256k1_v0_8_1_ecdsa_sign(ctx, &sig, msg_hash, seckey, NULL, NULL);
|
||||
return_val = rustsecp256k1_v0_9_0_ecdsa_sign(ctx, &sig, msg_hash, seckey, NULL, NULL);
|
||||
assert(return_val);
|
||||
|
||||
/* Serialize the signature in a compact form. Should always return 1
|
||||
* according to the documentation in secp256k1.h. */
|
||||
return_val = rustsecp256k1_v0_8_1_ecdsa_signature_serialize_compact(ctx, serialized_signature, &sig);
|
||||
return_val = rustsecp256k1_v0_9_0_ecdsa_signature_serialize_compact(ctx, serialized_signature, &sig);
|
||||
assert(return_val);
|
||||
|
||||
|
||||
/*** Verification ***/
|
||||
|
||||
/* Deserialize the signature. This will return 0 if the signature can't be parsed correctly. */
|
||||
if (!rustsecp256k1_v0_8_1_ecdsa_signature_parse_compact(ctx, &sig, serialized_signature)) {
|
||||
if (!rustsecp256k1_v0_9_0_ecdsa_signature_parse_compact(ctx, &sig, serialized_signature)) {
|
||||
printf("Failed parsing the signature\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Deserialize the public key. This will return 0 if the public key can't be parsed correctly. */
|
||||
if (!rustsecp256k1_v0_8_1_ec_pubkey_parse(ctx, &pubkey, compressed_pubkey, sizeof(compressed_pubkey))) {
|
||||
if (!rustsecp256k1_v0_9_0_ec_pubkey_parse(ctx, &pubkey, compressed_pubkey, sizeof(compressed_pubkey))) {
|
||||
printf("Failed parsing the public key\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Verify a signature. This will return 1 if it's valid and 0 if it's not. */
|
||||
is_signature_valid = rustsecp256k1_v0_8_1_ecdsa_verify(ctx, &sig, msg_hash, &pubkey);
|
||||
is_signature_valid = rustsecp256k1_v0_9_0_ecdsa_verify(ctx, &sig, msg_hash, &pubkey);
|
||||
|
||||
printf("Is the signature valid? %s\n", is_signature_valid ? "true" : "false");
|
||||
printf("Secret Key: ");
|
||||
|
@ -116,18 +114,26 @@ int main(void) {
|
|||
printf("Signature: ");
|
||||
print_hex(serialized_signature, sizeof(serialized_signature));
|
||||
|
||||
|
||||
/* This will clear everything from the context and free the memory */
|
||||
rustsecp256k1_v0_8_1_context_destroy(ctx);
|
||||
rustsecp256k1_v0_9_0_context_destroy(ctx);
|
||||
|
||||
/* Bonus example: if all we need is signature verification (and no key
|
||||
generation or signing), we don't need to use a context created via
|
||||
rustsecp256k1_v0_9_0_context_create(). We can simply use the static (i.e., global)
|
||||
context rustsecp256k1_v0_9_0_context_static. See its description in
|
||||
include/secp256k1.h for details. */
|
||||
is_signature_valid2 = rustsecp256k1_v0_9_0_ecdsa_verify(rustsecp256k1_v0_9_0_context_static,
|
||||
&sig, msg_hash, &pubkey);
|
||||
assert(is_signature_valid2 == is_signature_valid);
|
||||
|
||||
/* It's best practice to try to clear secrets from memory after using them.
|
||||
* This is done because some bugs can allow an attacker to leak memory, for
|
||||
     * example through "out of bounds" array access (see Heartbleed), or the OS
|
||||
* swapping them to disk. Hence, we overwrite the secret key buffer with zeros.
|
||||
*
|
||||
* TODO: Prevent these writes from being optimized out, as any good compiler
|
||||
* Here we are preventing these writes from being optimized out, as any good compiler
|
||||
* will remove any writes that aren't used. */
|
||||
memset(seckey, 0, sizeof(seckey));
|
||||
secure_erase(seckey, sizeof(seckey));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -17,7 +17,13 @@
|
|||
*/
|
||||
|
||||
#if defined(_WIN32)
|
||||
/*
|
||||
* The defined WIN32_NO_STATUS macro disables return code definitions in
|
||||
* windows.h, which avoids "macro redefinition" MSVC warnings in ntstatus.h.
|
||||
*/
|
||||
#define WIN32_NO_STATUS
|
||||
#include <windows.h>
|
||||
#undef WIN32_NO_STATUS
|
||||
#include <ntstatus.h>
|
||||
#include <bcrypt.h>
|
||||
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__)
|
||||
|
@ -71,3 +77,32 @@ static void print_hex(unsigned char* data, size_t size) {
|
|||
}
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
// For SecureZeroMemory
|
||||
#include <Windows.h>
|
||||
#endif
|
||||
/* Cleanses memory to prevent leaking sensitive info. Won't be optimized out. */
|
||||
static void secure_erase(void *ptr, size_t len) {
|
||||
#if defined(_MSC_VER)
|
||||
/* SecureZeroMemory is guaranteed not to be optimized out by MSVC. */
|
||||
SecureZeroMemory(ptr, len);
|
||||
#elif defined(__GNUC__)
|
||||
/* We use a memory barrier that scares the compiler away from optimizing out the memset.
|
||||
*
|
||||
* Quoting Adam Langley <agl@google.com> in commit ad1907fe73334d6c696c8539646c21b11178f20f
|
||||
* in BoringSSL (ISC License):
|
||||
* As best as we can tell, this is sufficient to break any optimisations that
|
||||
* might try to eliminate "superfluous" memsets.
|
||||
     * This method is used in memzero_explicit() in the Linux kernel, too. Its advantage is that it is
|
||||
* pretty efficient, because the compiler can still implement the memset() efficiently,
|
||||
* just not remove it entirely. See "Dead Store Elimination (Still) Considered Harmful" by
|
||||
* Yang et al. (USENIX Security 2017) for more background.
|
||||
*/
|
||||
memset(ptr, 0, len);
|
||||
__asm__ __volatile__("" : : "r"(ptr) : "memory");
|
||||
#else
|
||||
void *(*volatile const volatile_memset)(void *, int, size_t) = memset;
|
||||
volatile_memset(ptr, 0, len);
|
||||
#endif
|
||||
}
|
|
@ -15,7 +15,7 @@
|
|||
#include <secp256k1_extrakeys.h>
|
||||
#include <secp256k1_schnorrsig.h>
|
||||
|
||||
#include "random.h"
|
||||
#include "examples_util.h"
|
||||
|
||||
int main(void) {
|
||||
unsigned char msg[12] = "Hello World!";
|
||||
|
@ -26,20 +26,20 @@ int main(void) {
|
|||
unsigned char auxiliary_rand[32];
|
||||
unsigned char serialized_pubkey[32];
|
||||
unsigned char signature[64];
|
||||
int is_signature_valid;
|
||||
int is_signature_valid, is_signature_valid2;
|
||||
int return_val;
|
||||
rustsecp256k1_v0_8_1_xonly_pubkey pubkey;
|
||||
rustsecp256k1_v0_8_1_keypair keypair;
|
||||
rustsecp256k1_v0_9_0_xonly_pubkey pubkey;
|
||||
rustsecp256k1_v0_9_0_keypair keypair;
|
||||
/* Before we can call actual API functions, we need to create a "context". */
|
||||
rustsecp256k1_v0_8_1_context* ctx = rustsecp256k1_v0_8_1_context_create(SECP256K1_CONTEXT_NONE);
|
||||
rustsecp256k1_v0_9_0_context* ctx = rustsecp256k1_v0_9_0_context_create(SECP256K1_CONTEXT_NONE);
|
||||
if (!fill_random(randomize, sizeof(randomize))) {
|
||||
printf("Failed to generate randomness\n");
|
||||
return 1;
|
||||
}
|
||||
/* Randomizing the context is recommended to protect against side-channel
|
||||
* leakage See `rustsecp256k1_v0_8_1_context_randomize` in secp256k1.h for more
|
||||
* leakage See `rustsecp256k1_v0_9_0_context_randomize` in secp256k1.h for more
|
||||
* information about it. This should never fail. */
|
||||
return_val = rustsecp256k1_v0_8_1_context_randomize(ctx, randomize);
|
||||
return_val = rustsecp256k1_v0_9_0_context_randomize(ctx, randomize);
|
||||
assert(return_val);
|
||||
|
||||
/*** Key Generation ***/
|
||||
|
@ -54,21 +54,21 @@ int main(void) {
|
|||
}
|
||||
/* Try to create a keypair with a valid context, it should only fail if
|
||||
* the secret key is zero or out of range. */
|
||||
if (rustsecp256k1_v0_8_1_keypair_create(ctx, &keypair, seckey)) {
|
||||
if (rustsecp256k1_v0_9_0_keypair_create(ctx, &keypair, seckey)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Extract the X-only public key from the keypair. We pass NULL for
|
||||
* `pk_parity` as the parity isn't needed for signing or verification.
|
||||
* `rustsecp256k1_v0_8_1_keypair_xonly_pub` supports returning the parity for
|
||||
* `rustsecp256k1_v0_9_0_keypair_xonly_pub` supports returning the parity for
|
||||
* other use cases such as tests or verifying Taproot tweaks.
|
||||
* This should never fail with a valid context and public key. */
|
||||
return_val = rustsecp256k1_v0_8_1_keypair_xonly_pub(ctx, &pubkey, NULL, &keypair);
|
||||
return_val = rustsecp256k1_v0_9_0_keypair_xonly_pub(ctx, &pubkey, NULL, &keypair);
|
||||
assert(return_val);
|
||||
|
||||
/* Serialize the public key. Should always return 1 for a valid public key. */
|
||||
return_val = rustsecp256k1_v0_8_1_xonly_pubkey_serialize(ctx, serialized_pubkey, &pubkey);
|
||||
return_val = rustsecp256k1_v0_9_0_xonly_pubkey_serialize(ctx, serialized_pubkey, &pubkey);
|
||||
assert(return_val);
|
||||
|
||||
/*** Signing ***/
|
||||
|
@ -76,7 +76,7 @@ int main(void) {
|
|||
/* Instead of signing (possibly very long) messages directly, we sign a
|
||||
* 32-byte hash of the message in this example.
|
||||
*
|
||||
* We use rustsecp256k1_v0_8_1_tagged_sha256 to create this hash. This function expects
|
||||
* We use rustsecp256k1_v0_9_0_tagged_sha256 to create this hash. This function expects
|
||||
* a context-specific "tag", which restricts the context in which the signed
|
||||
* messages should be considered valid. For example, if protocol A mandates
|
||||
* to use the tag "my_fancy_protocol" and protocol B mandates to use the tag
|
||||
|
@ -87,7 +87,7 @@ int main(void) {
|
|||
* message that has intended consequences in the intended context (e.g.,
|
||||
* protocol A) but would have unintended consequences if it were valid in
|
||||
* some other context (e.g., protocol B). */
|
||||
return_val = rustsecp256k1_v0_8_1_tagged_sha256(ctx, msg_hash, tag, sizeof(tag), msg, sizeof(msg));
|
||||
return_val = rustsecp256k1_v0_9_0_tagged_sha256(ctx, msg_hash, tag, sizeof(tag), msg, sizeof(msg));
|
||||
assert(return_val);
|
||||
|
||||
/* Generate 32 bytes of randomness to use with BIP-340 schnorr signing. */
|
||||
|
@ -98,30 +98,30 @@ int main(void) {
|
|||
|
||||
/* Generate a Schnorr signature.
|
||||
*
|
||||
* We use the rustsecp256k1_v0_8_1_schnorrsig_sign32 function that provides a simple
|
||||
* We use the rustsecp256k1_v0_9_0_schnorrsig_sign32 function that provides a simple
|
||||
* interface for signing 32-byte messages (which in our case is a hash of
|
||||
* the actual message). BIP-340 recommends passing 32 bytes of randomness
|
||||
* to the signing function to improve security against side-channel attacks.
|
||||
* Signing with a valid context, a 32-byte message, a verified keypair, and
|
||||
* any 32 bytes of auxiliary random data should never fail. */
|
||||
return_val = rustsecp256k1_v0_8_1_schnorrsig_sign32(ctx, signature, msg_hash, &keypair, auxiliary_rand);
|
||||
return_val = rustsecp256k1_v0_9_0_schnorrsig_sign32(ctx, signature, msg_hash, &keypair, auxiliary_rand);
|
||||
assert(return_val);
|
||||
|
||||
/*** Verification ***/
|
||||
|
||||
/* Deserialize the public key. This will return 0 if the public key can't
|
||||
* be parsed correctly */
|
||||
if (!rustsecp256k1_v0_8_1_xonly_pubkey_parse(ctx, &pubkey, serialized_pubkey)) {
|
||||
if (!rustsecp256k1_v0_9_0_xonly_pubkey_parse(ctx, &pubkey, serialized_pubkey)) {
|
||||
printf("Failed parsing the public key\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Compute the tagged hash on the received messages using the same tag as the signer. */
|
||||
return_val = rustsecp256k1_v0_8_1_tagged_sha256(ctx, msg_hash, tag, sizeof(tag), msg, sizeof(msg));
|
||||
return_val = rustsecp256k1_v0_9_0_tagged_sha256(ctx, msg_hash, tag, sizeof(tag), msg, sizeof(msg));
|
||||
assert(return_val);
|
||||
|
||||
/* Verify a signature. This will return 1 if it's valid and 0 if it's not. */
|
||||
is_signature_valid = rustsecp256k1_v0_8_1_schnorrsig_verify(ctx, signature, msg_hash, 32, &pubkey);
|
||||
is_signature_valid = rustsecp256k1_v0_9_0_schnorrsig_verify(ctx, signature, msg_hash, 32, &pubkey);
|
||||
|
||||
|
||||
printf("Is the signature valid? %s\n", is_signature_valid ? "true" : "false");
|
||||
|
@ -133,16 +133,24 @@ int main(void) {
|
|||
print_hex(signature, sizeof(signature));
|
||||
|
||||
/* This will clear everything from the context and free the memory */
|
||||
rustsecp256k1_v0_8_1_context_destroy(ctx);
|
||||
rustsecp256k1_v0_9_0_context_destroy(ctx);
|
||||
|
||||
/* Bonus example: if all we need is signature verification (and no key
|
||||
generation or signing), we don't need to use a context created via
|
||||
rustsecp256k1_v0_9_0_context_create(). We can simply use the static (i.e., global)
|
||||
context rustsecp256k1_v0_9_0_context_static. See its description in
|
||||
include/secp256k1.h for details. */
|
||||
is_signature_valid2 = rustsecp256k1_v0_9_0_schnorrsig_verify(rustsecp256k1_v0_9_0_context_static,
|
||||
signature, msg_hash, 32, &pubkey);
|
||||
assert(is_signature_valid2 == is_signature_valid);
|
||||
|
||||
/* It's best practice to try to clear secrets from memory after using them.
|
||||
* This is done because some bugs can allow an attacker to leak memory, for
|
||||
     * example through "out of bounds" array access (see Heartbleed), or the OS
|
||||
* swapping them to disk. Hence, we overwrite the secret key buffer with zeros.
|
||||
*
|
||||
* TODO: Prevent these writes from being optimized out, as any good compiler
|
||||
* Here we are preventing these writes from being optimized out, as any good compiler
|
||||
* will remove any writes that aren't used. */
|
||||
memset(seckey, 0, sizeof(seckey));
|
||||
|
||||
secure_erase(seckey, sizeof(seckey));
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -29,25 +29,25 @@ extern "C" {
|
|||
* The primary purpose of context objects is to store randomization data for
|
||||
* enhanced protection against side-channel leakage. This protection is only
|
||||
* effective if the context is randomized after its creation. See
|
||||
* rustsecp256k1_v0_8_1_context_create for creation of contexts and
|
||||
* rustsecp256k1_v0_8_1_context_randomize for randomization.
|
||||
* rustsecp256k1_v0_9_0_context_create for creation of contexts and
|
||||
* rustsecp256k1_v0_9_0_context_randomize for randomization.
|
||||
*
|
||||
* A secondary purpose of context objects is to store pointers to callback
|
||||
* functions that the library will call when certain error states arise. See
|
||||
* rustsecp256k1_v0_8_1_context_set_error_callback as well as
|
||||
* rustsecp256k1_v0_8_1_context_set_illegal_callback for details. Future library versions
|
||||
* rustsecp256k1_v0_9_0_context_set_error_callback as well as
|
||||
* rustsecp256k1_v0_9_0_context_set_illegal_callback for details. Future library versions
|
||||
* may use context objects for additional purposes.
|
||||
*
|
||||
* A constructed context can safely be used from multiple threads
|
||||
* simultaneously, but API calls that take a non-const pointer to a context
|
||||
* need exclusive access to it. In particular this is the case for
|
||||
* rustsecp256k1_v0_8_1_context_destroy, rustsecp256k1_v0_8_1_context_preallocated_destroy,
|
||||
* and rustsecp256k1_v0_8_1_context_randomize.
|
||||
* rustsecp256k1_v0_9_0_context_destroy, rustsecp256k1_v0_9_0_context_preallocated_destroy,
|
||||
* and rustsecp256k1_v0_9_0_context_randomize.
|
||||
*
|
||||
* Regarding randomization, either do it once at creation time (in which case
|
||||
* you do not need any locking for the other calls), or use a read-write lock.
|
||||
*/
|
||||
typedef struct rustsecp256k1_v0_8_1_context_struct rustsecp256k1_v0_8_1_context;
|
||||
typedef struct rustsecp256k1_v0_9_0_context_struct rustsecp256k1_v0_9_0_context;
|
||||
|
||||
/** Opaque data structure that holds rewritable "scratch space"
|
||||
*
|
||||
|
@ -60,7 +60,7 @@ typedef struct rustsecp256k1_v0_8_1_context_struct rustsecp256k1_v0_8_1_context;
|
|||
* Unlike the context object, this cannot safely be shared between threads
|
||||
* without additional synchronization logic.
|
||||
*/
|
||||
typedef struct rustsecp256k1_v0_8_1_scratch_space_struct rustsecp256k1_v0_8_1_scratch_space;
|
||||
typedef struct rustsecp256k1_v0_9_0_scratch_space_struct rustsecp256k1_v0_9_0_scratch_space;
|
||||
|
||||
/** Opaque data structure that holds a parsed and valid public key.
|
||||
*
|
||||
|
@ -68,12 +68,12 @@ typedef struct rustsecp256k1_v0_8_1_scratch_space_struct rustsecp256k1_v0_8_1_sc
|
|||
* guaranteed to be portable between different platforms or versions. It is
|
||||
* however guaranteed to be 64 bytes in size, and can be safely copied/moved.
|
||||
* If you need to convert to a format suitable for storage or transmission,
|
||||
* use rustsecp256k1_v0_8_1_ec_pubkey_serialize and rustsecp256k1_v0_8_1_ec_pubkey_parse. To
|
||||
* compare keys, use rustsecp256k1_v0_8_1_ec_pubkey_cmp.
|
||||
* use rustsecp256k1_v0_9_0_ec_pubkey_serialize and rustsecp256k1_v0_9_0_ec_pubkey_parse. To
|
||||
* compare keys, use rustsecp256k1_v0_9_0_ec_pubkey_cmp.
|
||||
*/
|
||||
typedef struct {
|
||||
unsigned char data[64];
|
||||
} rustsecp256k1_v0_8_1_pubkey;
|
||||
} rustsecp256k1_v0_9_0_pubkey;
|
||||
|
||||
/** Opaque data structured that holds a parsed ECDSA signature.
|
||||
*
|
||||
|
@ -81,12 +81,12 @@ typedef struct {
|
|||
* guaranteed to be portable between different platforms or versions. It is
|
||||
* however guaranteed to be 64 bytes in size, and can be safely copied/moved.
|
||||
* If you need to convert to a format suitable for storage, transmission, or
|
||||
* comparison, use the rustsecp256k1_v0_8_1_ecdsa_signature_serialize_* and
|
||||
* rustsecp256k1_v0_8_1_ecdsa_signature_parse_* functions.
|
||||
* comparison, use the rustsecp256k1_v0_9_0_ecdsa_signature_serialize_* and
|
||||
* rustsecp256k1_v0_9_0_ecdsa_signature_parse_* functions.
|
||||
*/
|
||||
typedef struct {
|
||||
unsigned char data[64];
|
||||
} rustsecp256k1_v0_8_1_ecdsa_signature;
|
||||
} rustsecp256k1_v0_9_0_ecdsa_signature;
|
||||
|
||||
/** A pointer to a function to deterministically generate a nonce.
|
||||
*
|
||||
|
@ -104,7 +104,7 @@ typedef struct {
|
|||
* Except for test cases, this function should compute some cryptographic hash of
|
||||
* the message, the algorithm, the key and the attempt.
|
||||
*/
|
||||
typedef int (*rustsecp256k1_v0_8_1_nonce_function)(
|
||||
typedef int (*rustsecp256k1_v0_9_0_nonce_function)(
|
||||
unsigned char *nonce32,
|
||||
const unsigned char *msg32,
|
||||
const unsigned char *key32,
|
||||
|
@ -122,18 +122,6 @@ typedef int (*rustsecp256k1_v0_8_1_nonce_function)(
|
|||
# endif
|
||||
# endif
|
||||
|
||||
# if (!defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) )
|
||||
# if SECP256K1_GNUC_PREREQ(2,7)
|
||||
# define SECP256K1_INLINE __inline__
|
||||
# elif (defined(_MSC_VER))
|
||||
# define SECP256K1_INLINE __inline
|
||||
# else
|
||||
# define SECP256K1_INLINE
|
||||
# endif
|
||||
# else
|
||||
# define SECP256K1_INLINE inline
|
||||
# endif
|
||||
|
||||
/* When this header is used at build-time the SECP256K1_BUILD define needs to be set
|
||||
* to correctly setup export attributes and nullness checks. This is normally done
|
||||
* by secp256k1.c but to guard against this header being included before secp256k1.c
|
||||
|
@ -145,21 +133,35 @@ typedef int (*rustsecp256k1_v0_8_1_nonce_function)(
|
|||
# define SECP256K1_NO_BUILD
|
||||
#endif
|
||||
|
||||
/** At secp256k1 build-time DLL_EXPORT is defined when building objects destined
|
||||
* for a shared library, but not for those intended for static libraries.
|
||||
*/
|
||||
|
||||
#ifndef SECP256K1_API
|
||||
/* Symbol visibility. */
|
||||
#if defined(_WIN32)
|
||||
# if defined(SECP256K1_BUILD) && defined(DLL_EXPORT)
|
||||
# define SECP256K1_API __declspec(dllexport)
|
||||
# else
|
||||
# define SECP256K1_API
|
||||
/* GCC for Windows (e.g., MinGW) accepts the __declspec syntax
|
||||
* for MSVC compatibility. A __declspec declaration implies (but is not
|
||||
* exactly equivalent to) __attribute__ ((visibility("default"))), and so we
|
||||
* actually want __declspec even on GCC, see "Microsoft Windows Function
|
||||
* Attributes" in the GCC manual and the recommendations in
|
||||
* https://gcc.gnu.org/wiki/Visibility. */
|
||||
# if defined(SECP256K1_BUILD)
|
||||
# if defined(DLL_EXPORT) || defined(SECP256K1_DLL_EXPORT)
|
||||
/* Building libsecp256k1 as a DLL.
|
||||
* 1. If using Libtool, it defines DLL_EXPORT automatically.
|
||||
* 2. In other cases, SECP256K1_DLL_EXPORT must be defined. */
|
||||
# define SECP256K1_API extern __declspec (dllexport)
|
||||
# endif
|
||||
# elif defined(__GNUC__) && (__GNUC__ >= 4) && defined(SECP256K1_BUILD)
|
||||
# define SECP256K1_API __attribute__ ((visibility ("default")))
|
||||
/* The user must define SECP256K1_STATIC when consuming libsecp256k1 as a static
|
||||
* library on Windows. */
|
||||
# elif !defined(SECP256K1_STATIC)
|
||||
/* Consuming libsecp256k1 as a DLL. */
|
||||
# define SECP256K1_API extern __declspec (dllimport)
|
||||
# endif
|
||||
#endif
|
||||
#ifndef SECP256K1_API
|
||||
# if defined(__GNUC__) && (__GNUC__ >= 4) && defined(SECP256K1_BUILD)
|
||||
/* Building libsecp256k1 on non-Windows using GCC or compatible. */
|
||||
# define SECP256K1_API extern __attribute__ ((visibility ("default")))
|
||||
# else
|
||||
# define SECP256K1_API
|
||||
/* All cases not captured above. */
|
||||
# define SECP256K1_API extern
|
||||
# endif
|
||||
#endif
|
||||
|
||||
|
@ -198,8 +200,8 @@ typedef int (*rustsecp256k1_v0_8_1_nonce_function)(
|
|||
#define SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY (1 << 10)
|
||||
#define SECP256K1_FLAGS_BIT_COMPRESSION (1 << 8)
|
||||
|
||||
/** Context flags to pass to rustsecp256k1_v0_8_1_context_create, rustsecp256k1_v0_8_1_context_preallocated_size, and
|
||||
* rustsecp256k1_v0_8_1_context_preallocated_create. */
|
||||
/** Context flags to pass to rustsecp256k1_v0_9_0_context_create, rustsecp256k1_v0_9_0_context_preallocated_size, and
|
||||
* rustsecp256k1_v0_9_0_context_preallocated_create. */
|
||||
#define SECP256K1_CONTEXT_NONE (SECP256K1_FLAGS_TYPE_CONTEXT)
|
||||
|
||||
/** Deprecated context flags. These flags are treated equivalent to SECP256K1_CONTEXT_NONE. */
|
||||
|
@ -209,7 +211,7 @@ typedef int (*rustsecp256k1_v0_8_1_nonce_function)(
|
|||
/* Testing flag. Do not use. */
|
||||
#define SECP256K1_CONTEXT_DECLASSIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY)
|
||||
|
||||
/** Flag to pass to rustsecp256k1_v0_8_1_ec_pubkey_serialize. */
|
||||
/** Flag to pass to rustsecp256k1_v0_9_0_ec_pubkey_serialize. */
|
||||
#define SECP256K1_EC_COMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION | SECP256K1_FLAGS_BIT_COMPRESSION)
|
||||
#define SECP256K1_EC_UNCOMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION)
|
||||
|
||||
|
@ -221,23 +223,20 @@ typedef int (*rustsecp256k1_v0_8_1_nonce_function)(
|
|||
#define SECP256K1_TAG_PUBKEY_HYBRID_ODD 0x07
|
||||
|
||||
/** A built-in constant secp256k1 context object with static storage duration, to be
|
||||
* used in conjunction with rustsecp256k1_v0_8_1_selftest.
|
||||
* used in conjunction with rustsecp256k1_v0_9_0_selftest.
|
||||
*
|
||||
* This context object offers *only limited functionality* , i.e., it cannot be used
|
||||
* for API functions that perform computations involving secret keys, e.g., signing
|
||||
* and public key generation. If this restriction applies to a specific API function,
|
||||
* it is mentioned in its documentation. See rustsecp256k1_v0_8_1_context_create if you need a
|
||||
* it is mentioned in its documentation. See rustsecp256k1_v0_9_0_context_create if you need a
|
||||
* full context object that supports all functionality offered by the library.
|
||||
*
|
||||
* It is highly recommended to call rustsecp256k1_v0_8_1_selftest before using this context.
|
||||
* It is highly recommended to call rustsecp256k1_v0_9_0_selftest before using this context.
|
||||
*/
|
||||
SECP256K1_API extern const rustsecp256k1_v0_8_1_context *rustsecp256k1_v0_8_1_context_static;
|
||||
|
||||
/** Deprecated alias for rustsecp256k1_v0_8_1_context_static. */
|
||||
SECP256K1_API extern const rustsecp256k1_v0_8_1_context *rustsecp256k1_v0_8_1_context_no_precomp
|
||||
SECP256K1_DEPRECATED("Use rustsecp256k1_v0_8_1_context_static instead");
|
||||
/** Deprecated alias for rustsecp256k1_v0_9_0_context_static. */
|
||||
|
||||
/** Perform basic self tests (to be used in conjunction with rustsecp256k1_v0_8_1_context_static)
|
||||
/** Perform basic self tests (to be used in conjunction with rustsecp256k1_v0_9_0_context_static)
|
||||
*
|
||||
* This function performs self tests that detect some serious usage errors and
|
||||
* similar conditions, e.g., when the library is compiled for the wrong endianness.
|
||||
|
@ -245,23 +244,23 @@ SECP256K1_DEPRECATED("Use rustsecp256k1_v0_8_1_context_static instead");
|
|||
* very rudimentary and are not intended as a replacement for running the test
|
||||
* binaries.
|
||||
*
|
||||
* It is highly recommended to call this before using rustsecp256k1_v0_8_1_context_static.
|
||||
* It is highly recommended to call this before using rustsecp256k1_v0_9_0_context_static.
|
||||
* It is not necessary to call this function before using a context created with
|
||||
* rustsecp256k1_v0_8_1_context_create (or rustsecp256k1_v0_8_1_context_preallocated_create), which will
|
||||
* rustsecp256k1_v0_9_0_context_create (or rustsecp256k1_v0_9_0_context_preallocated_create), which will
|
||||
* take care of performing the self tests.
|
||||
*
|
||||
* If the tests fail, this function will call the default error handler to abort the
|
||||
* program (see rustsecp256k1_v0_8_1_context_set_error_callback).
|
||||
* program (see rustsecp256k1_v0_9_0_context_set_error_callback).
|
||||
*/
|
||||
SECP256K1_API void rustsecp256k1_v0_8_1_selftest(void);
|
||||
SECP256K1_API void rustsecp256k1_v0_9_0_selftest(void);
|
||||
|
||||
|
||||
/** Create a secp256k1 context object (in dynamically allocated memory).
|
||||
*
|
||||
* This function uses malloc to allocate memory. It is guaranteed that malloc is
|
||||
* called at most once for every call of this function. If you need to avoid dynamic
|
||||
* memory allocation entirely, see rustsecp256k1_v0_8_1_context_static and the functions in
|
||||
* rustsecp256k1_v0_8_1_preallocated.h.
|
||||
* memory allocation entirely, see rustsecp256k1_v0_9_0_context_static and the functions in
|
||||
* rustsecp256k1_v0_9_0_preallocated.h.
|
||||
*
|
||||
* Returns: a newly created context object.
|
||||
* In: flags: Always set to SECP256K1_CONTEXT_NONE (see below).
|
||||
|
@ -274,38 +273,39 @@ SECP256K1_API void rustsecp256k1_v0_8_1_selftest(void);
|
|||
*
|
||||
* If the context is intended to be used for API functions that perform computations
|
||||
* involving secret keys, e.g., signing and public key generation, then it is highly
|
||||
* recommended to call rustsecp256k1_v0_8_1_context_randomize on the context before calling
|
||||
* recommended to call rustsecp256k1_v0_9_0_context_randomize on the context before calling
|
||||
* those API functions. This will provide enhanced protection against side-channel
|
||||
* leakage, see rustsecp256k1_v0_8_1_context_randomize for details.
|
||||
* leakage, see rustsecp256k1_v0_9_0_context_randomize for details.
|
||||
*
|
||||
* Do not create a new context object for each operation, as construction and
|
||||
* randomization can take non-negligible time.
|
||||
*/
|
||||
|
||||
/** Copy a secp256k1 context object (into dynamically allocated memory).
|
||||
*
|
||||
* This function uses malloc to allocate memory. It is guaranteed that malloc is
|
||||
* called at most once for every call of this function. If you need to avoid dynamic
|
||||
* memory allocation entirely, see the functions in rustsecp256k1_v0_8_1_preallocated.h.
|
||||
* memory allocation entirely, see the functions in rustsecp256k1_v0_9_0_preallocated.h.
|
||||
*
|
||||
* Cloning rustsecp256k1_v0_9_0_context_static is not possible, and should not be emulated by
|
||||
* the caller (e.g., using memcpy). Create a new context instead.
|
||||
*
|
||||
* Returns: a newly created context object.
|
||||
* Args: ctx: an existing context to copy
|
||||
* Args: ctx: an existing context to copy (not rustsecp256k1_v0_9_0_context_static)
|
||||
*/
|
||||
|
||||
/** Destroy a secp256k1 context object (created in dynamically allocated memory).
|
||||
*
|
||||
* The context pointer may not be used afterwards.
|
||||
*
|
||||
* The context to destroy must have been created using rustsecp256k1_v0_8_1_context_create
|
||||
* or rustsecp256k1_v0_8_1_context_clone. If the context has instead been created using
|
||||
* rustsecp256k1_v0_8_1_context_preallocated_create or rustsecp256k1_v0_8_1_context_preallocated_clone, the
|
||||
* behaviour is undefined. In that case, rustsecp256k1_v0_8_1_context_preallocated_destroy must
|
||||
* The context to destroy must have been created using rustsecp256k1_v0_9_0_context_create
|
||||
* or rustsecp256k1_v0_9_0_context_clone. If the context has instead been created using
|
||||
* rustsecp256k1_v0_9_0_context_preallocated_create or rustsecp256k1_v0_9_0_context_preallocated_clone, the
|
||||
* behaviour is undefined. In that case, rustsecp256k1_v0_9_0_context_preallocated_destroy must
|
||||
* be used instead.
|
||||
*
|
||||
* Args: ctx: an existing context to destroy, constructed using
|
||||
* rustsecp256k1_v0_8_1_context_create or rustsecp256k1_v0_8_1_context_clone
|
||||
* rustsecp256k1_v0_9_0_context_create or rustsecp256k1_v0_9_0_context_clone
|
||||
* (i.e., not rustsecp256k1_v0_9_0_context_static).
|
||||
*/
|
||||
|
||||
/** Set a callback function to be called when an illegal argument is passed to
|
||||
* an API call. It will only trigger for violations that are mentioned
|
||||
* explicitly in the header.
|
||||
|
@ -327,11 +327,11 @@ SECP256K1_API void rustsecp256k1_v0_8_1_selftest(void);
|
|||
* USE_EXTERNAL_DEFAULT_CALLBACKS is defined, which is the case if the build
|
||||
* has been configured with --enable-external-default-callbacks. Then the
|
||||
* following two symbols must be provided to link against:
|
||||
* - void rustsecp256k1_v0_8_1_default_illegal_callback_fn(const char* message, void* data);
|
||||
* - void rustsecp256k1_v0_8_1_default_error_callback_fn(const char* message, void* data);
|
||||
* - void rustsecp256k1_v0_9_0_default_illegal_callback_fn(const char *message, void *data);
|
||||
* - void rustsecp256k1_v0_9_0_default_error_callback_fn(const char *message, void *data);
|
||||
* The library can call these default handlers even before a proper callback data
|
||||
* pointer could have been set using rustsecp256k1_v0_8_1_context_set_illegal_callback or
|
||||
* rustsecp256k1_v0_8_1_context_set_error_callback, e.g., when the creation of a context
|
||||
* pointer could have been set using rustsecp256k1_v0_9_0_context_set_illegal_callback or
|
||||
* rustsecp256k1_v0_9_0_context_set_error_callback, e.g., when the creation of a context
|
||||
* fails. In this case, the corresponding default handler will be called with
|
||||
* the data pointer argument set to NULL.
|
||||
*
|
||||
|
@ -341,10 +341,10 @@ SECP256K1_API void rustsecp256k1_v0_8_1_selftest(void);
|
|||
* (NULL restores the default handler.)
|
||||
* data: the opaque pointer to pass to fun above, must be NULL for the default handler.
|
||||
*
|
||||
* See also rustsecp256k1_v0_8_1_context_set_error_callback.
|
||||
* See also rustsecp256k1_v0_9_0_context_set_error_callback.
|
||||
*/
|
||||
SECP256K1_API void rustsecp256k1_v0_8_1_context_set_illegal_callback(
|
||||
rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API void rustsecp256k1_v0_9_0_context_set_illegal_callback(
|
||||
rustsecp256k1_v0_9_0_context *ctx,
|
||||
void (*fun)(const char *message, void *data),
|
||||
const void *data
|
||||
) SECP256K1_ARG_NONNULL(1);
|
||||
|
@ -358,21 +358,21 @@ SECP256K1_API void rustsecp256k1_v0_8_1_context_set_illegal_callback(
|
|||
* This can only trigger in case of a hardware failure, miscompilation,
|
||||
* memory corruption, serious bug in the library, or other error would can
|
||||
* otherwise result in undefined behaviour. It will not trigger due to mere
|
||||
* incorrect usage of the API (see rustsecp256k1_v0_8_1_context_set_illegal_callback
|
||||
* incorrect usage of the API (see rustsecp256k1_v0_9_0_context_set_illegal_callback
|
||||
* for that). After this callback returns, anything may happen, including
|
||||
* crashing.
|
||||
*
|
||||
* Args: ctx: an existing context object.
|
||||
* In: fun: a pointer to a function to call when an internal error occurs,
|
||||
* taking a message and an opaque pointer (NULL restores the
|
||||
* default handler, see rustsecp256k1_v0_8_1_context_set_illegal_callback
|
||||
* default handler, see rustsecp256k1_v0_9_0_context_set_illegal_callback
|
||||
* for details).
|
||||
* data: the opaque pointer to pass to fun above, must be NULL for the default handler.
|
||||
*
|
||||
* See also rustsecp256k1_v0_8_1_context_set_illegal_callback.
|
||||
* See also rustsecp256k1_v0_9_0_context_set_illegal_callback.
|
||||
*/
|
||||
SECP256K1_API void rustsecp256k1_v0_8_1_context_set_error_callback(
|
||||
rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API void rustsecp256k1_v0_9_0_context_set_error_callback(
|
||||
rustsecp256k1_v0_9_0_context *ctx,
|
||||
void (*fun)(const char *message, void *data),
|
||||
const void *data
|
||||
) SECP256K1_ARG_NONNULL(1);
|
||||
|
@ -384,14 +384,12 @@ SECP256K1_API void rustsecp256k1_v0_8_1_context_set_error_callback(
|
|||
* In: size: amount of memory to be available as scratch space. Some extra
|
||||
* (<100 bytes) will be allocated for extra accounting.
|
||||
*/
|
||||
|
||||
/** Destroy a secp256k1 scratch space.
|
||||
*
|
||||
* The pointer may not be used afterwards.
|
||||
* Args: ctx: a secp256k1 context object.
|
||||
* scratch: space to destroy
|
||||
*/
|
||||
|
||||
/** Parse a variable-length public key into the pubkey object.
|
||||
*
|
||||
* Returns: 1 if the public key was fully valid.
|
||||
|
@ -406,9 +404,9 @@ SECP256K1_API void rustsecp256k1_v0_8_1_context_set_error_callback(
|
|||
* 0x03), uncompressed (65 bytes, header byte 0x04), or hybrid (65 bytes, header
|
||||
* byte 0x06 or 0x07) format public keys.
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_pubkey_parse(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_pubkey* pubkey,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_ec_pubkey_parse(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_pubkey *pubkey,
|
||||
const unsigned char *input,
|
||||
size_t inputlen
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
@ -423,16 +421,16 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_pubkey_pa
|
|||
* In/Out: outputlen: a pointer to an integer which is initially set to the
|
||||
* size of output, and is overwritten with the written
|
||||
* size.
|
||||
* In: pubkey: a pointer to a rustsecp256k1_v0_8_1_pubkey containing an
|
||||
* In: pubkey: a pointer to a rustsecp256k1_v0_9_0_pubkey containing an
|
||||
* initialized public key.
|
||||
* flags: SECP256K1_EC_COMPRESSED if serialization should be in
|
||||
* compressed format, otherwise SECP256K1_EC_UNCOMPRESSED.
|
||||
*/
|
||||
SECP256K1_API int rustsecp256k1_v0_8_1_ec_pubkey_serialize(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API int rustsecp256k1_v0_9_0_ec_pubkey_serialize(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *output,
|
||||
size_t *outputlen,
|
||||
const rustsecp256k1_v0_8_1_pubkey* pubkey,
|
||||
const rustsecp256k1_v0_9_0_pubkey *pubkey,
|
||||
unsigned int flags
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
|
||||
|
||||
|
@ -445,10 +443,10 @@ SECP256K1_API int rustsecp256k1_v0_8_1_ec_pubkey_serialize(
|
|||
* In: pubkey1: first public key to compare
|
||||
* pubkey2: second public key to compare
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_pubkey_cmp(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
const rustsecp256k1_v0_8_1_pubkey* pubkey1,
|
||||
const rustsecp256k1_v0_8_1_pubkey* pubkey2
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_ec_pubkey_cmp(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
const rustsecp256k1_v0_9_0_pubkey *pubkey1,
|
||||
const rustsecp256k1_v0_9_0_pubkey *pubkey2
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
/** Parse an ECDSA signature in compact (64 bytes) format.
|
||||
|
@ -466,9 +464,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_pubkey_cm
|
|||
* S are zero, the resulting sig value is guaranteed to fail verification for
|
||||
* any message and public key.
|
||||
*/
|
||||
SECP256K1_API int rustsecp256k1_v0_8_1_ecdsa_signature_parse_compact(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_ecdsa_signature* sig,
|
||||
SECP256K1_API int rustsecp256k1_v0_9_0_ecdsa_signature_parse_compact(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_ecdsa_signature *sig,
|
||||
const unsigned char *input64
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
|
@ -487,9 +485,9 @@ SECP256K1_API int rustsecp256k1_v0_8_1_ecdsa_signature_parse_compact(
|
|||
* encoded numbers are out of range, signature verification with it is
|
||||
* guaranteed to fail for every message and public key.
|
||||
*/
|
||||
SECP256K1_API int rustsecp256k1_v0_8_1_ecdsa_signature_parse_der(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_ecdsa_signature* sig,
|
||||
SECP256K1_API int rustsecp256k1_v0_9_0_ecdsa_signature_parse_der(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_ecdsa_signature *sig,
|
||||
const unsigned char *input,
|
||||
size_t inputlen
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
@ -505,11 +503,11 @@ SECP256K1_API int rustsecp256k1_v0_8_1_ecdsa_signature_parse_der(
|
|||
* if 0 was returned).
|
||||
* In: sig: a pointer to an initialized signature object
|
||||
*/
|
||||
SECP256K1_API int rustsecp256k1_v0_8_1_ecdsa_signature_serialize_der(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API int rustsecp256k1_v0_9_0_ecdsa_signature_serialize_der(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *output,
|
||||
size_t *outputlen,
|
||||
const rustsecp256k1_v0_8_1_ecdsa_signature* sig
|
||||
const rustsecp256k1_v0_9_0_ecdsa_signature *sig
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
|
||||
|
||||
/** Serialize an ECDSA signature in compact (64 byte) format.
|
||||
|
@ -519,12 +517,12 @@ SECP256K1_API int rustsecp256k1_v0_8_1_ecdsa_signature_serialize_der(
|
|||
* Out: output64: a pointer to a 64-byte array to store the compact serialization
|
||||
* In: sig: a pointer to an initialized signature object
|
||||
*
|
||||
* See rustsecp256k1_v0_8_1_ecdsa_signature_parse_compact for details about the encoding.
|
||||
* See rustsecp256k1_v0_9_0_ecdsa_signature_parse_compact for details about the encoding.
|
||||
*/
|
||||
SECP256K1_API int rustsecp256k1_v0_8_1_ecdsa_signature_serialize_compact(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API int rustsecp256k1_v0_9_0_ecdsa_signature_serialize_compact(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *output64,
|
||||
const rustsecp256k1_v0_8_1_ecdsa_signature* sig
|
||||
const rustsecp256k1_v0_9_0_ecdsa_signature *sig
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
/** Verify an ECDSA signature.
|
||||
|
@ -547,16 +545,16 @@ SECP256K1_API int rustsecp256k1_v0_8_1_ecdsa_signature_serialize_compact(
|
|||
* form are accepted.
|
||||
*
|
||||
* If you need to accept ECDSA signatures from sources that do not obey this
|
||||
* rule, apply rustsecp256k1_v0_8_1_ecdsa_signature_normalize to the signature prior to
|
||||
* rule, apply rustsecp256k1_v0_9_0_ecdsa_signature_normalize to the signature prior to
|
||||
* verification, but be aware that doing so results in malleable signatures.
|
||||
*
|
||||
* For details, see the comments for that function.
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ecdsa_verify(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
const rustsecp256k1_v0_8_1_ecdsa_signature *sig,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_ecdsa_verify(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
const rustsecp256k1_v0_9_0_ecdsa_signature *sig,
|
||||
const unsigned char *msghash32,
|
||||
const rustsecp256k1_v0_8_1_pubkey *pubkey
|
||||
const rustsecp256k1_v0_9_0_pubkey *pubkey
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
|
||||
|
||||
/** Convert a signature to a normalized lower-S form.
|
||||
|
@ -595,50 +593,48 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ecdsa_verify
|
|||
* accept various non-unique encodings, so care should be taken when this
|
||||
* property is required for an application.
|
||||
*
|
||||
* The rustsecp256k1_v0_8_1_ecdsa_sign function will by default create signatures in the
|
||||
* lower-S form, and rustsecp256k1_v0_8_1_ecdsa_verify will not accept others. In case
|
||||
* The rustsecp256k1_v0_9_0_ecdsa_sign function will by default create signatures in the
|
||||
* lower-S form, and rustsecp256k1_v0_9_0_ecdsa_verify will not accept others. In case
|
||||
* signatures come from a system that cannot enforce this property,
|
||||
* rustsecp256k1_v0_8_1_ecdsa_signature_normalize must be called before verification.
|
||||
* rustsecp256k1_v0_9_0_ecdsa_signature_normalize must be called before verification.
|
||||
*/
|
||||
SECP256K1_API int rustsecp256k1_v0_8_1_ecdsa_signature_normalize(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_ecdsa_signature *sigout,
|
||||
const rustsecp256k1_v0_8_1_ecdsa_signature *sigin
|
||||
SECP256K1_API int rustsecp256k1_v0_9_0_ecdsa_signature_normalize(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_ecdsa_signature *sigout,
|
||||
const rustsecp256k1_v0_9_0_ecdsa_signature *sigin
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
/** An implementation of RFC6979 (using HMAC-SHA256) as nonce generation function.
|
||||
* If a data pointer is passed, it is assumed to be a pointer to 32 bytes of
|
||||
* extra entropy.
|
||||
*/
|
||||
SECP256K1_API extern const rustsecp256k1_v0_8_1_nonce_function rustsecp256k1_v0_8_1_nonce_function_rfc6979;
|
||||
|
||||
/** A default safe nonce generation function (currently equal to rustsecp256k1_v0_8_1_nonce_function_rfc6979). */
|
||||
SECP256K1_API extern const rustsecp256k1_v0_8_1_nonce_function rustsecp256k1_v0_8_1_nonce_function_default;
|
||||
/** A default safe nonce generation function (currently equal to rustsecp256k1_v0_9_0_nonce_function_rfc6979). */
|
||||
|
||||
/** Create an ECDSA signature.
|
||||
*
|
||||
* Returns: 1: signature created
|
||||
* 0: the nonce generation function failed, or the secret key was invalid.
|
||||
* Args: ctx: pointer to a context object (not rustsecp256k1_v0_8_1_context_static).
|
||||
* Args: ctx: pointer to a context object (not rustsecp256k1_v0_9_0_context_static).
|
||||
* Out: sig: pointer to an array where the signature will be placed.
|
||||
* In: msghash32: the 32-byte message hash being signed.
|
||||
* seckey: pointer to a 32-byte secret key.
|
||||
* noncefp: pointer to a nonce generation function. If NULL,
|
||||
* rustsecp256k1_v0_8_1_nonce_function_default is used.
|
||||
* rustsecp256k1_v0_9_0_nonce_function_default is used.
|
||||
* ndata: pointer to arbitrary data used by the nonce generation function
|
||||
* (can be NULL). If it is non-NULL and
|
||||
* rustsecp256k1_v0_8_1_nonce_function_default is used, then ndata must be a
|
||||
* rustsecp256k1_v0_9_0_nonce_function_default is used, then ndata must be a
|
||||
* pointer to 32-bytes of additional data.
|
||||
*
|
||||
* The created signature is always in lower-S form. See
|
||||
* rustsecp256k1_v0_8_1_ecdsa_signature_normalize for more details.
|
||||
* rustsecp256k1_v0_9_0_ecdsa_signature_normalize for more details.
|
||||
*/
|
||||
SECP256K1_API int rustsecp256k1_v0_8_1_ecdsa_sign(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_ecdsa_signature *sig,
|
||||
SECP256K1_API int rustsecp256k1_v0_9_0_ecdsa_sign(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_ecdsa_signature *sig,
|
||||
const unsigned char *msghash32,
|
||||
const unsigned char *seckey,
|
||||
rustsecp256k1_v0_8_1_nonce_function noncefp,
|
||||
rustsecp256k1_v0_9_0_nonce_function noncefp,
|
||||
const void *ndata
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
|
||||
|
||||
|
@ -654,8 +650,8 @@ SECP256K1_API int rustsecp256k1_v0_8_1_ecdsa_sign(
|
|||
* Args: ctx: pointer to a context object.
|
||||
* In: seckey: pointer to a 32-byte secret key.
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_seckey_verify(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_ec_seckey_verify(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
const unsigned char *seckey
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
|
||||
|
||||
|
@ -663,38 +659,38 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_seckey_ve
|
|||
*
|
||||
* Returns: 1: secret was valid, public key stores.
|
||||
* 0: secret was invalid, try again.
|
||||
* Args: ctx: pointer to a context object (not rustsecp256k1_v0_8_1_context_static).
|
||||
* Args: ctx: pointer to a context object (not rustsecp256k1_v0_9_0_context_static).
|
||||
* Out: pubkey: pointer to the created public key.
|
||||
* In: seckey: pointer to a 32-byte secret key.
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_pubkey_create(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_pubkey *pubkey,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_ec_pubkey_create(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_pubkey *pubkey,
|
||||
const unsigned char *seckey
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
/** Negates a secret key in place.
|
||||
*
|
||||
* Returns: 0 if the given secret key is invalid according to
|
||||
* rustsecp256k1_v0_8_1_ec_seckey_verify. 1 otherwise
|
||||
* rustsecp256k1_v0_9_0_ec_seckey_verify. 1 otherwise
|
||||
* Args: ctx: pointer to a context object
|
||||
* In/Out: seckey: pointer to the 32-byte secret key to be negated. If the
|
||||
* secret key is invalid according to
|
||||
* rustsecp256k1_v0_8_1_ec_seckey_verify, this function returns 0 and
|
||||
* rustsecp256k1_v0_9_0_ec_seckey_verify, this function returns 0 and
|
||||
* seckey will be set to some unspecified value.
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_seckey_negate(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_ec_seckey_negate(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *seckey
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
|
||||
|
||||
/** Same as rustsecp256k1_v0_8_1_ec_seckey_negate, but DEPRECATED. Will be removed in
|
||||
/** Same as rustsecp256k1_v0_9_0_ec_seckey_negate, but DEPRECATED. Will be removed in
|
||||
* future versions. */
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_privkey_negate(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_ec_privkey_negate(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *seckey
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2)
|
||||
SECP256K1_DEPRECATED("Use rustsecp256k1_v0_8_1_ec_seckey_negate instead");
|
||||
SECP256K1_DEPRECATED("Use rustsecp256k1_v0_9_0_ec_seckey_negate instead");
|
||||
|
||||
/** Negates a public key in place.
|
||||
*
|
||||
|
@ -702,9 +698,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_privkey_n
|
|||
* Args: ctx: pointer to a context object
|
||||
* In/Out: pubkey: pointer to the public key to be negated.
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_pubkey_negate(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_pubkey *pubkey
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_ec_pubkey_negate(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_pubkey *pubkey
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
|
||||
|
||||
/** Tweak a secret key by adding tweak to it.
|
||||
|
@ -714,28 +710,28 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_pubkey_ne
|
|||
* otherwise.
|
||||
* Args: ctx: pointer to a context object.
|
||||
* In/Out: seckey: pointer to a 32-byte secret key. If the secret key is
|
||||
* invalid according to rustsecp256k1_v0_8_1_ec_seckey_verify, this
|
||||
* invalid according to rustsecp256k1_v0_9_0_ec_seckey_verify, this
|
||||
* function returns 0. seckey will be set to some unspecified
|
||||
* value if this function returns 0.
|
||||
* In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to
|
||||
* rustsecp256k1_v0_8_1_ec_seckey_verify, this function returns 0. For
|
||||
* uniformly random 32-byte arrays the chance of being invalid
|
||||
* is negligible (around 1 in 2^128).
|
||||
* In: tweak32: pointer to a 32-byte tweak, which must be valid according to
|
||||
* rustsecp256k1_v0_9_0_ec_seckey_verify or 32 zero bytes. For uniformly
|
||||
* random 32-byte tweaks, the chance of being invalid is
|
||||
* negligible (around 1 in 2^128).
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_seckey_tweak_add(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_ec_seckey_tweak_add(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *seckey,
|
||||
const unsigned char *tweak32
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
/** Same as rustsecp256k1_v0_8_1_ec_seckey_tweak_add, but DEPRECATED. Will be removed in
|
||||
/** Same as rustsecp256k1_v0_9_0_ec_seckey_tweak_add, but DEPRECATED. Will be removed in
|
||||
* future versions. */
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_privkey_tweak_add(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_ec_privkey_tweak_add(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *seckey,
|
||||
const unsigned char *tweak32
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3)
|
||||
SECP256K1_DEPRECATED("Use rustsecp256k1_v0_8_1_ec_seckey_tweak_add instead");
|
||||
SECP256K1_DEPRECATED("Use rustsecp256k1_v0_9_0_ec_seckey_tweak_add instead");
|
||||
|
||||
/** Tweak a public key by adding tweak times the generator to it.
|
||||
*
|
||||
|
@ -745,14 +741,14 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_privkey_t
|
|||
* Args: ctx: pointer to a context object.
|
||||
* In/Out: pubkey: pointer to a public key object. pubkey will be set to an
|
||||
* invalid value if this function returns 0.
|
||||
* In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to
|
||||
* rustsecp256k1_v0_8_1_ec_seckey_verify, this function returns 0. For
|
||||
* uniformly random 32-byte arrays the chance of being invalid
|
||||
* is negligible (around 1 in 2^128).
|
||||
* In: tweak32: pointer to a 32-byte tweak, which must be valid according to
|
||||
* rustsecp256k1_v0_9_0_ec_seckey_verify or 32 zero bytes. For uniformly
|
||||
* random 32-byte tweaks, the chance of being invalid is
|
||||
* negligible (around 1 in 2^128).
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_pubkey_tweak_add(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_pubkey *pubkey,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_ec_pubkey_tweak_add(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_pubkey *pubkey,
|
||||
const unsigned char *tweak32
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
|
@ -761,28 +757,28 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_pubkey_tw
|
|||
* Returns: 0 if the arguments are invalid. 1 otherwise.
|
||||
* Args: ctx: pointer to a context object.
|
||||
* In/Out: seckey: pointer to a 32-byte secret key. If the secret key is
|
||||
* invalid according to rustsecp256k1_v0_8_1_ec_seckey_verify, this
|
||||
* invalid according to rustsecp256k1_v0_9_0_ec_seckey_verify, this
|
||||
* function returns 0. seckey will be set to some unspecified
|
||||
* value if this function returns 0.
|
||||
* In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to
|
||||
* rustsecp256k1_v0_8_1_ec_seckey_verify, this function returns 0. For
|
||||
* rustsecp256k1_v0_9_0_ec_seckey_verify, this function returns 0. For
|
||||
* uniformly random 32-byte arrays the chance of being invalid
|
||||
* is negligible (around 1 in 2^128).
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_seckey_tweak_mul(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_ec_seckey_tweak_mul(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *seckey,
|
||||
const unsigned char *tweak32
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
/** Same as rustsecp256k1_v0_8_1_ec_seckey_tweak_mul, but DEPRECATED. Will be removed in
|
||||
/** Same as rustsecp256k1_v0_9_0_ec_seckey_tweak_mul, but DEPRECATED. Will be removed in
|
||||
* future versions. */
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_privkey_tweak_mul(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_ec_privkey_tweak_mul(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *seckey,
|
||||
const unsigned char *tweak32
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3)
|
||||
SECP256K1_DEPRECATED("Use rustsecp256k1_v0_8_1_ec_seckey_tweak_mul instead");
|
||||
SECP256K1_DEPRECATED("Use rustsecp256k1_v0_9_0_ec_seckey_tweak_mul instead");
|
||||
|
||||
/** Tweak a public key by multiplying it by a tweak value.
|
||||
*
|
||||
|
@ -791,22 +787,22 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_privkey_t
|
|||
* In/Out: pubkey: pointer to a public key object. pubkey will be set to an
|
||||
* invalid value if this function returns 0.
|
||||
* In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to
|
||||
* rustsecp256k1_v0_8_1_ec_seckey_verify, this function returns 0. For
|
||||
* rustsecp256k1_v0_9_0_ec_seckey_verify, this function returns 0. For
|
||||
* uniformly random 32-byte arrays the chance of being invalid
|
||||
* is negligible (around 1 in 2^128).
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_pubkey_tweak_mul(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_pubkey *pubkey,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_ec_pubkey_tweak_mul(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_pubkey *pubkey,
|
||||
const unsigned char *tweak32
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
/** Randomizes the context to provide enhanced protection against side-channel leakage.
|
||||
*
|
||||
* Returns: 1: randomization successful (or called on copy of rustsecp256k1_v0_8_1_context_static)
|
||||
* Returns: 1: randomization successful
|
||||
* 0: error
|
||||
* Args: ctx: pointer to a context object.
|
||||
* In: seed32: pointer to a 32-byte random seed (NULL resets to initial state)
|
||||
* Args: ctx: pointer to a context object (not rustsecp256k1_v0_9_0_context_static).
|
||||
* In: seed32: pointer to a 32-byte random seed (NULL resets to initial state).
|
||||
*
|
||||
* While secp256k1 code is written and tested to be constant-time no matter what
|
||||
* secret values are, it is possible that a compiler may output code which is not,
|
||||
|
@ -816,29 +812,25 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_pubkey_tw
|
|||
* certain computations which involve secret keys.
|
||||
*
|
||||
* It is highly recommended to call this function on contexts returned from
|
||||
* rustsecp256k1_v0_8_1_context_create or rustsecp256k1_v0_8_1_context_clone (or from the corresponding
|
||||
* functions in rustsecp256k1_v0_8_1_preallocated.h) before using these contexts to call API
|
||||
* rustsecp256k1_v0_9_0_context_create or rustsecp256k1_v0_9_0_context_clone (or from the corresponding
|
||||
* functions in rustsecp256k1_v0_9_0_preallocated.h) before using these contexts to call API
|
||||
* functions that perform computations involving secret keys, e.g., signing and
|
||||
* public key generation. It is possible to call this function more than once on
|
||||
* the same context, and doing so before every few computations involving secret
|
||||
* keys is recommended as a defense-in-depth measure.
|
||||
* keys is recommended as a defense-in-depth measure. Randomization of the static
|
||||
* context rustsecp256k1_v0_9_0_context_static is not supported.
|
||||
*
|
||||
* Currently, the random seed is mainly used for blinding multiplications of a
|
||||
* secret scalar with the elliptic curve base point. Multiplications of this
|
||||
* kind are performed by exactly those API functions which are documented to
|
||||
* require a context that is not the rustsecp256k1_v0_8_1_context_static. As a rule of thumb,
|
||||
* require a context that is not rustsecp256k1_v0_9_0_context_static. As a rule of thumb,
|
||||
* these are all functions which take a secret key (or a keypair) as an input.
|
||||
* A notable exception to that rule is the ECDH module, which relies on a different
|
||||
* kind of elliptic curve point multiplication and thus does not benefit from
|
||||
* enhanced protection against side-channel leakage currently.
|
||||
*
|
||||
* It is safe call this function on a copy of rustsecp256k1_v0_8_1_context_static in writable
|
||||
* memory (e.g., obtained via rustsecp256k1_v0_8_1_context_clone). In that case, this
|
||||
* function is guaranteed to return 1, but the call will have no effect because
|
||||
* the static context (or a copy thereof) is not meant to be randomized.
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_context_randomize(
|
||||
rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_context_randomize(
|
||||
rustsecp256k1_v0_9_0_context *ctx,
|
||||
const unsigned char *seed32
|
||||
) SECP256K1_ARG_NONNULL(1);
|
||||
|
||||
|
@ -851,10 +843,10 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_context_rand
|
|||
* In: ins: pointer to array of pointers to public keys.
|
||||
* n: the number of public keys to add together (must be at least 1).
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_pubkey_combine(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_pubkey *out,
|
||||
const rustsecp256k1_v0_8_1_pubkey * const * ins,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_ec_pubkey_combine(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_pubkey *out,
|
||||
const rustsecp256k1_v0_9_0_pubkey * const *ins,
|
||||
size_t n
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
|
@ -874,8 +866,8 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ec_pubkey_co
|
|||
* msg: pointer to an array containing the message
|
||||
* msglen: length of the message array
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_tagged_sha256(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_tagged_sha256(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *hash32,
|
||||
const unsigned char *tag,
|
||||
size_t taglen,
|
||||
|
|
|
@ -10,15 +10,15 @@ extern "C" {
|
|||
/** A pointer to a function that hashes an EC point to obtain an ECDH secret
|
||||
*
|
||||
* Returns: 1 if the point was successfully hashed.
|
||||
* 0 will cause rustsecp256k1_v0_8_1_ecdh to fail and return 0.
|
||||
* 0 will cause rustsecp256k1_v0_9_0_ecdh to fail and return 0.
|
||||
* Other return values are not allowed, and the behaviour of
|
||||
* rustsecp256k1_v0_8_1_ecdh is undefined for other return values.
|
||||
* rustsecp256k1_v0_9_0_ecdh is undefined for other return values.
|
||||
* Out: output: pointer to an array to be filled by the function
|
||||
* In: x32: pointer to a 32-byte x coordinate
|
||||
* y32: pointer to a 32-byte y coordinate
|
||||
* data: arbitrary data pointer that is passed through
|
||||
*/
|
||||
typedef int (*rustsecp256k1_v0_8_1_ecdh_hash_function)(
|
||||
typedef int (*rustsecp256k1_v0_9_0_ecdh_hash_function)(
|
||||
unsigned char *output,
|
||||
const unsigned char *x32,
|
||||
const unsigned char *y32,
|
||||
|
@ -27,11 +27,11 @@ typedef int (*rustsecp256k1_v0_8_1_ecdh_hash_function)(
|
|||
|
||||
/** An implementation of SHA256 hash function that applies to compressed public key.
|
||||
* Populates the output parameter with 32 bytes. */
|
||||
SECP256K1_API extern const rustsecp256k1_v0_8_1_ecdh_hash_function rustsecp256k1_v0_8_1_ecdh_hash_function_sha256;
|
||||
SECP256K1_API const rustsecp256k1_v0_9_0_ecdh_hash_function rustsecp256k1_v0_9_0_ecdh_hash_function_sha256;
|
||||
|
||||
/** A default ECDH hash function (currently equal to rustsecp256k1_v0_8_1_ecdh_hash_function_sha256).
|
||||
/** A default ECDH hash function (currently equal to rustsecp256k1_v0_9_0_ecdh_hash_function_sha256).
|
||||
* Populates the output parameter with 32 bytes. */
|
||||
SECP256K1_API extern const rustsecp256k1_v0_8_1_ecdh_hash_function rustsecp256k1_v0_8_1_ecdh_hash_function_default;
|
||||
SECP256K1_API const rustsecp256k1_v0_9_0_ecdh_hash_function rustsecp256k1_v0_9_0_ecdh_hash_function_default;
|
||||
|
||||
/** Compute an EC Diffie-Hellman secret in constant time
|
||||
*
|
||||
|
@ -39,20 +39,20 @@ SECP256K1_API extern const rustsecp256k1_v0_8_1_ecdh_hash_function rustsecp256k1
|
|||
* 0: scalar was invalid (zero or overflow) or hashfp returned 0
|
||||
* Args: ctx: pointer to a context object.
|
||||
* Out: output: pointer to an array to be filled by hashfp.
|
||||
* In: pubkey: a pointer to a rustsecp256k1_v0_8_1_pubkey containing an initialized public key.
|
||||
* In: pubkey: a pointer to a rustsecp256k1_v0_9_0_pubkey containing an initialized public key.
|
||||
* seckey: a 32-byte scalar with which to multiply the point.
|
||||
* hashfp: pointer to a hash function. If NULL,
|
||||
* rustsecp256k1_v0_8_1_ecdh_hash_function_sha256 is used
|
||||
* rustsecp256k1_v0_9_0_ecdh_hash_function_sha256 is used
|
||||
* (in which case, 32 bytes will be written to output).
|
||||
* data: arbitrary data pointer that is passed through to hashfp
|
||||
* (can be NULL for rustsecp256k1_v0_8_1_ecdh_hash_function_sha256).
|
||||
* (can be NULL for rustsecp256k1_v0_9_0_ecdh_hash_function_sha256).
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ecdh(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_ecdh(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *output,
|
||||
const rustsecp256k1_v0_8_1_pubkey *pubkey,
|
||||
const rustsecp256k1_v0_9_0_pubkey *pubkey,
|
||||
const unsigned char *seckey,
|
||||
rustsecp256k1_v0_8_1_ecdh_hash_function hashfp,
|
||||
rustsecp256k1_v0_9_0_ecdh_hash_function hashfp,
|
||||
void *data
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
|
||||
|
||||
|
|
|
@ -0,0 +1,200 @@
|
|||
#ifndef SECP256K1_ELLSWIFT_H
|
||||
#define SECP256K1_ELLSWIFT_H
|
||||
|
||||
#include "secp256k1.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* This module provides an implementation of ElligatorSwift as well as a
|
||||
* version of x-only ECDH using it (including compatibility with BIP324).
|
||||
*
|
||||
* ElligatorSwift is described in https://eprint.iacr.org/2022/759 by
|
||||
* Chavez-Saab, Rodriguez-Henriquez, and Tibouchi. It permits encoding
|
||||
* uniformly chosen public keys as 64-byte arrays which are indistinguishable
|
||||
* from uniformly random arrays.
|
||||
*
|
||||
* Let f be the function from pairs of field elements to point X coordinates,
|
||||
* defined as follows (all operations modulo p = 2^256 - 2^32 - 977)
|
||||
* f(u,t):
|
||||
* - Let C = 0xa2d2ba93507f1df233770c2a797962cc61f6d15da14ecd47d8d27ae1cd5f852,
|
||||
* a square root of -3.
|
||||
* - If u=0, set u=1 instead.
|
||||
* - If t=0, set t=1 instead.
|
||||
* - If u^3 + t^2 + 7 = 0, multiply t by 2.
|
||||
* - Let X = (u^3 + 7 - t^2) / (2 * t)
|
||||
* - Let Y = (X + t) / (C * u)
|
||||
* - Return the first in [u + 4 * Y^2, (-X/Y - u) / 2, (X/Y - u) / 2] that is an
|
||||
* X coordinate on the curve (at least one of them is, for any u and t).
|
||||
*
|
||||
* Then an ElligatorSwift encoding of x consists of the 32-byte big-endian
|
||||
* encodings of field elements u and t concatenated, where f(u,t) = x.
|
||||
* The encoding algorithm is described in the paper, and effectively picks a
|
||||
* uniformly random pair (u,t) among those which encode x.
|
||||
*
|
||||
* If the Y coordinate is relevant, it is given the same parity as t.
|
||||
*
|
||||
* Changes w.r.t. the the paper:
|
||||
* - The u=0, t=0, and u^3+t^2+7=0 conditions result in decoding to the point
|
||||
* at infinity in the paper. Here they are remapped to finite points.
|
||||
* - The paper uses an additional encoding bit for the parity of y. Here the
|
||||
* parity of t is used (negating t does not affect the decoded x coordinate,
|
||||
* so this is possible).
|
||||
*
|
||||
* For mathematical background about the scheme, see the doc/ellswift.md file.
|
||||
*/
|
||||
|
||||
/** A pointer to a function used by rustsecp256k1_v0_9_0_ellswift_xdh to hash the shared X
|
||||
* coordinate along with the encoded public keys to a uniform shared secret.
|
||||
*
|
||||
* Returns: 1 if a shared secret was successfully computed.
|
||||
* 0 will cause rustsecp256k1_v0_9_0_ellswift_xdh to fail and return 0.
|
||||
* Other return values are not allowed, and the behaviour of
|
||||
* rustsecp256k1_v0_9_0_ellswift_xdh is undefined for other return values.
|
||||
* Out: output: pointer to an array to be filled by the function
|
||||
* In: x32: pointer to the 32-byte serialized X coordinate
|
||||
* of the resulting shared point (will not be NULL)
|
||||
* ell_a64: pointer to the 64-byte encoded public key of party A
|
||||
* (will not be NULL)
|
||||
* ell_b64: pointer to the 64-byte encoded public key of party B
|
||||
* (will not be NULL)
|
||||
* data: arbitrary data pointer that is passed through
|
||||
*/
|
||||
typedef int (*rustsecp256k1_v0_9_0_ellswift_xdh_hash_function)(
|
||||
unsigned char *output,
|
||||
const unsigned char *x32,
|
||||
const unsigned char *ell_a64,
|
||||
const unsigned char *ell_b64,
|
||||
void *data
|
||||
);
|
||||
|
||||
/** An implementation of an rustsecp256k1_v0_9_0_ellswift_xdh_hash_function which uses
|
||||
* SHA256(prefix64 || ell_a64 || ell_b64 || x32), where prefix64 is the 64-byte
|
||||
* array pointed to by data. */
|
||||
SECP256K1_API const rustsecp256k1_v0_9_0_ellswift_xdh_hash_function rustsecp256k1_v0_9_0_ellswift_xdh_hash_function_prefix;
|
||||
|
||||
/** An implementation of an rustsecp256k1_v0_9_0_ellswift_xdh_hash_function compatible with
|
||||
* BIP324. It returns H_tag(ell_a64 || ell_b64 || x32), where H_tag is the
|
||||
* BIP340 tagged hash function with tag "bip324_ellswift_xonly_ecdh". Equivalent
|
||||
* to rustsecp256k1_v0_9_0_ellswift_xdh_hash_function_prefix with prefix64 set to
|
||||
* SHA256("bip324_ellswift_xonly_ecdh")||SHA256("bip324_ellswift_xonly_ecdh").
|
||||
* The data argument is ignored. */
|
||||
SECP256K1_API const rustsecp256k1_v0_9_0_ellswift_xdh_hash_function rustsecp256k1_v0_9_0_ellswift_xdh_hash_function_bip324;
|
||||
|
||||
/** Construct a 64-byte ElligatorSwift encoding of a given pubkey.
|
||||
*
|
||||
* Returns: 1 always.
|
||||
* Args: ctx: pointer to a context object
|
||||
* Out: ell64: pointer to a 64-byte array to be filled
|
||||
* In: pubkey: a pointer to a rustsecp256k1_v0_9_0_pubkey containing an
|
||||
* initialized public key
|
||||
* rnd32: pointer to 32 bytes of randomness
|
||||
*
|
||||
* It is recommended that rnd32 consists of 32 uniformly random bytes, not
|
||||
* known to any adversary trying to detect whether public keys are being
|
||||
* encoded, though 16 bytes of randomness (padded to an array of 32 bytes,
|
||||
* e.g., with zeros) suffice to make the result indistinguishable from
|
||||
* uniform. The randomness in rnd32 must not be a deterministic function of
|
||||
* the pubkey (it can be derived from the private key, though).
|
||||
*
|
||||
* It is not guaranteed that the computed encoding is stable across versions
|
||||
* of the library, even if all arguments to this function (including rnd32)
|
||||
* are the same.
|
||||
*
|
||||
* This function runs in variable time.
|
||||
*/
|
||||
SECP256K1_API int rustsecp256k1_v0_9_0_ellswift_encode(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *ell64,
|
||||
const rustsecp256k1_v0_9_0_pubkey *pubkey,
|
||||
const unsigned char *rnd32
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
|
||||
|
||||
/** Decode a 64-bytes ElligatorSwift encoded public key.
|
||||
*
|
||||
* Returns: always 1
|
||||
* Args: ctx: pointer to a context object
|
||||
* Out: pubkey: pointer to a rustsecp256k1_v0_9_0_pubkey that will be filled
|
||||
* In: ell64: pointer to a 64-byte array to decode
|
||||
*
|
||||
* This function runs in variable time.
|
||||
*/
|
||||
SECP256K1_API int rustsecp256k1_v0_9_0_ellswift_decode(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_pubkey *pubkey,
|
||||
const unsigned char *ell64
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
/** Compute an ElligatorSwift public key for a secret key.
|
||||
*
|
||||
* Returns: 1: secret was valid, public key was stored.
|
||||
* 0: secret was invalid, try again.
|
||||
* Args: ctx: pointer to a context object
|
||||
* Out: ell64: pointer to a 64-byte array to receive the ElligatorSwift
|
||||
* public key
|
||||
* In: seckey32: pointer to a 32-byte secret key
|
||||
* auxrnd32: (optional) pointer to 32 bytes of randomness
|
||||
*
|
||||
* Constant time in seckey and auxrnd32, but not in the resulting public key.
|
||||
*
|
||||
* It is recommended that auxrnd32 contains 32 uniformly random bytes, though
|
||||
* it is optional (and does result in encodings that are indistinguishable from
|
||||
* uniform even without any auxrnd32). It differs from the (mandatory) rnd32
|
||||
* argument to rustsecp256k1_v0_9_0_ellswift_encode in this regard.
|
||||
*
|
||||
* This function can be used instead of calling rustsecp256k1_v0_9_0_ec_pubkey_create
|
||||
* followed by rustsecp256k1_v0_9_0_ellswift_encode. It is safer, as it uses the secret
|
||||
* key as entropy for the encoding (supplemented with auxrnd32, if provided).
|
||||
*
|
||||
* Like rustsecp256k1_v0_9_0_ellswift_encode, this function does not guarantee that the
|
||||
* computed encoding is stable across versions of the library, even if all
|
||||
* arguments (including auxrnd32) are the same.
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_ellswift_create(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *ell64,
|
||||
const unsigned char *seckey32,
|
||||
const unsigned char *auxrnd32
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
/** Given a private key, and ElligatorSwift public keys sent in both directions,
|
||||
* compute a shared secret using x-only Elliptic Curve Diffie-Hellman (ECDH).
|
||||
*
|
||||
* Returns: 1: shared secret was successfully computed
|
||||
* 0: secret was invalid or hashfp returned 0
|
||||
* Args: ctx: pointer to a context object.
|
||||
* Out: output: pointer to an array to be filled by hashfp.
|
||||
* In: ell_a64: pointer to the 64-byte encoded public key of party A
|
||||
* (will not be NULL)
|
||||
* ell_b64: pointer to the 64-byte encoded public key of party B
|
||||
* (will not be NULL)
|
||||
* seckey32: a pointer to our 32-byte secret key
|
||||
* party: boolean indicating which party we are: zero if we are
|
||||
* party A, non-zero if we are party B. seckey32 must be
|
||||
* the private key corresponding to that party's ell_?64.
|
||||
* This correspondence is not checked.
|
||||
* hashfp: pointer to a hash function.
|
||||
* data: arbitrary data pointer passed through to hashfp.
|
||||
*
|
||||
* Constant time in seckey32.
|
||||
*
|
||||
* This function is more efficient than decoding the public keys, and performing
|
||||
* ECDH on them.
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_ellswift_xdh(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *output,
|
||||
const unsigned char *ell_a64,
|
||||
const unsigned char *ell_b64,
|
||||
const unsigned char *seckey32,
|
||||
int party,
|
||||
rustsecp256k1_v0_9_0_ellswift_xdh_hash_function hashfp,
|
||||
void *data
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5) SECP256K1_ARG_NONNULL(7);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* SECP256K1_ELLSWIFT_H */
|
|
@ -16,12 +16,12 @@ extern "C" {
|
|||
* guaranteed to be portable between different platforms or versions. It is
|
||||
* however guaranteed to be 64 bytes in size, and can be safely copied/moved.
|
||||
* If you need to convert to a format suitable for storage, transmission, use
|
||||
* use rustsecp256k1_v0_8_1_xonly_pubkey_serialize and rustsecp256k1_v0_8_1_xonly_pubkey_parse. To
|
||||
* compare keys, use rustsecp256k1_v0_8_1_xonly_pubkey_cmp.
|
||||
* use rustsecp256k1_v0_9_0_xonly_pubkey_serialize and rustsecp256k1_v0_9_0_xonly_pubkey_parse. To
|
||||
* compare keys, use rustsecp256k1_v0_9_0_xonly_pubkey_cmp.
|
||||
*/
|
||||
typedef struct {
|
||||
unsigned char data[64];
|
||||
} rustsecp256k1_v0_8_1_xonly_pubkey;
|
||||
} rustsecp256k1_v0_9_0_xonly_pubkey;
|
||||
|
||||
/** Opaque data structure that holds a keypair consisting of a secret and a
|
||||
* public key.
|
||||
|
@ -32,7 +32,7 @@ typedef struct {
|
|||
*/
|
||||
typedef struct {
|
||||
unsigned char data[96];
|
||||
} rustsecp256k1_v0_8_1_keypair;
|
||||
} rustsecp256k1_v0_9_0_keypair;
|
||||
|
||||
/** Parse a 32-byte sequence into a xonly_pubkey object.
|
||||
*
|
||||
|
@ -44,9 +44,9 @@ typedef struct {
|
|||
* parsed version of input. If not, it's set to an invalid value.
|
||||
* In: input32: pointer to a serialized xonly_pubkey.
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_xonly_pubkey_parse(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_xonly_pubkey* pubkey,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_xonly_pubkey_parse(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_xonly_pubkey *pubkey,
|
||||
const unsigned char *input32
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
|
@ -56,12 +56,12 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_xonly_pubkey
|
|||
*
|
||||
* Args: ctx: a secp256k1 context object.
|
||||
* Out: output32: a pointer to a 32-byte array to place the serialized key in.
|
||||
* In: pubkey: a pointer to a rustsecp256k1_v0_8_1_xonly_pubkey containing an initialized public key.
|
||||
* In: pubkey: a pointer to a rustsecp256k1_v0_9_0_xonly_pubkey containing an initialized public key.
|
||||
*/
|
||||
SECP256K1_API int rustsecp256k1_v0_8_1_xonly_pubkey_serialize(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API int rustsecp256k1_v0_9_0_xonly_pubkey_serialize(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *output32,
|
||||
const rustsecp256k1_v0_8_1_xonly_pubkey* pubkey
|
||||
const rustsecp256k1_v0_9_0_xonly_pubkey *pubkey
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
/** Compare two x-only public keys using lexicographic order
|
||||
|
@ -73,13 +73,13 @@ SECP256K1_API int rustsecp256k1_v0_8_1_xonly_pubkey_serialize(
|
|||
* In: pubkey1: first public key to compare
|
||||
* pubkey2: second public key to compare
|
||||
*/
|
||||
SECP256K1_API int rustsecp256k1_v0_8_1_xonly_pubkey_cmp(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
const rustsecp256k1_v0_8_1_xonly_pubkey* pk1,
|
||||
const rustsecp256k1_v0_8_1_xonly_pubkey* pk2
|
||||
SECP256K1_API int rustsecp256k1_v0_9_0_xonly_pubkey_cmp(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
const rustsecp256k1_v0_9_0_xonly_pubkey *pk1,
|
||||
const rustsecp256k1_v0_9_0_xonly_pubkey *pk2
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
/** Converts a rustsecp256k1_v0_8_1_pubkey into a rustsecp256k1_v0_8_1_xonly_pubkey.
|
||||
/** Converts a rustsecp256k1_v0_9_0_pubkey into a rustsecp256k1_v0_9_0_xonly_pubkey.
|
||||
*
|
||||
* Returns: 1 always.
|
||||
*
|
||||
|
@ -90,11 +90,11 @@ SECP256K1_API int rustsecp256k1_v0_8_1_xonly_pubkey_cmp(
|
|||
* the negation of the pubkey and set to 0 otherwise.
|
||||
* In: pubkey: pointer to a public key that is converted.
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_xonly_pubkey_from_pubkey(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_xonly_pubkey *xonly_pubkey,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_xonly_pubkey_from_pubkey(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_xonly_pubkey *xonly_pubkey,
|
||||
int *pk_parity,
|
||||
const rustsecp256k1_v0_8_1_pubkey *pubkey
|
||||
const rustsecp256k1_v0_9_0_pubkey *pubkey
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4);
|
||||
|
||||
/** Tweak an x-only public key by adding the generator multiplied with tweak32
|
||||
|
@ -102,7 +102,7 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_xonly_pubkey
|
|||
*
|
||||
* Note that the resulting point can not in general be represented by an x-only
|
||||
* pubkey because it may have an odd Y coordinate. Instead, the output_pubkey
|
||||
* is a normal rustsecp256k1_v0_8_1_pubkey.
|
||||
* is a normal rustsecp256k1_v0_9_0_pubkey.
|
||||
*
|
||||
* Returns: 0 if the arguments are invalid or the resulting public key would be
|
||||
* invalid (only when the tweak is the negation of the corresponding
|
||||
|
@ -112,24 +112,24 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_xonly_pubkey
|
|||
* Out: output_pubkey: pointer to a public key to store the result. Will be set
|
||||
* to an invalid value if this function returns 0.
|
||||
* In: internal_pubkey: pointer to an x-only pubkey to apply the tweak to.
|
||||
* tweak32: pointer to a 32-byte tweak. If the tweak is invalid
|
||||
* according to rustsecp256k1_v0_8_1_ec_seckey_verify, this function
|
||||
* returns 0. For uniformly random 32-byte arrays the
|
||||
* chance of being invalid is negligible (around 1 in 2^128).
|
||||
* tweak32: pointer to a 32-byte tweak, which must be valid
|
||||
* according to rustsecp256k1_v0_9_0_ec_seckey_verify or 32 zero
|
||||
* bytes. For uniformly random 32-byte tweaks, the chance of
|
||||
* being invalid is negligible (around 1 in 2^128).
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_xonly_pubkey_tweak_add(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_pubkey *output_pubkey,
|
||||
const rustsecp256k1_v0_8_1_xonly_pubkey *internal_pubkey,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_xonly_pubkey_tweak_add(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_pubkey *output_pubkey,
|
||||
const rustsecp256k1_v0_9_0_xonly_pubkey *internal_pubkey,
|
||||
const unsigned char *tweak32
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
|
||||
|
||||
/** Checks that a tweaked pubkey is the result of calling
|
||||
* rustsecp256k1_v0_8_1_xonly_pubkey_tweak_add with internal_pubkey and tweak32.
|
||||
* rustsecp256k1_v0_9_0_xonly_pubkey_tweak_add with internal_pubkey and tweak32.
|
||||
*
|
||||
* The tweaked pubkey is represented by its 32-byte x-only serialization and
|
||||
* its pk_parity, which can both be obtained by converting the result of
|
||||
* tweak_add to a rustsecp256k1_v0_8_1_xonly_pubkey.
|
||||
* tweak_add to a rustsecp256k1_v0_9_0_xonly_pubkey.
|
||||
*
|
||||
* Note that this alone does _not_ verify that the tweaked pubkey is a
|
||||
* commitment. If the tweak is not chosen in a specific way, the tweaked pubkey
|
||||
|
@ -142,16 +142,16 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_xonly_pubkey
|
|||
* tweaked_pk_parity: the parity of the tweaked pubkey (whose serialization
|
||||
* is passed in as tweaked_pubkey32). This must match the
|
||||
* pk_parity value that is returned when calling
|
||||
* rustsecp256k1_v0_8_1_xonly_pubkey with the tweaked pubkey, or
|
||||
* rustsecp256k1_v0_9_0_xonly_pubkey with the tweaked pubkey, or
|
||||
* this function will fail.
|
||||
* internal_pubkey: pointer to an x-only public key object to apply the tweak to.
|
||||
* tweak32: pointer to a 32-byte tweak.
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_xonly_pubkey_tweak_add_check(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_xonly_pubkey_tweak_add_check(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
const unsigned char *tweaked_pubkey32,
|
||||
int tweaked_pk_parity,
|
||||
const rustsecp256k1_v0_8_1_xonly_pubkey *internal_pubkey,
|
||||
const rustsecp256k1_v0_9_0_xonly_pubkey *internal_pubkey,
|
||||
const unsigned char *tweak32
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5);
|
||||
|
||||
|
@ -159,13 +159,13 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_xonly_pubkey
|
|||
*
|
||||
* Returns: 1: secret was valid, keypair is ready to use
|
||||
* 0: secret was invalid, try again with a different secret
|
||||
* Args: ctx: pointer to a context object (not rustsecp256k1_v0_8_1_context_static).
|
||||
* Args: ctx: pointer to a context object (not rustsecp256k1_v0_9_0_context_static).
|
||||
* Out: keypair: pointer to the created keypair.
|
||||
* In: seckey: pointer to a 32-byte secret key.
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_keypair_create(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_keypair *keypair,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_keypair_create(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_keypair *keypair,
|
||||
const unsigned char *seckey
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
|
@ -176,53 +176,51 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_keypair_crea
|
|||
* Out: seckey: pointer to a 32-byte buffer for the secret key.
|
||||
* In: keypair: pointer to a keypair.
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_keypair_sec(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_keypair_sec(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *seckey,
|
||||
const rustsecp256k1_v0_8_1_keypair *keypair
|
||||
const rustsecp256k1_v0_9_0_keypair *keypair
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
/** Get the public key from a keypair.
|
||||
*
|
||||
* Returns: 1 always.
|
||||
* Args: ctx: pointer to a context object.
|
||||
* Out: pubkey: pointer to a pubkey object. If 1 is returned, it is set to
|
||||
* the keypair public key. If not, it's set to an invalid value.
|
||||
* Out: pubkey: pointer to a pubkey object, set to the keypair public key.
|
||||
* In: keypair: pointer to a keypair.
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_keypair_pub(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_pubkey *pubkey,
|
||||
const rustsecp256k1_v0_8_1_keypair *keypair
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_keypair_pub(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_pubkey *pubkey,
|
||||
const rustsecp256k1_v0_9_0_keypair *keypair
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
/** Get the x-only public key from a keypair.
|
||||
*
|
||||
* This is the same as calling rustsecp256k1_v0_8_1_keypair_pub and then
|
||||
* rustsecp256k1_v0_8_1_xonly_pubkey_from_pubkey.
|
||||
* This is the same as calling rustsecp256k1_v0_9_0_keypair_pub and then
|
||||
* rustsecp256k1_v0_9_0_xonly_pubkey_from_pubkey.
|
||||
*
|
||||
* Returns: 1 always.
|
||||
* Args: ctx: pointer to a context object.
|
||||
* Out: pubkey: pointer to an xonly_pubkey object. If 1 is returned, it is set
|
||||
* to the keypair public key after converting it to an
|
||||
* xonly_pubkey. If not, it's set to an invalid value.
|
||||
* Out: pubkey: pointer to an xonly_pubkey object, set to the keypair
|
||||
* public key after converting it to an xonly_pubkey.
|
||||
* pk_parity: Ignored if NULL. Otherwise, pointer to an integer that will be set to the
|
||||
* pk_parity argument of rustsecp256k1_v0_8_1_xonly_pubkey_from_pubkey.
|
||||
* pk_parity argument of rustsecp256k1_v0_9_0_xonly_pubkey_from_pubkey.
|
||||
* In: keypair: pointer to a keypair.
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_keypair_xonly_pub(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_xonly_pubkey *pubkey,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_keypair_xonly_pub(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_xonly_pubkey *pubkey,
|
||||
int *pk_parity,
|
||||
const rustsecp256k1_v0_8_1_keypair *keypair
|
||||
const rustsecp256k1_v0_9_0_keypair *keypair
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4);
|
||||
|
||||
/** Tweak a keypair by adding tweak32 to the secret key and updating the public
|
||||
* key accordingly.
|
||||
*
|
||||
* Calling this function and then rustsecp256k1_v0_8_1_keypair_pub results in the same
|
||||
* public key as calling rustsecp256k1_v0_8_1_keypair_xonly_pub and then
|
||||
* rustsecp256k1_v0_8_1_xonly_pubkey_tweak_add.
|
||||
* Calling this function and then rustsecp256k1_v0_9_0_keypair_pub results in the same
|
||||
* public key as calling rustsecp256k1_v0_9_0_keypair_xonly_pub and then
|
||||
* rustsecp256k1_v0_9_0_xonly_pubkey_tweak_add.
|
||||
*
|
||||
* Returns: 0 if the arguments are invalid or the resulting keypair would be
|
||||
* invalid (only when the tweak is the negation of the keypair's
|
||||
|
@ -231,14 +229,14 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_keypair_xonl
|
|||
* Args: ctx: pointer to a context object.
|
||||
* In/Out: keypair: pointer to a keypair to apply the tweak to. Will be set to
|
||||
* an invalid value if this function returns 0.
|
||||
* In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according
|
||||
* to rustsecp256k1_v0_8_1_ec_seckey_verify, this function returns 0. For
|
||||
* uniformly random 32-byte arrays the chance of being invalid
|
||||
* is negligible (around 1 in 2^128).
|
||||
* In: tweak32: pointer to a 32-byte tweak, which must be valid according to
|
||||
* rustsecp256k1_v0_9_0_ec_seckey_verify or 32 zero bytes. For uniformly
|
||||
* random 32-byte tweaks, the chance of being invalid is
|
||||
* negligible (around 1 in 2^128).
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_keypair_xonly_tweak_add(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_keypair *keypair,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_keypair_xonly_tweak_add(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_keypair *keypair,
|
||||
const unsigned char *tweak32
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
|
|
|
@ -16,8 +16,8 @@ extern "C" {
|
|||
* objects created by functions in secp256k1.h, i.e., they can be passed to any
|
||||
* API function that expects a context object (see secp256k1.h for details). The
|
||||
* only exception is that context objects created by functions in this module
|
||||
* must be destroyed using rustsecp256k1_v0_8_1_context_preallocated_destroy (in this
|
||||
* module) instead of rustsecp256k1_v0_8_1_context_destroy (in secp256k1.h).
|
||||
* must be destroyed using rustsecp256k1_v0_9_0_context_preallocated_destroy (in this
|
||||
* module) instead of rustsecp256k1_v0_9_0_context_destroy (in secp256k1.h).
|
||||
*
|
||||
* It is guaranteed that functions in this module will not call malloc or its
|
||||
* friends realloc, calloc, and free.
|
||||
|
@ -27,24 +27,24 @@ extern "C" {
|
|||
* caller-provided memory.
|
||||
*
|
||||
* The purpose of this function is to determine how much memory must be provided
|
||||
* to rustsecp256k1_v0_8_1_context_preallocated_create.
|
||||
* to rustsecp256k1_v0_9_0_context_preallocated_create.
|
||||
*
|
||||
* Returns: the required size of the caller-provided memory block
|
||||
* In: flags: which parts of the context to initialize.
|
||||
*/
|
||||
SECP256K1_API size_t rustsecp256k1_v0_8_1_context_preallocated_size(
|
||||
SECP256K1_API size_t rustsecp256k1_v0_9_0_context_preallocated_size(
|
||||
unsigned int flags
|
||||
) SECP256K1_WARN_UNUSED_RESULT;
|
||||
|
||||
/** Create a secp256k1 context object in caller-provided memory.
|
||||
*
|
||||
* The caller must provide a pointer to a rewritable contiguous block of memory
|
||||
* of size at least rustsecp256k1_v0_8_1_context_preallocated_size(flags) bytes, suitably
|
||||
* of size at least rustsecp256k1_v0_9_0_context_preallocated_size(flags) bytes, suitably
|
||||
* aligned to hold an object of any type.
|
||||
*
|
||||
* The block of memory is exclusively owned by the created context object during
|
||||
* the lifetime of this context object, which begins with the call to this
|
||||
* function and ends when a call to rustsecp256k1_v0_8_1_context_preallocated_destroy
|
||||
* function and ends when a call to rustsecp256k1_v0_9_0_context_preallocated_destroy
|
||||
* (which destroys the context object again) returns. During the lifetime of the
|
||||
* context object, the caller is obligated not to access this block of memory,
|
||||
* i.e., the caller may not read or write the memory, e.g., by copying the memory
|
||||
|
@ -54,16 +54,16 @@ SECP256K1_API size_t rustsecp256k1_v0_8_1_context_preallocated_size(
|
|||
*
|
||||
* Returns: a newly created context object.
|
||||
* In: prealloc: a pointer to a rewritable contiguous block of memory of
|
||||
* size at least rustsecp256k1_v0_8_1_context_preallocated_size(flags)
|
||||
* size at least rustsecp256k1_v0_9_0_context_preallocated_size(flags)
|
||||
* bytes, as detailed above.
|
||||
* flags: which parts of the context to initialize.
|
||||
*
|
||||
* See rustsecp256k1_v0_8_1_context_create (in secp256k1.h) for further details.
|
||||
* See rustsecp256k1_v0_9_0_context_create (in secp256k1.h) for further details.
|
||||
*
|
||||
* See also rustsecp256k1_v0_8_1_context_randomize (in secp256k1.h)
|
||||
* and rustsecp256k1_v0_8_1_context_preallocated_destroy.
|
||||
* See also rustsecp256k1_v0_9_0_context_randomize (in secp256k1.h)
|
||||
* and rustsecp256k1_v0_9_0_context_preallocated_destroy.
|
||||
*/
|
||||
SECP256K1_API rustsecp256k1_v0_8_1_context* rustsecp256k1_v0_8_1_context_preallocated_create(
|
||||
SECP256K1_API rustsecp256k1_v0_9_0_context *rustsecp256k1_v0_9_0_context_preallocated_create(
|
||||
void *prealloc,
|
||||
unsigned int flags
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT;
|
||||
|
@ -74,28 +74,31 @@ SECP256K1_API rustsecp256k1_v0_8_1_context* rustsecp256k1_v0_8_1_context_preallo
|
|||
* Returns: the required size of the caller-provided memory block.
|
||||
* In: ctx: an existing context to copy.
|
||||
*/
|
||||
SECP256K1_API size_t rustsecp256k1_v0_8_1_context_preallocated_clone_size(
|
||||
const rustsecp256k1_v0_8_1_context* ctx
|
||||
SECP256K1_API size_t rustsecp256k1_v0_9_0_context_preallocated_clone_size(
|
||||
const rustsecp256k1_v0_9_0_context *ctx
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT;
|
||||
|
||||
/** Copy a secp256k1 context object into caller-provided memory.
|
||||
*
|
||||
* The caller must provide a pointer to a rewritable contiguous block of memory
|
||||
* of size at least rustsecp256k1_v0_8_1_context_preallocated_size(flags) bytes, suitably
|
||||
* of size at least rustsecp256k1_v0_9_0_context_preallocated_size(flags) bytes, suitably
|
||||
* aligned to hold an object of any type.
|
||||
*
|
||||
* The block of memory is exclusively owned by the created context object during
|
||||
* the lifetime of this context object, see the description of
|
||||
* rustsecp256k1_v0_8_1_context_preallocated_create for details.
|
||||
* rustsecp256k1_v0_9_0_context_preallocated_create for details.
|
||||
*
|
||||
* Cloning rustsecp256k1_v0_9_0_context_static is not possible, and should not be emulated by
|
||||
* the caller (e.g., using memcpy). Create a new context instead.
|
||||
*
|
||||
* Returns: a newly created context object.
|
||||
* Args: ctx: an existing context to copy.
|
||||
* Args: ctx: an existing context to copy (not rustsecp256k1_v0_9_0_context_static).
|
||||
* In: prealloc: a pointer to a rewritable contiguous block of memory of
|
||||
* size at least rustsecp256k1_v0_8_1_context_preallocated_size(flags)
|
||||
* size at least rustsecp256k1_v0_9_0_context_preallocated_size(flags)
|
||||
* bytes, as detailed above.
|
||||
*/
|
||||
SECP256K1_API rustsecp256k1_v0_8_1_context* rustsecp256k1_v0_8_1_context_preallocated_clone(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API rustsecp256k1_v0_9_0_context *rustsecp256k1_v0_9_0_context_preallocated_clone(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
void *prealloc
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_WARN_UNUSED_RESULT;
|
||||
|
||||
|
@ -105,22 +108,23 @@ SECP256K1_API rustsecp256k1_v0_8_1_context* rustsecp256k1_v0_8_1_context_preallo
|
|||
* The context pointer may not be used afterwards.
|
||||
*
|
||||
* The context to destroy must have been created using
|
||||
* rustsecp256k1_v0_8_1_context_preallocated_create or rustsecp256k1_v0_8_1_context_preallocated_clone.
|
||||
* If the context has instead been created using rustsecp256k1_v0_8_1_context_create or
|
||||
* rustsecp256k1_v0_8_1_context_clone, the behaviour is undefined. In that case,
|
||||
* rustsecp256k1_v0_8_1_context_destroy must be used instead.
|
||||
* rustsecp256k1_v0_9_0_context_preallocated_create or rustsecp256k1_v0_9_0_context_preallocated_clone.
|
||||
* If the context has instead been created using rustsecp256k1_v0_9_0_context_create or
|
||||
* rustsecp256k1_v0_9_0_context_clone, the behaviour is undefined. In that case,
|
||||
* rustsecp256k1_v0_9_0_context_destroy must be used instead.
|
||||
*
|
||||
* If required, it is the responsibility of the caller to deallocate the block
|
||||
* of memory properly after this function returns, e.g., by calling free on the
|
||||
* preallocated pointer given to rustsecp256k1_v0_8_1_context_preallocated_create or
|
||||
* rustsecp256k1_v0_8_1_context_preallocated_clone.
|
||||
* preallocated pointer given to rustsecp256k1_v0_9_0_context_preallocated_create or
|
||||
* rustsecp256k1_v0_9_0_context_preallocated_clone.
|
||||
*
|
||||
* Args: ctx: an existing context to destroy, constructed using
|
||||
* rustsecp256k1_v0_8_1_context_preallocated_create or
|
||||
* rustsecp256k1_v0_8_1_context_preallocated_clone.
|
||||
* rustsecp256k1_v0_9_0_context_preallocated_create or
|
||||
* rustsecp256k1_v0_9_0_context_preallocated_clone
|
||||
* (i.e., not rustsecp256k1_v0_9_0_context_static).
|
||||
*/
|
||||
SECP256K1_API void rustsecp256k1_v0_8_1_context_preallocated_destroy(
|
||||
rustsecp256k1_v0_8_1_context* ctx
|
||||
SECP256K1_API void rustsecp256k1_v0_9_0_context_preallocated_destroy(
|
||||
rustsecp256k1_v0_9_0_context *ctx
|
||||
) SECP256K1_ARG_NONNULL(1);
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -14,8 +14,8 @@ extern "C" {
|
|||
* guaranteed to be portable between different platforms or versions. It is
|
||||
* however guaranteed to be 65 bytes in size, and can be safely copied/moved.
|
||||
* If you need to convert to a format suitable for storage or transmission, use
|
||||
* the rustsecp256k1_v0_8_1_ecdsa_signature_serialize_* and
|
||||
* rustsecp256k1_v0_8_1_ecdsa_signature_parse_* functions.
|
||||
* the rustsecp256k1_v0_9_0_ecdsa_signature_serialize_* and
|
||||
* rustsecp256k1_v0_9_0_ecdsa_signature_parse_* functions.
|
||||
*
|
||||
* Furthermore, it is guaranteed that identical signatures (including their
|
||||
* recoverability) will have identical representation, so they can be
|
||||
|
@ -23,7 +23,7 @@ extern "C" {
|
|||
*/
|
||||
typedef struct {
|
||||
unsigned char data[65];
|
||||
} rustsecp256k1_v0_8_1_ecdsa_recoverable_signature;
|
||||
} rustsecp256k1_v0_9_0_ecdsa_recoverable_signature;
|
||||
|
||||
/** Parse a compact ECDSA signature (64 bytes + recovery id).
|
||||
*
|
||||
|
@ -33,9 +33,9 @@ typedef struct {
|
|||
* In: input64: a pointer to a 64-byte compact signature
|
||||
* recid: the recovery id (0, 1, 2 or 3)
|
||||
*/
|
||||
SECP256K1_API int rustsecp256k1_v0_8_1_ecdsa_recoverable_signature_parse_compact(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_ecdsa_recoverable_signature* sig,
|
||||
SECP256K1_API int rustsecp256k1_v0_9_0_ecdsa_recoverable_signature_parse_compact(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_ecdsa_recoverable_signature *sig,
|
||||
const unsigned char *input64,
|
||||
int recid
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
@ -47,10 +47,10 @@ SECP256K1_API int rustsecp256k1_v0_8_1_ecdsa_recoverable_signature_parse_compact
|
|||
* Out: sig: a pointer to a normal signature.
|
||||
* In: sigin: a pointer to a recoverable signature.
|
||||
*/
|
||||
SECP256K1_API int rustsecp256k1_v0_8_1_ecdsa_recoverable_signature_convert(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_ecdsa_signature* sig,
|
||||
const rustsecp256k1_v0_8_1_ecdsa_recoverable_signature* sigin
|
||||
SECP256K1_API int rustsecp256k1_v0_9_0_ecdsa_recoverable_signature_convert(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_ecdsa_signature *sig,
|
||||
const rustsecp256k1_v0_9_0_ecdsa_recoverable_signature *sigin
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
|
||||
|
||||
/** Serialize an ECDSA signature in compact format (64 bytes + recovery id).
|
||||
|
@ -61,32 +61,32 @@ SECP256K1_API int rustsecp256k1_v0_8_1_ecdsa_recoverable_signature_convert(
|
|||
* recid: a pointer to an integer to hold the recovery id.
|
||||
* In: sig: a pointer to an initialized signature object.
|
||||
*/
|
||||
SECP256K1_API int rustsecp256k1_v0_8_1_ecdsa_recoverable_signature_serialize_compact(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API int rustsecp256k1_v0_9_0_ecdsa_recoverable_signature_serialize_compact(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *output64,
|
||||
int *recid,
|
||||
const rustsecp256k1_v0_8_1_ecdsa_recoverable_signature* sig
|
||||
const rustsecp256k1_v0_9_0_ecdsa_recoverable_signature *sig
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
|
||||
|
||||
/** Create a recoverable ECDSA signature.
|
||||
*
|
||||
* Returns: 1: signature created
|
||||
* 0: the nonce generation function failed, or the secret key was invalid.
|
||||
* Args: ctx: pointer to a context object (not rustsecp256k1_v0_8_1_context_static).
|
||||
* Args: ctx: pointer to a context object (not rustsecp256k1_v0_9_0_context_static).
|
||||
* Out: sig: pointer to an array where the signature will be placed.
|
||||
* In: msghash32: the 32-byte message hash being signed.
|
||||
* seckey: pointer to a 32-byte secret key.
|
||||
* noncefp: pointer to a nonce generation function. If NULL,
|
||||
* rustsecp256k1_v0_8_1_nonce_function_default is used.
|
||||
* rustsecp256k1_v0_9_0_nonce_function_default is used.
|
||||
* ndata: pointer to arbitrary data used by the nonce generation function
|
||||
* (can be NULL for rustsecp256k1_v0_8_1_nonce_function_default).
|
||||
* (can be NULL for rustsecp256k1_v0_9_0_nonce_function_default).
|
||||
*/
|
||||
SECP256K1_API int rustsecp256k1_v0_8_1_ecdsa_sign_recoverable(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_ecdsa_recoverable_signature *sig,
|
||||
SECP256K1_API int rustsecp256k1_v0_9_0_ecdsa_sign_recoverable(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_ecdsa_recoverable_signature *sig,
|
||||
const unsigned char *msghash32,
|
||||
const unsigned char *seckey,
|
||||
rustsecp256k1_v0_8_1_nonce_function noncefp,
|
||||
rustsecp256k1_v0_9_0_nonce_function noncefp,
|
||||
const void *ndata
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
|
||||
|
||||
|
@ -99,10 +99,10 @@ SECP256K1_API int rustsecp256k1_v0_8_1_ecdsa_sign_recoverable(
|
|||
* In: sig: pointer to initialized signature that supports pubkey recovery.
|
||||
* msghash32: the 32-byte message hash assumed to be signed.
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_ecdsa_recover(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
rustsecp256k1_v0_8_1_pubkey *pubkey,
|
||||
const rustsecp256k1_v0_8_1_ecdsa_recoverable_signature *sig,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_ecdsa_recover(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
rustsecp256k1_v0_9_0_pubkey *pubkey,
|
||||
const rustsecp256k1_v0_9_0_ecdsa_recoverable_signature *sig,
|
||||
const unsigned char *msghash32
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
|
||||
|
||||
|
|
|
@ -15,7 +15,7 @@ extern "C" {
|
|||
|
||||
/** A pointer to a function to deterministically generate a nonce.
|
||||
*
|
||||
* Same as rustsecp256k1_v0_8_1_nonce function with the exception of accepting an
|
||||
* Same as rustsecp256k1_v0_9_0_nonce function with the exception of accepting an
|
||||
* additional pubkey argument and not requiring an attempt argument. The pubkey
|
||||
* argument can protect signature schemes with key-prefixed challenge hash
|
||||
* inputs against reusing the nonce when signing with the wrong precomputed
|
||||
|
@ -38,7 +38,7 @@ extern "C" {
|
|||
* Except for test cases, this function should compute some cryptographic hash of
|
||||
* the message, the key, the pubkey, the algorithm description, and data.
|
||||
*/
|
||||
typedef int (*rustsecp256k1_v0_8_1_nonce_function_hardened)(
|
||||
typedef int (*rustsecp256k1_v0_9_0_nonce_function_hardened)(
|
||||
unsigned char *nonce32,
|
||||
const unsigned char *msg,
|
||||
size_t msglen,
|
||||
|
@ -61,7 +61,7 @@ typedef int (*rustsecp256k1_v0_8_1_nonce_function_hardened)(
|
|||
* Therefore, to create BIP-340 compliant signatures, algo must be set to
|
||||
* "BIP0340/nonce" and algolen to 13.
|
||||
*/
|
||||
SECP256K1_API extern const rustsecp256k1_v0_8_1_nonce_function_hardened rustsecp256k1_v0_8_1_nonce_function_bip340;
|
||||
SECP256K1_API const rustsecp256k1_v0_9_0_nonce_function_hardened rustsecp256k1_v0_9_0_nonce_function_bip340;
|
||||
|
||||
/** Data structure that contains additional arguments for schnorrsig_sign_custom.
|
||||
*
|
||||
|
@ -73,17 +73,17 @@ SECP256K1_API extern const rustsecp256k1_v0_8_1_nonce_function_hardened rustsecp
|
|||
* and has no other function than making sure the object is
|
||||
* initialized.
|
||||
* noncefp: pointer to a nonce generation function. If NULL,
|
||||
* rustsecp256k1_v0_8_1_nonce_function_bip340 is used
|
||||
* rustsecp256k1_v0_9_0_nonce_function_bip340 is used
|
||||
* ndata: pointer to arbitrary data used by the nonce generation function
|
||||
* (can be NULL). If it is non-NULL and
|
||||
* rustsecp256k1_v0_8_1_nonce_function_bip340 is used, then ndata must be a
|
||||
* rustsecp256k1_v0_9_0_nonce_function_bip340 is used, then ndata must be a
|
||||
* pointer to 32-byte auxiliary randomness as per BIP-340.
|
||||
*/
|
||||
typedef struct {
|
||||
unsigned char magic[4];
|
||||
rustsecp256k1_v0_8_1_nonce_function_hardened noncefp;
|
||||
rustsecp256k1_v0_9_0_nonce_function_hardened noncefp;
|
||||
void *ndata;
|
||||
} rustsecp256k1_v0_8_1_schnorrsig_extraparams;
|
||||
} rustsecp256k1_v0_9_0_schnorrsig_extraparams;
|
||||
|
||||
#define SECP256K1_SCHNORRSIG_EXTRAPARAMS_MAGIC { 0xda, 0x6f, 0xb3, 0x8c }
|
||||
#define SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT {\
|
||||
|
@ -95,18 +95,18 @@ typedef struct {
|
|||
/** Create a Schnorr signature.
|
||||
*
|
||||
* Does _not_ strictly follow BIP-340 because it does not verify the resulting
|
||||
* signature. Instead, you can manually use rustsecp256k1_v0_8_1_schnorrsig_verify and
|
||||
* signature. Instead, you can manually use rustsecp256k1_v0_9_0_schnorrsig_verify and
|
||||
* abort if it fails.
|
||||
*
|
||||
* This function only signs 32-byte messages. If you have messages of a
|
||||
* different size (or the same size but without a context-specific tag
|
||||
* prefix), it is recommended to create a 32-byte message hash with
|
||||
* rustsecp256k1_v0_8_1_tagged_sha256 and then sign the hash. Tagged hashing allows
|
||||
* rustsecp256k1_v0_9_0_tagged_sha256 and then sign the hash. Tagged hashing allows
|
||||
* providing an context-specific tag for domain separation. This prevents
|
||||
* signatures from being valid in multiple contexts by accident.
|
||||
*
|
||||
* Returns 1 on success, 0 on failure.
|
||||
* Args: ctx: pointer to a context object (not rustsecp256k1_v0_8_1_context_static).
|
||||
* Args: ctx: pointer to a context object (not rustsecp256k1_v0_9_0_context_static).
|
||||
* Out: sig64: pointer to a 64-byte array to store the serialized signature.
|
||||
* In: msg32: the 32-byte message being signed.
|
||||
* keypair: pointer to an initialized keypair.
|
||||
|
@ -116,45 +116,53 @@ typedef struct {
|
|||
* BIP-340 "Default Signing" for a full explanation of this
|
||||
* argument and for guidance if randomness is expensive.
|
||||
*/
|
||||
SECP256K1_API int rustsecp256k1_v0_8_1_schnorrsig_sign32(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API int rustsecp256k1_v0_9_0_schnorrsig_sign32(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *sig64,
|
||||
const unsigned char *msg32,
|
||||
const rustsecp256k1_v0_8_1_keypair *keypair,
|
||||
const rustsecp256k1_v0_9_0_keypair *keypair,
|
||||
const unsigned char *aux_rand32
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
|
||||
|
||||
/** Same as rustsecp256k1_v0_8_1_schnorrsig_sign32, but DEPRECATED. Will be removed in
|
||||
/** Same as rustsecp256k1_v0_9_0_schnorrsig_sign32, but DEPRECATED. Will be removed in
|
||||
* future versions. */
|
||||
SECP256K1_API int rustsecp256k1_v0_8_1_schnorrsig_sign(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API int rustsecp256k1_v0_9_0_schnorrsig_sign(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *sig64,
|
||||
const unsigned char *msg32,
|
||||
const rustsecp256k1_v0_8_1_keypair *keypair,
|
||||
const rustsecp256k1_v0_9_0_keypair *keypair,
|
||||
const unsigned char *aux_rand32
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4)
|
||||
SECP256K1_DEPRECATED("Use rustsecp256k1_v0_8_1_schnorrsig_sign32 instead");
|
||||
SECP256K1_DEPRECATED("Use rustsecp256k1_v0_9_0_schnorrsig_sign32 instead");
|
||||
|
||||
/** Create a Schnorr signature with a more flexible API.
|
||||
*
|
||||
* Same arguments as rustsecp256k1_v0_8_1_schnorrsig_sign except that it allows signing
|
||||
* Same arguments as rustsecp256k1_v0_9_0_schnorrsig_sign except that it allows signing
|
||||
* variable length messages and accepts a pointer to an extraparams object that
|
||||
* allows customizing signing by passing additional arguments.
|
||||
*
|
||||
* Creates the same signatures as schnorrsig_sign if msglen is 32 and the
|
||||
* extraparams.ndata is the same as aux_rand32.
|
||||
* Equivalent to rustsecp256k1_v0_9_0_schnorrsig_sign32(..., aux_rand32) if msglen is 32
|
||||
* and extraparams is initialized as follows:
|
||||
* ```
|
||||
* rustsecp256k1_v0_9_0_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT;
|
||||
* extraparams.ndata = (unsigned char*)aux_rand32;
|
||||
* ```
|
||||
*
|
||||
* Returns 1 on success, 0 on failure.
|
||||
* Args: ctx: pointer to a context object (not rustsecp256k1_v0_9_0_context_static).
|
||||
* Out: sig64: pointer to a 64-byte array to store the serialized signature.
|
||||
* In: msg: the message being signed. Can only be NULL if msglen is 0.
|
||||
* msglen: length of the message
|
||||
* extraparams: pointer to a extraparams object (can be NULL)
|
||||
* msglen: length of the message.
|
||||
* keypair: pointer to an initialized keypair.
|
||||
* extraparams: pointer to an extraparams object (can be NULL).
|
||||
*/
|
||||
SECP256K1_API int rustsecp256k1_v0_8_1_schnorrsig_sign_custom(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API int rustsecp256k1_v0_9_0_schnorrsig_sign_custom(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
unsigned char *sig64,
|
||||
const unsigned char *msg,
|
||||
size_t msglen,
|
||||
const rustsecp256k1_v0_8_1_keypair *keypair,
|
||||
rustsecp256k1_v0_8_1_schnorrsig_extraparams *extraparams
|
||||
const rustsecp256k1_v0_9_0_keypair *keypair,
|
||||
rustsecp256k1_v0_9_0_schnorrsig_extraparams *extraparams
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(5);
|
||||
|
||||
/** Verify a Schnorr signature.
|
||||
|
@ -167,12 +175,12 @@ SECP256K1_API int rustsecp256k1_v0_8_1_schnorrsig_sign_custom(
|
|||
* msglen: length of the message
|
||||
* pubkey: pointer to an x-only public key to verify with (cannot be NULL)
|
||||
*/
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_1_schnorrsig_verify(
|
||||
const rustsecp256k1_v0_8_1_context* ctx,
|
||||
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_9_0_schnorrsig_verify(
|
||||
const rustsecp256k1_v0_9_0_context *ctx,
|
||||
const unsigned char *sig64,
|
||||
const unsigned char *msg,
|
||||
size_t msglen,
|
||||
const rustsecp256k1_v0_8_1_xonly_pubkey *pubkey
|
||||
const rustsecp256k1_v0_9_0_xonly_pubkey *pubkey
|
||||
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(5);
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -9,5 +9,4 @@ URL: https://github.com/bitcoin-core/secp256k1
|
|||
Version: @PACKAGE_VERSION@
|
||||
Cflags: -I${includedir}
|
||||
Libs: -L${libdir} -lsecp256k1
|
||||
Libs.private: @SECP_LIBS@
|
||||
|
||||
|
|
|
@ -1,8 +1,14 @@
|
|||
load("rustsecp256k1_v0_8_1_params.sage")
|
||||
load("rustsecp256k1_v0_9_0_params.sage")
|
||||
|
||||
MAX_ORDER = 1000
|
||||
|
||||
# Set of (curve) orders we have encountered so far.
|
||||
orders_done = set()
|
||||
results = {}
|
||||
first = True
|
||||
|
||||
# Map from (subgroup) orders to [b, int(gen.x), int(gen.y), gen, lambda] for those subgroups.
|
||||
solutions = {}
|
||||
|
||||
# Iterate over curves of the form y^2 = x^3 + B.
|
||||
for b in range(1, P):
|
||||
# There are only 6 curves (up to isomorphism) of the form y^2 = x^3 + B. Stop once we have tried all.
|
||||
if len(orders_done) == 6:
|
||||
|
@ -11,114 +17,140 @@ for b in range(1, P):
|
|||
E = EllipticCurve(F, [0, b])
|
||||
print("Analyzing curve y^2 = x^3 + %i" % b)
|
||||
n = E.order()
|
||||
|
||||
# Skip curves with an order we've already tried
|
||||
if n in orders_done:
|
||||
print("- Isomorphic to earlier curve")
|
||||
print()
|
||||
continue
|
||||
orders_done.add(n)
|
||||
|
||||
# Skip curves isomorphic to the real secp256k1
|
||||
if n.is_pseudoprime():
|
||||
assert E.is_isomorphic(C)
|
||||
print("- Isomorphic to secp256k1")
|
||||
print()
|
||||
continue
|
||||
|
||||
print("- Finding subgroups")
|
||||
print("- Finding prime subgroups")
|
||||
|
||||
# Find what prime subgroups exist
|
||||
for f, _ in n.factor():
|
||||
print("- Analyzing subgroup of order %i" % f)
|
||||
# Skip subgroups of order >1000
|
||||
if f < 4 or f > 1000:
|
||||
print(" - Bad size")
|
||||
# Map from group_order to a set of independent generators for that order.
|
||||
curve_gens = {}
|
||||
|
||||
for g in E.gens():
|
||||
# Find what prime subgroups of group generated by g exist.
|
||||
g_order = g.order()
|
||||
for f, _ in g.order().factor():
|
||||
# Skip subgroups that have bad size.
|
||||
if f < 4:
|
||||
print(f" - Subgroup of size {f}: too small")
|
||||
continue
|
||||
if f > MAX_ORDER:
|
||||
print(f" - Subgroup of size {f}: too large")
|
||||
continue
|
||||
|
||||
# Iterate over X coordinates until we find one that is on the curve, has order f,
|
||||
# and for which curve isomorphism exists that maps it to X coordinate 1.
|
||||
for x in range(1, P):
|
||||
# Skip X coordinates not on the curve, and construct the full point otherwise.
|
||||
if not E.is_x_coord(x):
|
||||
continue
|
||||
G = E.lift_x(F(x))
|
||||
# Construct a generator for that subgroup.
|
||||
gen = g * (g_order // f)
|
||||
assert(gen.order() == f)
|
||||
|
||||
print(" - Analyzing (multiples of) point with X=%i" % x)
|
||||
# Add to set the minimal multiple of gen.
|
||||
curve_gens.setdefault(f, set()).add(min([j*gen for j in range(1, f)]))
|
||||
print(f" - Subgroup of size {f}: ok")
|
||||
|
||||
# Skip points whose order is not a multiple of f. Project the point to have
|
||||
# order f otherwise.
|
||||
if (G.order() % f):
|
||||
print(" - Bad order")
|
||||
continue
|
||||
G = G * (G.order() // f)
|
||||
for f in sorted(curve_gens.keys()):
|
||||
print(f"- Constructing group of order {f}")
|
||||
cbrts = sorted([int(c) for c in Integers(f)(1).nth_root(3, all=true) if c != 1])
|
||||
gens = list(curve_gens[f])
|
||||
sol_count = 0
|
||||
no_endo_count = 0
|
||||
|
||||
# Consider all non-zero linear combinations of the independent generators.
|
||||
for j in range(1, f**len(gens)):
|
||||
gen = sum(gens[k] * ((j // f**k) % f) for k in range(len(gens)))
|
||||
assert not gen.is_zero()
|
||||
assert (f*gen).is_zero()
|
||||
|
||||
# Find lambda for endomorphism. Skip if none can be found.
|
||||
lam = None
|
||||
for l in Integers(f)(1).nth_root(3, all=True):
|
||||
if int(l)*G == E(BETA*G[0], G[1]):
|
||||
lam = int(l)
|
||||
for l in cbrts:
|
||||
if l*gen == E(BETA*gen[0], gen[1]):
|
||||
lam = l
|
||||
break
|
||||
|
||||
if lam is None:
|
||||
print(" - No endomorphism for this subgroup")
|
||||
break
|
||||
no_endo_count += 1
|
||||
else:
|
||||
sol_count += 1
|
||||
solutions.setdefault(f, []).append((b, int(gen[0]), int(gen[1]), gen, lam))
|
||||
|
||||
# Now look for an isomorphism of the curve that gives this point an X
|
||||
# coordinate equal to 1.
|
||||
# If (x,y) is on y^2 = x^3 + b, then (a^2*x, a^3*y) is on y^2 = x^3 + a^6*b.
|
||||
# So look for m=a^2=1/x.
|
||||
m = F(1)/G[0]
|
||||
if not m.is_square():
|
||||
print(" - No curve isomorphism maps it to a point with X=1")
|
||||
continue
|
||||
a = m.sqrt()
|
||||
rb = a^6*b
|
||||
RE = EllipticCurve(F, [0, rb])
|
||||
print(f" - Found {sol_count} generators (plus {no_endo_count} without endomorphism)")
|
||||
|
||||
# Use as generator twice the image of G under the above isormorphism.
|
||||
# This means that generator*(1/2 mod f) will have X coordinate 1.
|
||||
RG = RE(1, a^3*G[1]) * 2
|
||||
# And even Y coordinate.
|
||||
if int(RG[1]) % 2:
|
||||
RG = -RG
|
||||
assert(RG.order() == f)
|
||||
assert(lam*RG == RE(BETA*RG[0], RG[1]))
|
||||
print()
|
||||
|
||||
# We have found curve RE:y^2=x^3+rb with generator RG of order f. Remember it
|
||||
results[f] = {"b": rb, "G": RG, "lambda": lam}
|
||||
print(" - Found solution")
|
||||
break
|
||||
def output_generator(g, name):
|
||||
print(f"#define {name} SECP256K1_GE_CONST(\\")
|
||||
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x,\\" % tuple((int(g[0]) >> (32 * (7 - i))) & 0xffffffff for i in range(4)))
|
||||
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x,\\" % tuple((int(g[0]) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8)))
|
||||
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x,\\" % tuple((int(g[1]) >> (32 * (7 - i))) & 0xffffffff for i in range(4)))
|
||||
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x\\" % tuple((int(g[1]) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8)))
|
||||
print(")")
|
||||
|
||||
print("")
|
||||
def output_b(b):
|
||||
print(f"#define SECP256K1_B {int(b)}")
|
||||
|
||||
print("")
|
||||
print("")
|
||||
print("/* To be put in src/group_impl.h: */")
|
||||
print()
|
||||
print("To be put in src/group_impl.h:")
|
||||
print()
|
||||
print("/* Begin of section generated by sage/gen_exhaustive_groups.sage. */")
|
||||
for f in sorted(solutions.keys()):
|
||||
# Use as generator/2 the one with lowest b, and lowest (x, y) generator (interpreted as non-negative integers).
|
||||
b, _, _, HALF_G, lam = min(solutions[f])
|
||||
output_generator(2 * HALF_G, f"SECP256K1_G_ORDER_{f}")
|
||||
print("/** Generator for secp256k1, value 'g' defined in")
|
||||
print(" * \"Standards for Efficient Cryptography\" (SEC2) 2.7.1.")
|
||||
print(" */")
|
||||
output_generator(G, "SECP256K1_G")
|
||||
print("/* These exhaustive group test orders and generators are chosen such that:")
|
||||
print(" * - The field size is equal to that of secp256k1, so field code is the same.")
|
||||
print(" * - The curve equation is of the form y^2=x^3+B for some small constant B.")
|
||||
print(" * - The subgroup has a generator 2*P, where P.x is as small as possible.")
|
||||
print(f" * - The subgroup has size less than {MAX_ORDER} to permit exhaustive testing.")
|
||||
print(" * - The subgroup admits an endomorphism of the form lambda*(x,y) == (beta*x,y).")
|
||||
print(" */")
|
||||
print("#if defined(EXHAUSTIVE_TEST_ORDER)")
|
||||
first = True
|
||||
for f in sorted(results.keys()):
|
||||
b = results[f]["b"]
|
||||
G = results[f]["G"]
|
||||
print("# %s EXHAUSTIVE_TEST_ORDER == %i" % ("if" if first else "elif", f))
|
||||
for f in sorted(solutions.keys()):
|
||||
b, _, _, _, lam = min(solutions[f])
|
||||
print(f"# {'if' if first else 'elif'} EXHAUSTIVE_TEST_ORDER == {f}")
|
||||
first = False
|
||||
print("static const rustsecp256k1_v0_8_1_ge rustsecp256k1_v0_8_1_ge_const_g = SECP256K1_GE_CONST(")
|
||||
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[0]) >> (32 * (7 - i))) & 0xffffffff for i in range(4)))
|
||||
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[0]) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8)))
|
||||
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[1]) >> (32 * (7 - i))) & 0xffffffff for i in range(4)))
|
||||
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x" % tuple((int(G[1]) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8)))
|
||||
print(");")
|
||||
print("static const rustsecp256k1_v0_8_1_fe rustsecp256k1_v0_8_1_fe_const_b = SECP256K1_FE_CONST(")
|
||||
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(b) >> (32 * (7 - i))) & 0xffffffff for i in range(4)))
|
||||
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x" % tuple((int(b) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8)))
|
||||
print(");")
|
||||
print()
|
||||
print(f"static const rustsecp256k1_v0_9_0_ge rustsecp256k1_v0_9_0_ge_const_g = SECP256K1_G_ORDER_{f};")
|
||||
output_b(b)
|
||||
print()
|
||||
print("# else")
|
||||
print("# error No known generator for the specified exhaustive test group order.")
|
||||
print("# endif")
|
||||
print("#else")
|
||||
print()
|
||||
print("static const rustsecp256k1_v0_9_0_ge rustsecp256k1_v0_9_0_ge_const_g = SECP256K1_G;")
|
||||
output_b(7)
|
||||
print()
|
||||
print("#endif")
|
||||
print("/* End of section generated by sage/gen_exhaustive_groups.sage. */")
|
||||
|
||||
print("")
|
||||
print("")
|
||||
print("/* To be put in src/scalar_impl.h: */")
|
||||
|
||||
print()
|
||||
print()
|
||||
print("To be put in src/scalar_impl.h:")
|
||||
print()
|
||||
print("/* Begin of section generated by sage/gen_exhaustive_groups.sage. */")
|
||||
first = True
|
||||
for f in sorted(results.keys()):
|
||||
lam = results[f]["lambda"]
|
||||
for f in sorted(solutions.keys()):
|
||||
_, _, _, _, lam = min(solutions[f])
|
||||
print("# %s EXHAUSTIVE_TEST_ORDER == %i" % ("if" if first else "elif", f))
|
||||
first = False
|
||||
print("# define EXHAUSTIVE_TEST_LAMBDA %i" % lam)
|
||||
print("# else")
|
||||
print("# error No known lambda for the specified exhaustive test group order.")
|
||||
print("# endif")
|
||||
print("")
|
||||
print("/* End of section generated by sage/gen_exhaustive_groups.sage. */")
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
""" Generates the constants used in rustsecp256k1_v0_8_1_scalar_split_lambda.
|
||||
""" Generates the constants used in rustsecp256k1_v0_9_0_scalar_split_lambda.
|
||||
|
||||
See the comments for rustsecp256k1_v0_8_1_scalar_split_lambda in src/scalar_impl.h for detailed explanations.
|
||||
See the comments for rustsecp256k1_v0_9_0_scalar_split_lambda in src/scalar_impl.h for detailed explanations.
|
||||
"""
|
||||
|
||||
load("rustsecp256k1_v0_8_1_params.sage")
|
||||
load("rustsecp256k1_v0_9_0_params.sage")
|
||||
|
||||
def inf_norm(v):
|
||||
"""Returns the infinity norm of a vector."""
|
||||
|
@ -24,17 +24,17 @@ def gauss_reduction(i1, i2):
|
|||
v2[1] -= m*v1[1]
|
||||
|
||||
def find_split_constants_gauss():
|
||||
"""Find constants for rustsecp256k1_v0_8_1_scalar_split_lamdba using gauss reduction."""
|
||||
"""Find constants for rustsecp256k1_v0_9_0_scalar_split_lamdba using gauss reduction."""
|
||||
(v11, v12), (v21, v22) = gauss_reduction([0, N], [1, int(LAMBDA)])
|
||||
|
||||
# We use related vectors in rustsecp256k1_v0_8_1_scalar_split_lambda.
|
||||
# We use related vectors in rustsecp256k1_v0_9_0_scalar_split_lambda.
|
||||
A1, B1 = -v21, -v11
|
||||
A2, B2 = v22, -v21
|
||||
|
||||
return A1, B1, A2, B2
|
||||
|
||||
def find_split_constants_explicit_tof():
|
||||
"""Find constants for rustsecp256k1_v0_8_1_scalar_split_lamdba using the trace of Frobenius.
|
||||
"""Find constants for rustsecp256k1_v0_9_0_scalar_split_lamdba using the trace of Frobenius.
|
||||
|
||||
See Benjamin Smith: "Easy scalar decompositions for efficient scalar multiplication on
|
||||
elliptic curves and genus 2 Jacobians" (https://eprint.iacr.org/2013/672), Example 2
|
||||
|
@ -51,7 +51,7 @@ def find_split_constants_explicit_tof():
|
|||
A2 = Integer((t + c)/2 - 1)
|
||||
B2 = Integer(1 - (t - c)/2)
|
||||
|
||||
# We use a negated b values in rustsecp256k1_v0_8_1_scalar_split_lambda.
|
||||
# We use a negated b values in rustsecp256k1_v0_9_0_scalar_split_lambda.
|
||||
B1, B2 = -B1, -B2
|
||||
|
||||
return A1, B1, A2, B2
|
||||
|
@ -90,7 +90,7 @@ def rnddiv2(v):
|
|||
return v >> 1
|
||||
|
||||
def scalar_lambda_split(k):
|
||||
"""Equivalent to rustsecp256k1_v0_8_1_scalar_lambda_split()."""
|
||||
"""Equivalent to rustsecp256k1_v0_9_0_scalar_lambda_split()."""
|
||||
c1 = rnddiv2((k * G1) >> 383)
|
||||
c2 = rnddiv2((k * G2) >> 383)
|
||||
c1 = (c1 * -B1) % N
|
||||
|
|
|
@ -198,7 +198,7 @@ def normalize_factor(p):
|
|||
(8) * (-bx + ax)^3
|
||||
```
|
||||
"""
|
||||
# Assert p is not 0 and that its non-zero coeffients are coprime.
|
||||
# Assert p is not 0 and that its non-zero coefficients are coprime.
|
||||
# (We could just work with the primitive part p/p.content() but we want to be
|
||||
# aware if factor() does not return a primitive part in future sage versions.)
|
||||
assert p.content() == 1
|
||||
|
|
|
@ -5,8 +5,8 @@ import sys
|
|||
load("group_prover.sage")
|
||||
load("weierstrass_prover.sage")
|
||||
|
||||
def formula_rustsecp256k1_v0_8_1_gej_double_var(a):
|
||||
"""libsecp256k1's rustsecp256k1_v0_8_1_gej_double_var, used by various addition functions"""
|
||||
def formula_rustsecp256k1_v0_9_0_gej_double_var(a):
|
||||
"""libsecp256k1's rustsecp256k1_v0_9_0_gej_double_var, used by various addition functions"""
|
||||
rz = a.Z * a.Y
|
||||
s = a.Y^2
|
||||
l = a.X^2
|
||||
|
@ -24,8 +24,8 @@ def formula_rustsecp256k1_v0_8_1_gej_double_var(a):
|
|||
ry = -ry
|
||||
return jacobianpoint(rx, ry, rz)
|
||||
|
||||
def formula_rustsecp256k1_v0_8_1_gej_add_var(branch, a, b):
|
||||
"""libsecp256k1's rustsecp256k1_v0_8_1_gej_add_var"""
|
||||
def formula_rustsecp256k1_v0_9_0_gej_add_var(branch, a, b):
|
||||
"""libsecp256k1's rustsecp256k1_v0_9_0_gej_add_var"""
|
||||
if branch == 0:
|
||||
return (constraints(), constraints(nonzero={a.Infinity : 'a_infinite'}), b)
|
||||
if branch == 1:
|
||||
|
@ -43,7 +43,7 @@ def formula_rustsecp256k1_v0_8_1_gej_add_var(branch, a, b):
|
|||
i = -s2
|
||||
i = i + s1
|
||||
if branch == 2:
|
||||
r = formula_rustsecp256k1_v0_8_1_gej_double_var(a)
|
||||
r = formula_rustsecp256k1_v0_9_0_gej_double_var(a)
|
||||
return (constraints(), constraints(zero={h : 'h=0', i : 'i=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}), r)
|
||||
if branch == 3:
|
||||
return (constraints(), constraints(zero={h : 'h=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={i : 'i!=0'}), point_at_infinity())
|
||||
|
@ -63,8 +63,8 @@ def formula_rustsecp256k1_v0_8_1_gej_add_var(branch, a, b):
|
|||
ry = ry + h3
|
||||
return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz))
|
||||
|
||||
def formula_rustsecp256k1_v0_8_1_gej_add_ge_var(branch, a, b):
|
||||
"""libsecp256k1's rustsecp256k1_v0_8_1_gej_add_ge_var, which assume bz==1"""
|
||||
def formula_rustsecp256k1_v0_9_0_gej_add_ge_var(branch, a, b):
|
||||
"""libsecp256k1's rustsecp256k1_v0_9_0_gej_add_ge_var, which assume bz==1"""
|
||||
if branch == 0:
|
||||
return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(nonzero={a.Infinity : 'a_infinite'}), b)
|
||||
if branch == 1:
|
||||
|
@ -80,7 +80,7 @@ def formula_rustsecp256k1_v0_8_1_gej_add_ge_var(branch, a, b):
|
|||
i = -s2
|
||||
i = i + s1
|
||||
if (branch == 2):
|
||||
r = formula_rustsecp256k1_v0_8_1_gej_double_var(a)
|
||||
r = formula_rustsecp256k1_v0_9_0_gej_double_var(a)
|
||||
return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0', i : 'i=0'}), r)
|
||||
if (branch == 3):
|
||||
return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity())
|
||||
|
@ -99,8 +99,8 @@ def formula_rustsecp256k1_v0_8_1_gej_add_ge_var(branch, a, b):
|
|||
ry = ry + h3
|
||||
return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz))
|
||||
|
||||
def formula_rustsecp256k1_v0_8_1_gej_add_zinv_var(branch, a, b):
|
||||
"""libsecp256k1's rustsecp256k1_v0_8_1_gej_add_zinv_var"""
|
||||
def formula_rustsecp256k1_v0_9_0_gej_add_zinv_var(branch, a, b):
|
||||
"""libsecp256k1's rustsecp256k1_v0_9_0_gej_add_zinv_var"""
|
||||
bzinv = b.Z^(-1)
|
||||
if branch == 0:
|
||||
rinf = b.Infinity
|
||||
|
@ -124,7 +124,7 @@ def formula_rustsecp256k1_v0_8_1_gej_add_zinv_var(branch, a, b):
|
|||
i = -s2
|
||||
i = i + s1
|
||||
if branch == 2:
|
||||
r = formula_rustsecp256k1_v0_8_1_gej_double_var(a)
|
||||
r = formula_rustsecp256k1_v0_9_0_gej_double_var(a)
|
||||
return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0', i : 'i=0'}), r)
|
||||
if branch == 3:
|
||||
return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity())
|
||||
|
@ -143,12 +143,12 @@ def formula_rustsecp256k1_v0_8_1_gej_add_zinv_var(branch, a, b):
|
|||
ry = ry + h3
|
||||
return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz))
|
||||
|
||||
def formula_rustsecp256k1_v0_8_1_gej_add_ge(branch, a, b):
|
||||
"""libsecp256k1's rustsecp256k1_v0_8_1_gej_add_ge"""
|
||||
def formula_rustsecp256k1_v0_9_0_gej_add_ge(branch, a, b):
|
||||
"""libsecp256k1's rustsecp256k1_v0_9_0_gej_add_ge"""
|
||||
zeroes = {}
|
||||
nonzeroes = {}
|
||||
a_infinity = False
|
||||
if (branch & 4) != 0:
|
||||
if (branch & 2) != 0:
|
||||
nonzeroes.update({a.Infinity : 'a_infinite'})
|
||||
a_infinity = True
|
||||
else:
|
||||
|
@ -167,15 +167,11 @@ def formula_rustsecp256k1_v0_8_1_gej_add_ge(branch, a, b):
|
|||
m_alt = -u2
|
||||
tt = u1 * m_alt
|
||||
rr = rr + tt
|
||||
degenerate = (branch & 3) == 3
|
||||
if (branch & 1) != 0:
|
||||
degenerate = (branch & 1) != 0
|
||||
if degenerate:
|
||||
zeroes.update({m : 'm_zero'})
|
||||
else:
|
||||
nonzeroes.update({m : 'm_nonzero'})
|
||||
if (branch & 2) != 0:
|
||||
zeroes.update({rr : 'rr_zero'})
|
||||
else:
|
||||
nonzeroes.update({rr : 'rr_nonzero'})
|
||||
rr_alt = s1
|
||||
rr_alt = rr_alt * 2
|
||||
m_alt = m_alt + u1
|
||||
|
@ -190,13 +186,6 @@ def formula_rustsecp256k1_v0_8_1_gej_add_ge(branch, a, b):
|
|||
n = m
|
||||
t = rr_alt^2
|
||||
rz = a.Z * m_alt
|
||||
infinity = False
|
||||
if (branch & 8) != 0:
|
||||
if not a_infinity:
|
||||
infinity = True
|
||||
zeroes.update({rz : 'r.z=0'})
|
||||
else:
|
||||
nonzeroes.update({rz : 'r.z!=0'})
|
||||
t = t + q
|
||||
rx = t
|
||||
t = t * 2
|
||||
|
@ -209,12 +198,15 @@ def formula_rustsecp256k1_v0_8_1_gej_add_ge(branch, a, b):
|
|||
rx = b.X
|
||||
ry = b.Y
|
||||
rz = 1
|
||||
if infinity:
|
||||
if (branch & 4) != 0:
|
||||
zeroes.update({rz : 'r.z = 0'})
|
||||
return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zeroes, nonzero=nonzeroes), point_at_infinity())
|
||||
else:
|
||||
nonzeroes.update({rz : 'r.z != 0'})
|
||||
return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zeroes, nonzero=nonzeroes), jacobianpoint(rx, ry, rz))
|
||||
|
||||
def formula_rustsecp256k1_v0_8_1_gej_add_ge_old(branch, a, b):
|
||||
"""libsecp256k1's old rustsecp256k1_v0_8_1_gej_add_ge, which fails when ay+by=0 but ax!=bx"""
|
||||
def formula_rustsecp256k1_v0_9_0_gej_add_ge_old(branch, a, b):
|
||||
"""libsecp256k1's old rustsecp256k1_v0_9_0_gej_add_ge, which fails when ay+by=0 but ax!=bx"""
|
||||
a_infinity = (branch & 1) != 0
|
||||
zero = {}
|
||||
nonzero = {}
|
||||
|
@ -277,17 +269,17 @@ def formula_rustsecp256k1_v0_8_1_gej_add_ge_old(branch, a, b):
|
|||
|
||||
if __name__ == "__main__":
|
||||
success = True
|
||||
success = success & check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_8_1_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_8_1_gej_add_var)
|
||||
success = success & check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_8_1_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_8_1_gej_add_ge_var)
|
||||
success = success & check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_8_1_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_8_1_gej_add_zinv_var)
|
||||
success = success & check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_8_1_gej_add_ge", 0, 7, 16, formula_rustsecp256k1_v0_8_1_gej_add_ge)
|
||||
success = success & (not check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_8_1_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_8_1_gej_add_ge_old))
|
||||
success = success & check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_9_0_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_9_0_gej_add_var)
|
||||
success = success & check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_9_0_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_9_0_gej_add_ge_var)
|
||||
success = success & check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_9_0_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_9_0_gej_add_zinv_var)
|
||||
success = success & check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_9_0_gej_add_ge", 0, 7, 8, formula_rustsecp256k1_v0_9_0_gej_add_ge)
|
||||
success = success & (not check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_9_0_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_9_0_gej_add_ge_old))
|
||||
|
||||
if len(sys.argv) >= 2 and sys.argv[1] == "--exhaustive":
|
||||
success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_8_1_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_8_1_gej_add_var, 43)
|
||||
success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_8_1_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_8_1_gej_add_ge_var, 43)
|
||||
success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_8_1_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_8_1_gej_add_zinv_var, 43)
|
||||
success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_8_1_gej_add_ge", 0, 7, 16, formula_rustsecp256k1_v0_8_1_gej_add_ge, 43)
|
||||
success = success & (not check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_8_1_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_8_1_gej_add_ge_old, 43))
|
||||
success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_9_0_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_9_0_gej_add_var, 43)
|
||||
success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_9_0_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_9_0_gej_add_ge_var, 43)
|
||||
success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_9_0_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_9_0_gej_add_zinv_var, 43)
|
||||
success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_9_0_gej_add_ge", 0, 7, 8, formula_rustsecp256k1_v0_9_0_gej_add_ge, 43)
|
||||
success = success & (not check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_9_0_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_9_0_gej_add_ge_old, 43))
|
||||
|
||||
sys.exit(int(not success))
|
||||
|
|
|
@ -0,0 +1,165 @@
|
|||
# Must be included before CMAKE_INSTALL_INCLUDEDIR is used.
|
||||
include(GNUInstallDirs)
|
||||
|
||||
add_library(rustsecp256k1_v0_9_0_precomputed OBJECT EXCLUDE_FROM_ALL
|
||||
precomputed_ecmult.c
|
||||
precomputed_ecmult_gen.c
|
||||
)
|
||||
|
||||
# Add objects explicitly rather than linking to the object libs to keep them
|
||||
# from being exported.
|
||||
add_library(secp256k1 secp256k1.c $<TARGET_OBJECTS:rustsecp256k1_v0_9_0_precomputed>)
|
||||
|
||||
add_library(rustsecp256k1_v0_9_0_asm INTERFACE)
|
||||
if(SECP256K1_ASM STREQUAL "arm32")
|
||||
add_library(rustsecp256k1_v0_9_0_asm_arm OBJECT EXCLUDE_FROM_ALL)
|
||||
target_sources(rustsecp256k1_v0_9_0_asm_arm PUBLIC
|
||||
asm/field_10x26_arm.s
|
||||
)
|
||||
target_sources(secp256k1 PRIVATE $<TARGET_OBJECTS:rustsecp256k1_v0_9_0_asm_arm>)
|
||||
target_link_libraries(rustsecp256k1_v0_9_0_asm INTERFACE rustsecp256k1_v0_9_0_asm_arm)
|
||||
endif()
|
||||
|
||||
if(WIN32)
|
||||
# Define our export symbol only for shared libs.
|
||||
set_target_properties(secp256k1 PROPERTIES DEFINE_SYMBOL SECP256K1_DLL_EXPORT)
|
||||
target_compile_definitions(secp256k1 INTERFACE $<$<NOT:$<BOOL:${BUILD_SHARED_LIBS}>>:SECP256K1_STATIC>)
|
||||
endif()
|
||||
|
||||
# Object libs don't know if they're being built for a shared or static lib.
|
||||
# Grab the PIC property from secp256k1 which knows.
|
||||
get_target_property(use_pic secp256k1 POSITION_INDEPENDENT_CODE)
|
||||
set_target_properties(rustsecp256k1_v0_9_0_precomputed PROPERTIES POSITION_INDEPENDENT_CODE ${use_pic})
|
||||
|
||||
target_include_directories(secp256k1 INTERFACE
|
||||
# Add the include path for parent projects so that they don't have to manually add it.
|
||||
$<BUILD_INTERFACE:$<$<NOT:$<BOOL:${PROJECT_IS_TOP_LEVEL}>>:${PROJECT_SOURCE_DIR}/include>>
|
||||
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
|
||||
)
|
||||
|
||||
# This emulates Libtool to make sure Libtool and CMake agree on the ABI version,
|
||||
# see below "Calculate the version variables" in build-aux/ltmain.sh.
|
||||
math(EXPR ${PROJECT_NAME}_soversion "${${PROJECT_NAME}_LIB_VERSION_CURRENT} - ${${PROJECT_NAME}_LIB_VERSION_AGE}")
|
||||
set_target_properties(secp256k1 PROPERTIES
|
||||
SOVERSION ${${PROJECT_NAME}_soversion}
|
||||
)
|
||||
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
|
||||
set_target_properties(secp256k1 PROPERTIES
|
||||
VERSION ${${PROJECT_NAME}_soversion}.${${PROJECT_NAME}_LIB_VERSION_AGE}.${${PROJECT_NAME}_LIB_VERSION_REVISION}
|
||||
)
|
||||
elseif(APPLE)
|
||||
if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.17)
|
||||
math(EXPR ${PROJECT_NAME}_compatibility_version "${${PROJECT_NAME}_LIB_VERSION_CURRENT} + 1")
|
||||
set_target_properties(secp256k1 PROPERTIES
|
||||
MACHO_COMPATIBILITY_VERSION ${${PROJECT_NAME}_compatibility_version}
|
||||
MACHO_CURRENT_VERSION ${${PROJECT_NAME}_compatibility_version}.${${PROJECT_NAME}_LIB_VERSION_REVISION}
|
||||
)
|
||||
unset(${PROJECT_NAME}_compatibility_version)
|
||||
elseif(BUILD_SHARED_LIBS)
|
||||
message(WARNING
|
||||
"The 'compatibility version' and 'current version' values of the DYLIB "
|
||||
"will diverge from the values set by the GNU Libtool. To ensure "
|
||||
"compatibility, it is recommended to upgrade CMake to at least version 3.17."
|
||||
)
|
||||
endif()
|
||||
elseif(CMAKE_SYSTEM_NAME STREQUAL "Windows")
|
||||
set(${PROJECT_NAME}_windows "secp256k1")
|
||||
if(MSVC)
|
||||
set(${PROJECT_NAME}_windows "${PROJECT_NAME}")
|
||||
endif()
|
||||
set_target_properties(secp256k1 PROPERTIES
|
||||
ARCHIVE_OUTPUT_NAME "${${PROJECT_NAME}_windows}"
|
||||
RUNTIME_OUTPUT_NAME "${${PROJECT_NAME}_windows}-${${PROJECT_NAME}_soversion}"
|
||||
)
|
||||
unset(${PROJECT_NAME}_windows)
|
||||
endif()
|
||||
unset(${PROJECT_NAME}_soversion)
|
||||
|
||||
if(SECP256K1_BUILD_BENCHMARK)
|
||||
add_executable(bench bench.c)
|
||||
target_link_libraries(bench secp256k1)
|
||||
add_executable(bench_internal bench_internal.c)
|
||||
target_link_libraries(bench_internal rustsecp256k1_v0_9_0_precomputed rustsecp256k1_v0_9_0_asm)
|
||||
add_executable(bench_ecmult bench_ecmult.c)
|
||||
target_link_libraries(bench_ecmult rustsecp256k1_v0_9_0_precomputed rustsecp256k1_v0_9_0_asm)
|
||||
endif()
|
||||
|
||||
if(SECP256K1_BUILD_TESTS)
|
||||
add_executable(noverify_tests tests.c)
|
||||
target_link_libraries(noverify_tests rustsecp256k1_v0_9_0_precomputed rustsecp256k1_v0_9_0_asm)
|
||||
add_test(NAME noverify_tests COMMAND noverify_tests)
|
||||
if(NOT CMAKE_BUILD_TYPE STREQUAL "Coverage")
|
||||
add_executable(tests tests.c)
|
||||
target_compile_definitions(tests PRIVATE VERIFY)
|
||||
target_link_libraries(tests rustsecp256k1_v0_9_0_precomputed rustsecp256k1_v0_9_0_asm)
|
||||
add_test(NAME tests COMMAND tests)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(SECP256K1_BUILD_EXHAUSTIVE_TESTS)
|
||||
# Note: do not include rustsecp256k1_v0_9_0_precomputed in exhaustive_tests (it uses runtime-generated tables).
|
||||
add_executable(exhaustive_tests tests_exhaustive.c)
|
||||
target_link_libraries(exhaustive_tests rustsecp256k1_v0_9_0_asm)
|
||||
target_compile_definitions(exhaustive_tests PRIVATE $<$<NOT:$<CONFIG:Coverage>>:VERIFY>)
|
||||
add_test(NAME exhaustive_tests COMMAND exhaustive_tests)
|
||||
endif()
|
||||
|
||||
if(SECP256K1_BUILD_CTIME_TESTS)
|
||||
add_executable(ctime_tests ctime_tests.c)
|
||||
target_link_libraries(ctime_tests secp256k1)
|
||||
endif()
|
||||
|
||||
if(SECP256K1_INSTALL)
|
||||
install(TARGETS secp256k1
|
||||
EXPORT ${PROJECT_NAME}-targets
|
||||
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
|
||||
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
|
||||
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
|
||||
)
|
||||
set(${PROJECT_NAME}_headers
|
||||
"${PROJECT_SOURCE_DIR}/include/secp256k1.h"
|
||||
"${PROJECT_SOURCE_DIR}/include/rustsecp256k1_v0_9_0_preallocated.h"
|
||||
)
|
||||
if(SECP256K1_ENABLE_MODULE_ECDH)
|
||||
list(APPEND ${PROJECT_NAME}_headers "${PROJECT_SOURCE_DIR}/include/rustsecp256k1_v0_9_0_ecdh.h")
|
||||
endif()
|
||||
if(SECP256K1_ENABLE_MODULE_RECOVERY)
|
||||
list(APPEND ${PROJECT_NAME}_headers "${PROJECT_SOURCE_DIR}/include/rustsecp256k1_v0_9_0_recovery.h")
|
||||
endif()
|
||||
if(SECP256K1_ENABLE_MODULE_EXTRAKEYS)
|
||||
list(APPEND ${PROJECT_NAME}_headers "${PROJECT_SOURCE_DIR}/include/rustsecp256k1_v0_9_0_extrakeys.h")
|
||||
endif()
|
||||
if(SECP256K1_ENABLE_MODULE_SCHNORRSIG)
|
||||
list(APPEND ${PROJECT_NAME}_headers "${PROJECT_SOURCE_DIR}/include/rustsecp256k1_v0_9_0_schnorrsig.h")
|
||||
endif()
|
||||
if(SECP256K1_ENABLE_MODULE_ELLSWIFT)
|
||||
list(APPEND ${PROJECT_NAME}_headers "${PROJECT_SOURCE_DIR}/include/rustsecp256k1_v0_9_0_ellswift.h")
|
||||
endif()
|
||||
install(FILES ${${PROJECT_NAME}_headers}
|
||||
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
|
||||
)
|
||||
|
||||
install(EXPORT ${PROJECT_NAME}-targets
|
||||
FILE ${PROJECT_NAME}-targets.cmake
|
||||
NAMESPACE ${PROJECT_NAME}::
|
||||
DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}
|
||||
)
|
||||
|
||||
include(CMakePackageConfigHelpers)
|
||||
configure_package_config_file(
|
||||
${PROJECT_SOURCE_DIR}/cmake/config.cmake.in
|
||||
${PROJECT_NAME}-config.cmake
|
||||
INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}
|
||||
NO_SET_AND_CHECK_MACRO
|
||||
)
|
||||
write_basic_package_version_file(${PROJECT_NAME}-config-version.cmake
|
||||
COMPATIBILITY SameMinorVersion
|
||||
)
|
||||
|
||||
install(
|
||||
FILES
|
||||
${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake
|
||||
${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config-version.cmake
|
||||
DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}
|
||||
)
|
||||
endif()
|
|
@ -27,8 +27,9 @@ Note:
|
|||
.set field_not_M, 0xfc000000 @ ~M = ~0x3ffffff
|
||||
|
||||
.align 2
|
||||
.global rustsecp256k1_v0_8_1_fe_mul_inner
|
||||
.type rustsecp256k1_v0_8_1_fe_mul_inner, %function
|
||||
.global rustsecp256k1_v0_9_0_fe_mul_inner
|
||||
.type rustsecp256k1_v0_9_0_fe_mul_inner, %function
|
||||
.hidden rustsecp256k1_v0_9_0_fe_mul_inner
|
||||
@ Arguments:
|
||||
@ r0 r Restrict: can overlap with a, not with b
|
||||
@ r1 a
|
||||
|
@ -36,7 +37,7 @@ Note:
|
|||
@ Stack (total 4+10*4 = 44)
|
||||
@ sp + #0 saved 'r' pointer
|
||||
@ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9
|
||||
rustsecp256k1_v0_8_1_fe_mul_inner:
|
||||
rustsecp256k1_v0_9_0_fe_mul_inner:
|
||||
stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14}
|
||||
sub sp, sp, #48 @ frame=44 + alignment
|
||||
str r0, [sp, #0] @ save result address, we need it only at the end
|
||||
|
@ -511,18 +512,19 @@ rustsecp256k1_v0_8_1_fe_mul_inner:
|
|||
|
||||
add sp, sp, #48
|
||||
ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc}
|
||||
.size rustsecp256k1_v0_8_1_fe_mul_inner, .-rustsecp256k1_v0_8_1_fe_mul_inner
|
||||
.size rustsecp256k1_v0_9_0_fe_mul_inner, .-rustsecp256k1_v0_9_0_fe_mul_inner
|
||||
|
||||
.align 2
|
||||
.global rustsecp256k1_v0_8_1_fe_sqr_inner
|
||||
.type rustsecp256k1_v0_8_1_fe_sqr_inner, %function
|
||||
.global rustsecp256k1_v0_9_0_fe_sqr_inner
|
||||
.type rustsecp256k1_v0_9_0_fe_sqr_inner, %function
|
||||
.hidden rustsecp256k1_v0_9_0_fe_sqr_inner
|
||||
@ Arguments:
|
||||
@ r0 r Can overlap with a
|
||||
@ r1 a
|
||||
@ Stack (total 4+10*4 = 44)
|
||||
@ sp + #0 saved 'r' pointer
|
||||
@ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9
|
||||
rustsecp256k1_v0_8_1_fe_sqr_inner:
|
||||
rustsecp256k1_v0_9_0_fe_sqr_inner:
|
||||
stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14}
|
||||
sub sp, sp, #48 @ frame=44 + alignment
|
||||
str r0, [sp, #0] @ save result address, we need it only at the end
|
||||
|
@ -909,5 +911,5 @@ rustsecp256k1_v0_8_1_fe_sqr_inner:
|
|||
|
||||
add sp, sp, #48
|
||||
ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc}
|
||||
.size rustsecp256k1_v0_8_1_fe_sqr_inner, .-rustsecp256k1_v0_8_1_fe_sqr_inner
|
||||
.size rustsecp256k1_v0_9_0_fe_sqr_inner, .-rustsecp256k1_v0_9_0_fe_sqr_inner
|
||||
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
reduce the odds of experiencing an unwelcome surprise.
|
||||
*/
|
||||
|
||||
struct rustsecp256k1_v0_8_1_assumption_checker {
|
||||
struct rustsecp256k1_v0_9_0_assumption_checker {
|
||||
/* This uses a trick to implement a static assertion in C89: a type with an array of negative size is not
|
||||
allowed. */
|
||||
int dummy_array[(
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
#include "util.h"
|
||||
#include "bench.h"
|
||||
|
||||
void help(int default_iters) {
|
||||
static void help(int default_iters) {
|
||||
printf("Benchmarks the following algorithms:\n");
|
||||
printf(" - ECDSA signing/verification\n");
|
||||
|
||||
|
@ -38,6 +38,8 @@ void help(int default_iters) {
|
|||
printf(" ecdsa : all ECDSA algorithms--sign, verify, recovery (if enabled)\n");
|
||||
printf(" ecdsa_sign : ECDSA siging algorithm\n");
|
||||
printf(" ecdsa_verify : ECDSA verification algorithm\n");
|
||||
printf(" ec : all EC public key algorithms (keygen)\n");
|
||||
printf(" ec_keygen : EC public key generation\n");
|
||||
|
||||
#ifdef ENABLE_MODULE_RECOVERY
|
||||
printf(" ecdsa_recover : ECDSA public key recovery algorithm\n");
|
||||
|
@ -53,47 +55,49 @@ void help(int default_iters) {
|
|||
printf(" schnorrsig_verify : Schnorr verification algorithm\n");
|
||||
#endif
|
||||
|
||||
#ifdef ENABLE_MODULE_ELLSWIFT
|
||||
printf(" ellswift : all ElligatorSwift benchmarks (encode, decode, keygen, ecdh)\n");
|
||||
printf(" ellswift_encode : ElligatorSwift encoding\n");
|
||||
printf(" ellswift_decode : ElligatorSwift decoding\n");
|
||||
printf(" ellswift_keygen : ElligatorSwift key generation\n");
|
||||
printf(" ellswift_ecdh : ECDH on ElligatorSwift keys\n");
|
||||
#endif
|
||||
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
rustsecp256k1_v0_8_1_context *ctx;
|
||||
rustsecp256k1_v0_9_0_context *ctx;
|
||||
unsigned char msg[32];
|
||||
unsigned char key[32];
|
||||
unsigned char sig[72];
|
||||
size_t siglen;
|
||||
unsigned char pubkey[33];
|
||||
size_t pubkeylen;
|
||||
} bench_verify_data;
|
||||
} bench_data;
|
||||
|
||||
static void bench_verify(void* arg, int iters) {
|
||||
int i;
|
||||
bench_verify_data* data = (bench_verify_data*)arg;
|
||||
bench_data* data = (bench_data*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_pubkey pubkey;
|
||||
rustsecp256k1_v0_8_1_ecdsa_signature sig;
|
||||
rustsecp256k1_v0_9_0_pubkey pubkey;
|
||||
rustsecp256k1_v0_9_0_ecdsa_signature sig;
|
||||
data->sig[data->siglen - 1] ^= (i & 0xFF);
|
||||
data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF);
|
||||
data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF);
|
||||
CHECK(rustsecp256k1_v0_8_1_ec_pubkey_parse(data->ctx, &pubkey, data->pubkey, data->pubkeylen) == 1);
|
||||
CHECK(rustsecp256k1_v0_8_1_ecdsa_signature_parse_der(data->ctx, &sig, data->sig, data->siglen) == 1);
|
||||
CHECK(rustsecp256k1_v0_8_1_ecdsa_verify(data->ctx, &sig, data->msg, &pubkey) == (i == 0));
|
||||
CHECK(rustsecp256k1_v0_9_0_ec_pubkey_parse(data->ctx, &pubkey, data->pubkey, data->pubkeylen) == 1);
|
||||
CHECK(rustsecp256k1_v0_9_0_ecdsa_signature_parse_der(data->ctx, &sig, data->sig, data->siglen) == 1);
|
||||
CHECK(rustsecp256k1_v0_9_0_ecdsa_verify(data->ctx, &sig, data->msg, &pubkey) == (i == 0));
|
||||
data->sig[data->siglen - 1] ^= (i & 0xFF);
|
||||
data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF);
|
||||
data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF);
|
||||
}
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
rustsecp256k1_v0_8_1_context* ctx;
|
||||
unsigned char msg[32];
|
||||
unsigned char key[32];
|
||||
} bench_sign_data;
|
||||
|
||||
static void bench_sign_setup(void* arg) {
|
||||
int i;
|
||||
bench_sign_data *data = (bench_sign_data*)arg;
|
||||
bench_data *data = (bench_data*)arg;
|
||||
|
||||
for (i = 0; i < 32; i++) {
|
||||
data->msg[i] = i + 1;
|
||||
|
@ -105,15 +109,15 @@ static void bench_sign_setup(void* arg) {
|
|||
|
||||
static void bench_sign_run(void* arg, int iters) {
|
||||
int i;
|
||||
bench_sign_data *data = (bench_sign_data*)arg;
|
||||
bench_data *data = (bench_data*)arg;
|
||||
|
||||
unsigned char sig[74];
|
||||
for (i = 0; i < iters; i++) {
|
||||
size_t siglen = 74;
|
||||
int j;
|
||||
rustsecp256k1_v0_8_1_ecdsa_signature signature;
|
||||
CHECK(rustsecp256k1_v0_8_1_ecdsa_sign(data->ctx, &signature, data->msg, data->key, NULL, NULL));
|
||||
CHECK(rustsecp256k1_v0_8_1_ecdsa_signature_serialize_der(data->ctx, sig, &siglen, &signature));
|
||||
rustsecp256k1_v0_9_0_ecdsa_signature signature;
|
||||
CHECK(rustsecp256k1_v0_9_0_ecdsa_sign(data->ctx, &signature, data->msg, data->key, NULL, NULL));
|
||||
CHECK(rustsecp256k1_v0_9_0_ecdsa_signature_serialize_der(data->ctx, sig, &siglen, &signature));
|
||||
for (j = 0; j < 32; j++) {
|
||||
data->msg[j] = sig[j];
|
||||
data->key[j] = sig[j + 32];
|
||||
|
@ -121,6 +125,30 @@ static void bench_sign_run(void* arg, int iters) {
|
|||
}
|
||||
}
|
||||
|
||||
static void bench_keygen_setup(void* arg) {
|
||||
int i;
|
||||
bench_data *data = (bench_data*)arg;
|
||||
|
||||
for (i = 0; i < 32; i++) {
|
||||
data->key[i] = i + 65;
|
||||
}
|
||||
}
|
||||
|
||||
static void bench_keygen_run(void *arg, int iters) {
|
||||
int i;
|
||||
bench_data *data = (bench_data*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
unsigned char pub33[33];
|
||||
size_t len = 33;
|
||||
rustsecp256k1_v0_9_0_pubkey pubkey;
|
||||
CHECK(rustsecp256k1_v0_9_0_ec_pubkey_create(data->ctx, &pubkey, data->key));
|
||||
CHECK(rustsecp256k1_v0_9_0_ec_pubkey_serialize(data->ctx, pub33, &len, &pubkey, SECP256K1_EC_COMPRESSED));
|
||||
memcpy(data->key, pub33 + 1, 32);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#ifdef ENABLE_MODULE_ECDH
|
||||
# include "modules/ecdh/bench_impl.h"
|
||||
#endif
|
||||
|
@ -133,11 +161,15 @@ static void bench_sign_run(void* arg, int iters) {
|
|||
# include "modules/schnorrsig/bench_impl.h"
|
||||
#endif
|
||||
|
||||
#ifdef ENABLE_MODULE_ELLSWIFT
|
||||
# include "modules/ellswift/bench_impl.h"
|
||||
#endif
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
int i;
|
||||
rustsecp256k1_v0_8_1_pubkey pubkey;
|
||||
rustsecp256k1_v0_8_1_ecdsa_signature sig;
|
||||
bench_verify_data data;
|
||||
rustsecp256k1_v0_9_0_pubkey pubkey;
|
||||
rustsecp256k1_v0_9_0_ecdsa_signature sig;
|
||||
bench_data data;
|
||||
|
||||
int d = argc == 1;
|
||||
int default_iters = 20000;
|
||||
|
@ -145,7 +177,9 @@ int main(int argc, char** argv) {
|
|||
|
||||
/* Check for invalid user arguments */
|
||||
char* valid_args[] = {"ecdsa", "verify", "ecdsa_verify", "sign", "ecdsa_sign", "ecdh", "recover",
|
||||
"ecdsa_recover", "schnorrsig", "schnorrsig_verify", "schnorrsig_sign"};
|
||||
"ecdsa_recover", "schnorrsig", "schnorrsig_verify", "schnorrsig_sign", "ec",
|
||||
"keygen", "ec_keygen", "ellswift", "encode", "ellswift_encode", "decode",
|
||||
"ellswift_decode", "ellswift_keygen", "ellswift_ecdh"};
|
||||
size_t valid_args_size = sizeof(valid_args)/sizeof(valid_args[0]);
|
||||
int invalid_args = have_invalid_args(argc, argv, valid_args, valid_args_size);
|
||||
|
||||
|
@ -187,8 +221,18 @@ int main(int argc, char** argv) {
|
|||
}
|
||||
#endif
|
||||
|
||||
#ifndef ENABLE_MODULE_ELLSWIFT
|
||||
if (have_flag(argc, argv, "ellswift") || have_flag(argc, argv, "ellswift_encode") || have_flag(argc, argv, "ellswift_decode") ||
|
||||
have_flag(argc, argv, "encode") || have_flag(argc, argv, "decode") || have_flag(argc, argv, "ellswift_keygen") ||
|
||||
have_flag(argc, argv, "ellswift_ecdh")) {
|
||||
fprintf(stderr, "./bench: ElligatorSwift module not enabled.\n");
|
||||
fprintf(stderr, "Use ./configure --enable-module-ellswift.\n\n");
|
||||
return 1;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* ECDSA benchmark */
|
||||
data.ctx = rustsecp256k1_v0_8_1_context_create(SECP256K1_CONTEXT_NONE);
|
||||
data.ctx = rustsecp256k1_v0_9_0_context_create(SECP256K1_CONTEXT_NONE);
|
||||
|
||||
for (i = 0; i < 32; i++) {
|
||||
data.msg[i] = 1 + i;
|
||||
|
@ -197,18 +241,19 @@ int main(int argc, char** argv) {
|
|||
data.key[i] = 33 + i;
|
||||
}
|
||||
data.siglen = 72;
|
||||
CHECK(rustsecp256k1_v0_8_1_ecdsa_sign(data.ctx, &sig, data.msg, data.key, NULL, NULL));
|
||||
CHECK(rustsecp256k1_v0_8_1_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig));
|
||||
CHECK(rustsecp256k1_v0_8_1_ec_pubkey_create(data.ctx, &pubkey, data.key));
|
||||
CHECK(rustsecp256k1_v0_9_0_ecdsa_sign(data.ctx, &sig, data.msg, data.key, NULL, NULL));
|
||||
CHECK(rustsecp256k1_v0_9_0_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig));
|
||||
CHECK(rustsecp256k1_v0_9_0_ec_pubkey_create(data.ctx, &pubkey, data.key));
|
||||
data.pubkeylen = 33;
|
||||
CHECK(rustsecp256k1_v0_8_1_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
|
||||
CHECK(rustsecp256k1_v0_9_0_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
|
||||
|
||||
print_output_table_header_row();
|
||||
if (d || have_flag(argc, argv, "ecdsa") || have_flag(argc, argv, "verify") || have_flag(argc, argv, "ecdsa_verify")) run_benchmark("ecdsa_verify", bench_verify, NULL, NULL, &data, 10, iters);
|
||||
|
||||
if (d || have_flag(argc, argv, "ecdsa") || have_flag(argc, argv, "sign") || have_flag(argc, argv, "ecdsa_sign")) run_benchmark("ecdsa_sign", bench_sign_run, bench_sign_setup, NULL, &data, 10, iters);
|
||||
if (d || have_flag(argc, argv, "ec") || have_flag(argc, argv, "keygen") || have_flag(argc, argv, "ec_keygen")) run_benchmark("ec_keygen", bench_keygen_run, bench_keygen_setup, NULL, &data, 10, iters);
|
||||
|
||||
rustsecp256k1_v0_8_1_context_destroy(data.ctx);
|
||||
rustsecp256k1_v0_9_0_context_destroy(data.ctx);
|
||||
|
||||
#ifdef ENABLE_MODULE_ECDH
|
||||
/* ECDH benchmarks */
|
||||
|
@ -225,5 +270,10 @@ int main(int argc, char** argv) {
|
|||
run_schnorrsig_bench(iters, argc, argv);
|
||||
#endif
|
||||
|
||||
#ifdef ENABLE_MODULE_ELLSWIFT
|
||||
/* ElligatorSwift benchmarks */
|
||||
run_ellswift_bench(iters, argc, argv);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
#if (defined(_MSC_VER) && _MSC_VER >= 1900)
|
||||
# include <time.h>
|
||||
#else
|
||||
# include "sys/time.h"
|
||||
# include <sys/time.h>
|
||||
#endif
|
||||
|
||||
static int64_t gettime_i64(void) {
|
||||
|
@ -38,7 +38,7 @@ static int64_t gettime_i64(void) {
|
|||
#define FP_MULT (1000000LL)
|
||||
|
||||
/* Format fixed point number. */
|
||||
void print_number(const int64_t x) {
|
||||
static void print_number(const int64_t x) {
|
||||
int64_t x_abs, y;
|
||||
int c, i, rounding, g; /* g = integer part size, c = fractional part size */
|
||||
size_t ptr;
|
||||
|
@ -95,7 +95,7 @@ void print_number(const int64_t x) {
|
|||
printf("%-*s", FP_EXP, &buffer[ptr + g]); /* Prints fractional part */
|
||||
}
|
||||
|
||||
void run_benchmark(char *name, void (*benchmark)(void*, int), void (*setup)(void*), void (*teardown)(void*, int), void* data, int count, int iter) {
|
||||
static void run_benchmark(char *name, void (*benchmark)(void*, int), void (*setup)(void*), void (*teardown)(void*, int), void* data, int count, int iter) {
|
||||
int i;
|
||||
int64_t min = INT64_MAX;
|
||||
int64_t sum = 0;
|
||||
|
@ -129,7 +129,7 @@ void run_benchmark(char *name, void (*benchmark)(void*, int), void (*setup)(void
|
|||
printf("\n");
|
||||
}
|
||||
|
||||
int have_flag(int argc, char** argv, char *flag) {
|
||||
static int have_flag(int argc, char** argv, char *flag) {
|
||||
char** argm = argv + argc;
|
||||
argv++;
|
||||
while (argv != argm) {
|
||||
|
@ -145,7 +145,7 @@ int have_flag(int argc, char** argv, char *flag) {
|
|||
returns:
|
||||
- 1 if the user entered an invalid argument
|
||||
- 0 if all the user entered arguments are valid */
|
||||
int have_invalid_args(int argc, char** argv, char** valid_args, size_t n) {
|
||||
static int have_invalid_args(int argc, char** argv, char** valid_args, size_t n) {
|
||||
size_t i;
|
||||
int found_valid;
|
||||
char** argm = argv + argc;
|
||||
|
@ -167,7 +167,7 @@ int have_invalid_args(int argc, char** argv, char** valid_args, size_t n) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int get_iters(int default_iters) {
|
||||
static int get_iters(int default_iters) {
|
||||
char* env = getenv("SECP256K1_BENCH_ITERS");
|
||||
if (env) {
|
||||
return strtol(env, NULL, 0);
|
||||
|
@ -176,7 +176,7 @@ int get_iters(int default_iters) {
|
|||
}
|
||||
}
|
||||
|
||||
void print_output_table_header_row(void) {
|
||||
static void print_output_table_header_row(void) {
|
||||
char* bench_str = "Benchmark"; /* left justified */
|
||||
char* min_str = " Min(us) "; /* center alignment */
|
||||
char* avg_str = " Avg(us) ";
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
|
||||
#define POINTS 32768
|
||||
|
||||
void help(char **argv) {
|
||||
static void help(char **argv) {
|
||||
printf("Benchmark EC multiplication algorithms\n");
|
||||
printf("\n");
|
||||
printf("Usage: %s <help|pippenger_wnaf|strauss_wnaf|simple>\n", argv[0]);
|
||||
|
@ -35,14 +35,14 @@ void help(char **argv) {
|
|||
|
||||
typedef struct {
|
||||
/* Setup once in advance */
|
||||
rustsecp256k1_v0_8_1_context* ctx;
|
||||
rustsecp256k1_v0_8_1_scratch_space* scratch;
|
||||
rustsecp256k1_v0_8_1_scalar* scalars;
|
||||
rustsecp256k1_v0_8_1_ge* pubkeys;
|
||||
rustsecp256k1_v0_8_1_gej* pubkeys_gej;
|
||||
rustsecp256k1_v0_8_1_scalar* seckeys;
|
||||
rustsecp256k1_v0_8_1_gej* expected_output;
|
||||
rustsecp256k1_v0_8_1_ecmult_multi_func ecmult_multi;
|
||||
rustsecp256k1_v0_9_0_context* ctx;
|
||||
rustsecp256k1_v0_9_0_scratch_space* scratch;
|
||||
rustsecp256k1_v0_9_0_scalar* scalars;
|
||||
rustsecp256k1_v0_9_0_ge* pubkeys;
|
||||
rustsecp256k1_v0_9_0_gej* pubkeys_gej;
|
||||
rustsecp256k1_v0_9_0_scalar* seckeys;
|
||||
rustsecp256k1_v0_9_0_gej* expected_output;
|
||||
rustsecp256k1_v0_9_0_ecmult_multi_func ecmult_multi;
|
||||
|
||||
/* Changes per benchmark */
|
||||
size_t count;
|
||||
|
@ -54,7 +54,7 @@ typedef struct {
|
|||
size_t offset2;
|
||||
|
||||
/* Benchmark output. */
|
||||
rustsecp256k1_v0_8_1_gej* output;
|
||||
rustsecp256k1_v0_9_0_gej* output;
|
||||
} bench_data;
|
||||
|
||||
/* Hashes x into [0, POINTS) twice and store the result in offset1 and offset2. */
|
||||
|
@ -67,24 +67,24 @@ static void hash_into_offset(bench_data* data, size_t x) {
|
|||
* sum(outputs) ?= (sum(scalars_gen) + sum(seckeys)*sum(scalars))*G */
|
||||
static void bench_ecmult_teardown_helper(bench_data* data, size_t* seckey_offset, size_t* scalar_offset, size_t* scalar_gen_offset, int iters) {
|
||||
int i;
|
||||
rustsecp256k1_v0_8_1_gej sum_output, tmp;
|
||||
rustsecp256k1_v0_8_1_scalar sum_scalars;
|
||||
rustsecp256k1_v0_9_0_gej sum_output, tmp;
|
||||
rustsecp256k1_v0_9_0_scalar sum_scalars;
|
||||
|
||||
rustsecp256k1_v0_8_1_gej_set_infinity(&sum_output);
|
||||
rustsecp256k1_v0_8_1_scalar_clear(&sum_scalars);
|
||||
rustsecp256k1_v0_9_0_gej_set_infinity(&sum_output);
|
||||
rustsecp256k1_v0_9_0_scalar_clear(&sum_scalars);
|
||||
for (i = 0; i < iters; ++i) {
|
||||
rustsecp256k1_v0_8_1_gej_add_var(&sum_output, &sum_output, &data->output[i], NULL);
|
||||
rustsecp256k1_v0_9_0_gej_add_var(&sum_output, &sum_output, &data->output[i], NULL);
|
||||
if (scalar_gen_offset != NULL) {
|
||||
rustsecp256k1_v0_8_1_scalar_add(&sum_scalars, &sum_scalars, &data->scalars[(*scalar_gen_offset+i) % POINTS]);
|
||||
rustsecp256k1_v0_9_0_scalar_add(&sum_scalars, &sum_scalars, &data->scalars[(*scalar_gen_offset+i) % POINTS]);
|
||||
}
|
||||
if (seckey_offset != NULL) {
|
||||
rustsecp256k1_v0_8_1_scalar s = data->seckeys[(*seckey_offset+i) % POINTS];
|
||||
rustsecp256k1_v0_8_1_scalar_mul(&s, &s, &data->scalars[(*scalar_offset+i) % POINTS]);
|
||||
rustsecp256k1_v0_8_1_scalar_add(&sum_scalars, &sum_scalars, &s);
|
||||
rustsecp256k1_v0_9_0_scalar s = data->seckeys[(*seckey_offset+i) % POINTS];
|
||||
rustsecp256k1_v0_9_0_scalar_mul(&s, &s, &data->scalars[(*scalar_offset+i) % POINTS]);
|
||||
rustsecp256k1_v0_9_0_scalar_add(&sum_scalars, &sum_scalars, &s);
|
||||
}
|
||||
}
|
||||
rustsecp256k1_v0_8_1_ecmult_gen(&data->ctx->ecmult_gen_ctx, &tmp, &sum_scalars);
|
||||
CHECK(rustsecp256k1_v0_8_1_gej_eq_var(&tmp, &sum_output));
|
||||
rustsecp256k1_v0_9_0_ecmult_gen(&data->ctx->ecmult_gen_ctx, &tmp, &sum_scalars);
|
||||
CHECK(rustsecp256k1_v0_9_0_gej_eq_var(&tmp, &sum_output));
|
||||
}
|
||||
|
||||
static void bench_ecmult_setup(void* arg) {
|
||||
|
@ -99,7 +99,7 @@ static void bench_ecmult_gen(void* arg, int iters) {
|
|||
int i;
|
||||
|
||||
for (i = 0; i < iters; ++i) {
|
||||
rustsecp256k1_v0_8_1_ecmult_gen(&data->ctx->ecmult_gen_ctx, &data->output[i], &data->scalars[(data->offset1+i) % POINTS]);
|
||||
rustsecp256k1_v0_9_0_ecmult_gen(&data->ctx->ecmult_gen_ctx, &data->output[i], &data->scalars[(data->offset1+i) % POINTS]);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -113,7 +113,7 @@ static void bench_ecmult_const(void* arg, int iters) {
|
|||
int i;
|
||||
|
||||
for (i = 0; i < iters; ++i) {
|
||||
rustsecp256k1_v0_8_1_ecmult_const(&data->output[i], &data->pubkeys[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], 256);
|
||||
rustsecp256k1_v0_9_0_ecmult_const(&data->output[i], &data->pubkeys[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS]);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -127,7 +127,7 @@ static void bench_ecmult_1p(void* arg, int iters) {
|
|||
int i;
|
||||
|
||||
for (i = 0; i < iters; ++i) {
|
||||
rustsecp256k1_v0_8_1_ecmult(&data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], NULL);
|
||||
rustsecp256k1_v0_9_0_ecmult(&data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], NULL);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -138,12 +138,10 @@ static void bench_ecmult_1p_teardown(void* arg, int iters) {
|
|||
|
||||
static void bench_ecmult_0p_g(void* arg, int iters) {
|
||||
bench_data* data = (bench_data*)arg;
|
||||
rustsecp256k1_v0_8_1_scalar zero;
|
||||
int i;
|
||||
|
||||
rustsecp256k1_v0_8_1_scalar_set_int(&zero, 0);
|
||||
for (i = 0; i < iters; ++i) {
|
||||
rustsecp256k1_v0_8_1_ecmult(&data->output[i], NULL, &zero, &data->scalars[(data->offset1+i) % POINTS]);
|
||||
rustsecp256k1_v0_9_0_ecmult(&data->output[i], NULL, &rustsecp256k1_v0_9_0_scalar_zero, &data->scalars[(data->offset1+i) % POINTS]);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -157,7 +155,7 @@ static void bench_ecmult_1p_g(void* arg, int iters) {
|
|||
int i;
|
||||
|
||||
for (i = 0; i < iters/2; ++i) {
|
||||
rustsecp256k1_v0_8_1_ecmult(&data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], &data->scalars[(data->offset1+i) % POINTS]);
|
||||
rustsecp256k1_v0_9_0_ecmult(&data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], &data->scalars[(data->offset1+i) % POINTS]);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -183,12 +181,12 @@ static void run_ecmult_bench(bench_data* data, int iters) {
|
|||
run_benchmark(str, bench_ecmult_1p_g, bench_ecmult_setup, bench_ecmult_1p_g_teardown, data, 10, 2*iters);
|
||||
}
|
||||
|
||||
static int bench_ecmult_multi_callback(rustsecp256k1_v0_8_1_scalar* sc, rustsecp256k1_v0_8_1_ge* ge, size_t idx, void* arg) {
|
||||
static int bench_ecmult_multi_callback(rustsecp256k1_v0_9_0_scalar* sc, rustsecp256k1_v0_9_0_ge* ge, size_t idx, void* arg) {
|
||||
bench_data* data = (bench_data*)arg;
|
||||
if (data->includes_g) ++idx;
|
||||
if (idx == 0) {
|
||||
*sc = data->scalars[data->offset1];
|
||||
*ge = rustsecp256k1_v0_8_1_ge_const_g;
|
||||
*ge = rustsecp256k1_v0_9_0_ge_const_g;
|
||||
} else {
|
||||
*sc = data->scalars[(data->offset1 + idx) % POINTS];
|
||||
*ge = data->pubkeys[(data->offset2 + idx - 1) % POINTS];
|
||||
|
@ -222,14 +220,14 @@ static void bench_ecmult_multi_teardown(void* arg, int iters) {
|
|||
iters = iters / data->count;
|
||||
/* Verify the results in teardown, to avoid doing comparisons while benchmarking. */
|
||||
for (iter = 0; iter < iters; ++iter) {
|
||||
rustsecp256k1_v0_8_1_gej tmp;
|
||||
rustsecp256k1_v0_8_1_gej_add_var(&tmp, &data->output[iter], &data->expected_output[iter], NULL);
|
||||
CHECK(rustsecp256k1_v0_8_1_gej_is_infinity(&tmp));
|
||||
rustsecp256k1_v0_9_0_gej tmp;
|
||||
rustsecp256k1_v0_9_0_gej_add_var(&tmp, &data->output[iter], &data->expected_output[iter], NULL);
|
||||
CHECK(rustsecp256k1_v0_9_0_gej_is_infinity(&tmp));
|
||||
}
|
||||
}
|
||||
|
||||
static void generate_scalar(uint32_t num, rustsecp256k1_v0_8_1_scalar* scalar) {
|
||||
rustsecp256k1_v0_8_1_sha256 sha256;
|
||||
static void generate_scalar(uint32_t num, rustsecp256k1_v0_9_0_scalar* scalar) {
|
||||
rustsecp256k1_v0_9_0_sha256 sha256;
|
||||
unsigned char c[10] = {'e', 'c', 'm', 'u', 'l', 't', 0, 0, 0, 0};
|
||||
unsigned char buf[32];
|
||||
int overflow = 0;
|
||||
|
@ -237,16 +235,15 @@ static void generate_scalar(uint32_t num, rustsecp256k1_v0_8_1_scalar* scalar) {
|
|||
c[7] = num >> 8;
|
||||
c[8] = num >> 16;
|
||||
c[9] = num >> 24;
|
||||
rustsecp256k1_v0_8_1_sha256_initialize(&sha256);
|
||||
rustsecp256k1_v0_8_1_sha256_write(&sha256, c, sizeof(c));
|
||||
rustsecp256k1_v0_8_1_sha256_finalize(&sha256, buf);
|
||||
rustsecp256k1_v0_8_1_scalar_set_b32(scalar, buf, &overflow);
|
||||
rustsecp256k1_v0_9_0_sha256_initialize(&sha256);
|
||||
rustsecp256k1_v0_9_0_sha256_write(&sha256, c, sizeof(c));
|
||||
rustsecp256k1_v0_9_0_sha256_finalize(&sha256, buf);
|
||||
rustsecp256k1_v0_9_0_scalar_set_b32(scalar, buf, &overflow);
|
||||
CHECK(!overflow);
|
||||
}
|
||||
|
||||
static void run_ecmult_multi_bench(bench_data* data, size_t count, int includes_g, int num_iters) {
|
||||
char str[32];
|
||||
static const rustsecp256k1_v0_8_1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
|
||||
size_t iters = 1 + num_iters / count;
|
||||
size_t iter;
|
||||
|
||||
|
@ -256,15 +253,15 @@ static void run_ecmult_multi_bench(bench_data* data, size_t count, int includes_
|
|||
/* Compute (the negation of) the expected results directly. */
|
||||
hash_into_offset(data, data->count);
|
||||
for (iter = 0; iter < iters; ++iter) {
|
||||
rustsecp256k1_v0_8_1_scalar tmp;
|
||||
rustsecp256k1_v0_8_1_scalar total = data->scalars[(data->offset1++) % POINTS];
|
||||
rustsecp256k1_v0_9_0_scalar tmp;
|
||||
rustsecp256k1_v0_9_0_scalar total = data->scalars[(data->offset1++) % POINTS];
|
||||
size_t i = 0;
|
||||
for (i = 0; i + 1 < count; ++i) {
|
||||
rustsecp256k1_v0_8_1_scalar_mul(&tmp, &data->seckeys[(data->offset2++) % POINTS], &data->scalars[(data->offset1++) % POINTS]);
|
||||
rustsecp256k1_v0_8_1_scalar_add(&total, &total, &tmp);
|
||||
rustsecp256k1_v0_9_0_scalar_mul(&tmp, &data->seckeys[(data->offset2++) % POINTS], &data->scalars[(data->offset1++) % POINTS]);
|
||||
rustsecp256k1_v0_9_0_scalar_add(&total, &total, &tmp);
|
||||
}
|
||||
rustsecp256k1_v0_8_1_scalar_negate(&total, &total);
|
||||
rustsecp256k1_v0_8_1_ecmult(&data->expected_output[iter], NULL, &zero, &total);
|
||||
rustsecp256k1_v0_9_0_scalar_negate(&total, &total);
|
||||
rustsecp256k1_v0_9_0_ecmult(&data->expected_output[iter], NULL, &rustsecp256k1_v0_9_0_scalar_zero, &total);
|
||||
}
|
||||
|
||||
/* Run the benchmark. */
|
||||
|
@ -283,7 +280,7 @@ int main(int argc, char **argv) {
|
|||
|
||||
int iters = get_iters(10000);
|
||||
|
||||
data.ecmult_multi = rustsecp256k1_v0_8_1_ecmult_multi_var;
|
||||
data.ecmult_multi = rustsecp256k1_v0_9_0_ecmult_multi_var;
|
||||
|
||||
if (argc > 1) {
|
||||
if(have_flag(argc, argv, "-h")
|
||||
|
@ -293,10 +290,10 @@ int main(int argc, char **argv) {
|
|||
return 0;
|
||||
} else if(have_flag(argc, argv, "pippenger_wnaf")) {
|
||||
printf("Using pippenger_wnaf:\n");
|
||||
data.ecmult_multi = rustsecp256k1_v0_8_1_ecmult_pippenger_batch_single;
|
||||
data.ecmult_multi = rustsecp256k1_v0_9_0_ecmult_pippenger_batch_single;
|
||||
} else if(have_flag(argc, argv, "strauss_wnaf")) {
|
||||
printf("Using strauss_wnaf:\n");
|
||||
data.ecmult_multi = rustsecp256k1_v0_8_1_ecmult_strauss_batch_single;
|
||||
data.ecmult_multi = rustsecp256k1_v0_9_0_ecmult_strauss_batch_single;
|
||||
} else if(have_flag(argc, argv, "simple")) {
|
||||
printf("Using simple algorithm:\n");
|
||||
} else {
|
||||
|
@ -306,33 +303,33 @@ int main(int argc, char **argv) {
|
|||
}
|
||||
}
|
||||
|
||||
data.ctx = rustsecp256k1_v0_8_1_context_create(SECP256K1_CONTEXT_NONE);
|
||||
scratch_size = rustsecp256k1_v0_8_1_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16;
|
||||
data.ctx = rustsecp256k1_v0_9_0_context_create(SECP256K1_CONTEXT_NONE);
|
||||
scratch_size = rustsecp256k1_v0_9_0_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16;
|
||||
if (!have_flag(argc, argv, "simple")) {
|
||||
data.scratch = rustsecp256k1_v0_8_1_scratch_space_create(data.ctx, scratch_size);
|
||||
data.scratch = rustsecp256k1_v0_9_0_scratch_space_create(data.ctx, scratch_size);
|
||||
} else {
|
||||
data.scratch = NULL;
|
||||
}
|
||||
|
||||
/* Allocate stuff */
|
||||
data.scalars = malloc(sizeof(rustsecp256k1_v0_8_1_scalar) * POINTS);
|
||||
data.seckeys = malloc(sizeof(rustsecp256k1_v0_8_1_scalar) * POINTS);
|
||||
data.pubkeys = malloc(sizeof(rustsecp256k1_v0_8_1_ge) * POINTS);
|
||||
data.pubkeys_gej = malloc(sizeof(rustsecp256k1_v0_8_1_gej) * POINTS);
|
||||
data.expected_output = malloc(sizeof(rustsecp256k1_v0_8_1_gej) * (iters + 1));
|
||||
data.output = malloc(sizeof(rustsecp256k1_v0_8_1_gej) * (iters + 1));
|
||||
data.scalars = malloc(sizeof(rustsecp256k1_v0_9_0_scalar) * POINTS);
|
||||
data.seckeys = malloc(sizeof(rustsecp256k1_v0_9_0_scalar) * POINTS);
|
||||
data.pubkeys = malloc(sizeof(rustsecp256k1_v0_9_0_ge) * POINTS);
|
||||
data.pubkeys_gej = malloc(sizeof(rustsecp256k1_v0_9_0_gej) * POINTS);
|
||||
data.expected_output = malloc(sizeof(rustsecp256k1_v0_9_0_gej) * (iters + 1));
|
||||
data.output = malloc(sizeof(rustsecp256k1_v0_9_0_gej) * (iters + 1));
|
||||
|
||||
/* Generate a set of scalars, and private/public keypairs. */
|
||||
rustsecp256k1_v0_8_1_gej_set_ge(&data.pubkeys_gej[0], &rustsecp256k1_v0_8_1_ge_const_g);
|
||||
rustsecp256k1_v0_8_1_scalar_set_int(&data.seckeys[0], 1);
|
||||
rustsecp256k1_v0_9_0_gej_set_ge(&data.pubkeys_gej[0], &rustsecp256k1_v0_9_0_ge_const_g);
|
||||
rustsecp256k1_v0_9_0_scalar_set_int(&data.seckeys[0], 1);
|
||||
for (i = 0; i < POINTS; ++i) {
|
||||
generate_scalar(i, &data.scalars[i]);
|
||||
if (i) {
|
||||
rustsecp256k1_v0_8_1_gej_double_var(&data.pubkeys_gej[i], &data.pubkeys_gej[i - 1], NULL);
|
||||
rustsecp256k1_v0_8_1_scalar_add(&data.seckeys[i], &data.seckeys[i - 1], &data.seckeys[i - 1]);
|
||||
rustsecp256k1_v0_9_0_gej_double_var(&data.pubkeys_gej[i], &data.pubkeys_gej[i - 1], NULL);
|
||||
rustsecp256k1_v0_9_0_scalar_add(&data.seckeys[i], &data.seckeys[i - 1], &data.seckeys[i - 1]);
|
||||
}
|
||||
}
|
||||
rustsecp256k1_v0_8_1_ge_set_all_gej_var(data.pubkeys, data.pubkeys_gej, POINTS);
|
||||
rustsecp256k1_v0_9_0_ge_set_all_gej_var(data.pubkeys, data.pubkeys_gej, POINTS);
|
||||
|
||||
|
||||
print_output_table_header_row();
|
||||
|
@ -356,9 +353,9 @@ int main(int argc, char **argv) {
|
|||
}
|
||||
|
||||
if (data.scratch != NULL) {
|
||||
rustsecp256k1_v0_8_1_scratch_space_destroy(data.ctx, data.scratch);
|
||||
rustsecp256k1_v0_9_0_scratch_space_destroy(data.ctx, data.scratch);
|
||||
}
|
||||
rustsecp256k1_v0_8_1_context_destroy(data.ctx);
|
||||
rustsecp256k1_v0_9_0_context_destroy(data.ctx);
|
||||
free(data.scalars);
|
||||
free(data.pubkeys);
|
||||
free(data.pubkeys_gej);
|
||||
|
|
|
@ -19,15 +19,15 @@
|
|||
#include "bench.h"
|
||||
|
||||
typedef struct {
|
||||
rustsecp256k1_v0_8_1_scalar scalar[2];
|
||||
rustsecp256k1_v0_8_1_fe fe[4];
|
||||
rustsecp256k1_v0_8_1_ge ge[2];
|
||||
rustsecp256k1_v0_8_1_gej gej[2];
|
||||
rustsecp256k1_v0_9_0_scalar scalar[2];
|
||||
rustsecp256k1_v0_9_0_fe fe[4];
|
||||
rustsecp256k1_v0_9_0_ge ge[2];
|
||||
rustsecp256k1_v0_9_0_gej gej[2];
|
||||
unsigned char data[64];
|
||||
int wnaf[256];
|
||||
} bench_inv;
|
||||
|
||||
void bench_setup(void* arg) {
|
||||
static void bench_setup(void* arg) {
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
static const unsigned char init[4][32] = {
|
||||
|
@ -63,291 +63,304 @@ void bench_setup(void* arg) {
|
|||
}
|
||||
};
|
||||
|
||||
rustsecp256k1_v0_8_1_scalar_set_b32(&data->scalar[0], init[0], NULL);
|
||||
rustsecp256k1_v0_8_1_scalar_set_b32(&data->scalar[1], init[1], NULL);
|
||||
rustsecp256k1_v0_8_1_fe_set_b32(&data->fe[0], init[0]);
|
||||
rustsecp256k1_v0_8_1_fe_set_b32(&data->fe[1], init[1]);
|
||||
rustsecp256k1_v0_8_1_fe_set_b32(&data->fe[2], init[2]);
|
||||
rustsecp256k1_v0_8_1_fe_set_b32(&data->fe[3], init[3]);
|
||||
CHECK(rustsecp256k1_v0_8_1_ge_set_xo_var(&data->ge[0], &data->fe[0], 0));
|
||||
CHECK(rustsecp256k1_v0_8_1_ge_set_xo_var(&data->ge[1], &data->fe[1], 1));
|
||||
rustsecp256k1_v0_8_1_gej_set_ge(&data->gej[0], &data->ge[0]);
|
||||
rustsecp256k1_v0_8_1_gej_rescale(&data->gej[0], &data->fe[2]);
|
||||
rustsecp256k1_v0_8_1_gej_set_ge(&data->gej[1], &data->ge[1]);
|
||||
rustsecp256k1_v0_8_1_gej_rescale(&data->gej[1], &data->fe[3]);
|
||||
rustsecp256k1_v0_9_0_scalar_set_b32(&data->scalar[0], init[0], NULL);
|
||||
rustsecp256k1_v0_9_0_scalar_set_b32(&data->scalar[1], init[1], NULL);
|
||||
rustsecp256k1_v0_9_0_fe_set_b32_limit(&data->fe[0], init[0]);
|
||||
rustsecp256k1_v0_9_0_fe_set_b32_limit(&data->fe[1], init[1]);
|
||||
rustsecp256k1_v0_9_0_fe_set_b32_limit(&data->fe[2], init[2]);
|
||||
rustsecp256k1_v0_9_0_fe_set_b32_limit(&data->fe[3], init[3]);
|
||||
CHECK(rustsecp256k1_v0_9_0_ge_set_xo_var(&data->ge[0], &data->fe[0], 0));
|
||||
CHECK(rustsecp256k1_v0_9_0_ge_set_xo_var(&data->ge[1], &data->fe[1], 1));
|
||||
rustsecp256k1_v0_9_0_gej_set_ge(&data->gej[0], &data->ge[0]);
|
||||
rustsecp256k1_v0_9_0_gej_rescale(&data->gej[0], &data->fe[2]);
|
||||
rustsecp256k1_v0_9_0_gej_set_ge(&data->gej[1], &data->ge[1]);
|
||||
rustsecp256k1_v0_9_0_gej_rescale(&data->gej[1], &data->fe[3]);
|
||||
memcpy(data->data, init[0], 32);
|
||||
memcpy(data->data + 32, init[1], 32);
|
||||
}
|
||||
|
||||
void bench_scalar_add(void* arg, int iters) {
|
||||
static void bench_scalar_add(void* arg, int iters) {
|
||||
int i, j = 0;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
j += rustsecp256k1_v0_8_1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
|
||||
j += rustsecp256k1_v0_9_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
|
||||
}
|
||||
CHECK(j <= iters);
|
||||
}
|
||||
|
||||
void bench_scalar_negate(void* arg, int iters) {
|
||||
static void bench_scalar_negate(void* arg, int iters) {
|
||||
int i;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_scalar_negate(&data->scalar[0], &data->scalar[0]);
|
||||
rustsecp256k1_v0_9_0_scalar_negate(&data->scalar[0], &data->scalar[0]);
|
||||
}
|
||||
}
|
||||
|
||||
void bench_scalar_mul(void* arg, int iters) {
|
||||
static void bench_scalar_mul(void* arg, int iters) {
|
||||
int i;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_scalar_mul(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
|
||||
rustsecp256k1_v0_9_0_scalar_mul(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
|
||||
}
|
||||
}
|
||||
|
||||
void bench_scalar_split(void* arg, int iters) {
|
||||
static void bench_scalar_split(void* arg, int iters) {
|
||||
int i, j = 0;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
rustsecp256k1_v0_9_0_scalar tmp;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_scalar_split_lambda(&data->scalar[0], &data->scalar[1], &data->scalar[0]);
|
||||
j += rustsecp256k1_v0_8_1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
|
||||
rustsecp256k1_v0_9_0_scalar_split_lambda(&tmp, &data->scalar[1], &data->scalar[0]);
|
||||
j += rustsecp256k1_v0_9_0_scalar_add(&data->scalar[0], &tmp, &data->scalar[1]);
|
||||
}
|
||||
CHECK(j <= iters);
|
||||
}
|
||||
|
||||
void bench_scalar_inverse(void* arg, int iters) {
|
||||
static void bench_scalar_inverse(void* arg, int iters) {
|
||||
int i, j = 0;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_scalar_inverse(&data->scalar[0], &data->scalar[0]);
|
||||
j += rustsecp256k1_v0_8_1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
|
||||
rustsecp256k1_v0_9_0_scalar_inverse(&data->scalar[0], &data->scalar[0]);
|
||||
j += rustsecp256k1_v0_9_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
|
||||
}
|
||||
CHECK(j <= iters);
|
||||
}
|
||||
|
||||
void bench_scalar_inverse_var(void* arg, int iters) {
|
||||
static void bench_scalar_inverse_var(void* arg, int iters) {
|
||||
int i, j = 0;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_scalar_inverse_var(&data->scalar[0], &data->scalar[0]);
|
||||
j += rustsecp256k1_v0_8_1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
|
||||
rustsecp256k1_v0_9_0_scalar_inverse_var(&data->scalar[0], &data->scalar[0]);
|
||||
j += rustsecp256k1_v0_9_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
|
||||
}
|
||||
CHECK(j <= iters);
|
||||
}
|
||||
|
||||
void bench_field_half(void* arg, int iters) {
|
||||
static void bench_field_half(void* arg, int iters) {
|
||||
int i;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_fe_half(&data->fe[0]);
|
||||
rustsecp256k1_v0_9_0_fe_half(&data->fe[0]);
|
||||
}
|
||||
}
|
||||
|
||||
void bench_field_normalize(void* arg, int iters) {
|
||||
static void bench_field_normalize(void* arg, int iters) {
|
||||
int i;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_fe_normalize(&data->fe[0]);
|
||||
rustsecp256k1_v0_9_0_fe_normalize(&data->fe[0]);
|
||||
}
|
||||
}
|
||||
|
||||
void bench_field_normalize_weak(void* arg, int iters) {
|
||||
static void bench_field_normalize_weak(void* arg, int iters) {
|
||||
int i;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_fe_normalize_weak(&data->fe[0]);
|
||||
rustsecp256k1_v0_9_0_fe_normalize_weak(&data->fe[0]);
|
||||
}
|
||||
}
|
||||
|
||||
void bench_field_mul(void* arg, int iters) {
|
||||
static void bench_field_mul(void* arg, int iters) {
|
||||
int i;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_fe_mul(&data->fe[0], &data->fe[0], &data->fe[1]);
|
||||
rustsecp256k1_v0_9_0_fe_mul(&data->fe[0], &data->fe[0], &data->fe[1]);
|
||||
}
|
||||
}
|
||||
|
||||
void bench_field_sqr(void* arg, int iters) {
|
||||
static void bench_field_sqr(void* arg, int iters) {
|
||||
int i;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_fe_sqr(&data->fe[0], &data->fe[0]);
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&data->fe[0], &data->fe[0]);
|
||||
}
|
||||
}
|
||||
|
||||
void bench_field_inverse(void* arg, int iters) {
|
||||
static void bench_field_inverse(void* arg, int iters) {
|
||||
int i;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_fe_inv(&data->fe[0], &data->fe[0]);
|
||||
rustsecp256k1_v0_8_1_fe_add(&data->fe[0], &data->fe[1]);
|
||||
rustsecp256k1_v0_9_0_fe_inv(&data->fe[0], &data->fe[0]);
|
||||
rustsecp256k1_v0_9_0_fe_add(&data->fe[0], &data->fe[1]);
|
||||
}
|
||||
}
|
||||
|
||||
void bench_field_inverse_var(void* arg, int iters) {
|
||||
static void bench_field_inverse_var(void* arg, int iters) {
|
||||
int i;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_fe_inv_var(&data->fe[0], &data->fe[0]);
|
||||
rustsecp256k1_v0_8_1_fe_add(&data->fe[0], &data->fe[1]);
|
||||
rustsecp256k1_v0_9_0_fe_inv_var(&data->fe[0], &data->fe[0]);
|
||||
rustsecp256k1_v0_9_0_fe_add(&data->fe[0], &data->fe[1]);
|
||||
}
|
||||
}
|
||||
|
||||
void bench_field_sqrt(void* arg, int iters) {
|
||||
static void bench_field_sqrt(void* arg, int iters) {
|
||||
int i, j = 0;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
rustsecp256k1_v0_8_1_fe t;
|
||||
rustsecp256k1_v0_9_0_fe t;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
t = data->fe[0];
|
||||
j += rustsecp256k1_v0_8_1_fe_sqrt(&data->fe[0], &t);
|
||||
rustsecp256k1_v0_8_1_fe_add(&data->fe[0], &data->fe[1]);
|
||||
j += rustsecp256k1_v0_9_0_fe_sqrt(&data->fe[0], &t);
|
||||
rustsecp256k1_v0_9_0_fe_add(&data->fe[0], &data->fe[1]);
|
||||
}
|
||||
CHECK(j <= iters);
|
||||
}
|
||||
|
||||
void bench_group_double_var(void* arg, int iters) {
|
||||
static void bench_field_is_square_var(void* arg, int iters) {
|
||||
int i, j = 0;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
rustsecp256k1_v0_9_0_fe t = data->fe[0];
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
j += rustsecp256k1_v0_9_0_fe_is_square_var(&t);
|
||||
rustsecp256k1_v0_9_0_fe_add(&t, &data->fe[1]);
|
||||
rustsecp256k1_v0_9_0_fe_normalize_var(&t);
|
||||
}
|
||||
CHECK(j <= iters);
|
||||
}
|
||||
|
||||
static void bench_group_double_var(void* arg, int iters) {
|
||||
int i;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_gej_double_var(&data->gej[0], &data->gej[0], NULL);
|
||||
rustsecp256k1_v0_9_0_gej_double_var(&data->gej[0], &data->gej[0], NULL);
|
||||
}
|
||||
}
|
||||
|
||||
void bench_group_add_var(void* arg, int iters) {
|
||||
static void bench_group_add_var(void* arg, int iters) {
|
||||
int i;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_gej_add_var(&data->gej[0], &data->gej[0], &data->gej[1], NULL);
|
||||
rustsecp256k1_v0_9_0_gej_add_var(&data->gej[0], &data->gej[0], &data->gej[1], NULL);
|
||||
}
|
||||
}
|
||||
|
||||
void bench_group_add_affine(void* arg, int iters) {
|
||||
static void bench_group_add_affine(void* arg, int iters) {
|
||||
int i;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_gej_add_ge(&data->gej[0], &data->gej[0], &data->ge[1]);
|
||||
rustsecp256k1_v0_9_0_gej_add_ge(&data->gej[0], &data->gej[0], &data->ge[1]);
|
||||
}
|
||||
}
|
||||
|
||||
void bench_group_add_affine_var(void* arg, int iters) {
|
||||
static void bench_group_add_affine_var(void* arg, int iters) {
|
||||
int i;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_gej_add_ge_var(&data->gej[0], &data->gej[0], &data->ge[1], NULL);
|
||||
rustsecp256k1_v0_9_0_gej_add_ge_var(&data->gej[0], &data->gej[0], &data->ge[1], NULL);
|
||||
}
|
||||
}
|
||||
|
||||
void bench_group_add_zinv_var(void* arg, int iters) {
|
||||
static void bench_group_add_zinv_var(void* arg, int iters) {
|
||||
int i;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_gej_add_zinv_var(&data->gej[0], &data->gej[0], &data->ge[1], &data->gej[0].y);
|
||||
rustsecp256k1_v0_9_0_gej_add_zinv_var(&data->gej[0], &data->gej[0], &data->ge[1], &data->gej[0].y);
|
||||
}
|
||||
}
|
||||
|
||||
void bench_group_to_affine_var(void* arg, int iters) {
|
||||
static void bench_group_to_affine_var(void* arg, int iters) {
|
||||
int i;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; ++i) {
|
||||
rustsecp256k1_v0_8_1_ge_set_gej_var(&data->ge[1], &data->gej[0]);
|
||||
rustsecp256k1_v0_9_0_ge_set_gej_var(&data->ge[1], &data->gej[0]);
|
||||
/* Use the output affine X/Y coordinates to vary the input X/Y/Z coordinates.
|
||||
Note that the resulting coordinates will generally not correspond to a point
|
||||
on the curve, but this is not a problem for the code being benchmarked here.
|
||||
Adding and normalizing have less overhead than EC operations (which could
|
||||
guarantee the point remains on the curve). */
|
||||
rustsecp256k1_v0_8_1_fe_add(&data->gej[0].x, &data->ge[1].y);
|
||||
rustsecp256k1_v0_8_1_fe_add(&data->gej[0].y, &data->fe[2]);
|
||||
rustsecp256k1_v0_8_1_fe_add(&data->gej[0].z, &data->ge[1].x);
|
||||
rustsecp256k1_v0_8_1_fe_normalize_var(&data->gej[0].x);
|
||||
rustsecp256k1_v0_8_1_fe_normalize_var(&data->gej[0].y);
|
||||
rustsecp256k1_v0_8_1_fe_normalize_var(&data->gej[0].z);
|
||||
rustsecp256k1_v0_9_0_fe_add(&data->gej[0].x, &data->ge[1].y);
|
||||
rustsecp256k1_v0_9_0_fe_add(&data->gej[0].y, &data->fe[2]);
|
||||
rustsecp256k1_v0_9_0_fe_add(&data->gej[0].z, &data->ge[1].x);
|
||||
rustsecp256k1_v0_9_0_fe_normalize_var(&data->gej[0].x);
|
||||
rustsecp256k1_v0_9_0_fe_normalize_var(&data->gej[0].y);
|
||||
rustsecp256k1_v0_9_0_fe_normalize_var(&data->gej[0].z);
|
||||
}
|
||||
}
|
||||
|
||||
void bench_ecmult_wnaf(void* arg, int iters) {
|
||||
static void bench_ecmult_wnaf(void* arg, int iters) {
|
||||
int i, bits = 0, overflow = 0;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
bits += rustsecp256k1_v0_8_1_ecmult_wnaf(data->wnaf, 256, &data->scalar[0], WINDOW_A);
|
||||
overflow += rustsecp256k1_v0_8_1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
|
||||
bits += rustsecp256k1_v0_9_0_ecmult_wnaf(data->wnaf, 256, &data->scalar[0], WINDOW_A);
|
||||
overflow += rustsecp256k1_v0_9_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
|
||||
}
|
||||
CHECK(overflow >= 0);
|
||||
CHECK(bits <= 256*iters);
|
||||
}
|
||||
|
||||
void bench_wnaf_const(void* arg, int iters) {
|
||||
static void bench_wnaf_const(void* arg, int iters) {
|
||||
int i, bits = 0, overflow = 0;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
bits += rustsecp256k1_v0_8_1_wnaf_const(data->wnaf, &data->scalar[0], WINDOW_A, 256);
|
||||
overflow += rustsecp256k1_v0_8_1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
|
||||
bits += rustsecp256k1_v0_9_0_wnaf_const(data->wnaf, &data->scalar[0], WINDOW_A, 256);
|
||||
overflow += rustsecp256k1_v0_9_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
|
||||
}
|
||||
CHECK(overflow >= 0);
|
||||
CHECK(bits <= 256*iters);
|
||||
}
|
||||
|
||||
|
||||
void bench_sha256(void* arg, int iters) {
|
||||
static void bench_sha256(void* arg, int iters) {
|
||||
int i;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
rustsecp256k1_v0_8_1_sha256 sha;
|
||||
rustsecp256k1_v0_9_0_sha256 sha;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_sha256_initialize(&sha);
|
||||
rustsecp256k1_v0_8_1_sha256_write(&sha, data->data, 32);
|
||||
rustsecp256k1_v0_8_1_sha256_finalize(&sha, data->data);
|
||||
rustsecp256k1_v0_9_0_sha256_initialize(&sha);
|
||||
rustsecp256k1_v0_9_0_sha256_write(&sha, data->data, 32);
|
||||
rustsecp256k1_v0_9_0_sha256_finalize(&sha, data->data);
|
||||
}
|
||||
}
|
||||
|
||||
void bench_hmac_sha256(void* arg, int iters) {
|
||||
static void bench_hmac_sha256(void* arg, int iters) {
|
||||
int i;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
rustsecp256k1_v0_8_1_hmac_sha256 hmac;
|
||||
rustsecp256k1_v0_9_0_hmac_sha256 hmac;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_initialize(&hmac, data->data, 32);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_write(&hmac, data->data, 32);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_finalize(&hmac, data->data);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_initialize(&hmac, data->data, 32);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_write(&hmac, data->data, 32);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_finalize(&hmac, data->data);
|
||||
}
|
||||
}
|
||||
|
||||
void bench_rfc6979_hmac_sha256(void* arg, int iters) {
|
||||
static void bench_rfc6979_hmac_sha256(void* arg, int iters) {
|
||||
int i;
|
||||
bench_inv *data = (bench_inv*)arg;
|
||||
rustsecp256k1_v0_8_1_rfc6979_hmac_sha256 rng;
|
||||
rustsecp256k1_v0_9_0_rfc6979_hmac_sha256 rng;
|
||||
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_rfc6979_hmac_sha256_initialize(&rng, data->data, 64);
|
||||
rustsecp256k1_v0_8_1_rfc6979_hmac_sha256_generate(&rng, data->data, 32);
|
||||
rustsecp256k1_v0_9_0_rfc6979_hmac_sha256_initialize(&rng, data->data, 64);
|
||||
rustsecp256k1_v0_9_0_rfc6979_hmac_sha256_generate(&rng, data->data, 32);
|
||||
}
|
||||
}
|
||||
|
||||
void bench_context(void* arg, int iters) {
|
||||
static void bench_context(void* arg, int iters) {
|
||||
int i;
|
||||
(void)arg;
|
||||
for (i = 0; i < iters; i++) {
|
||||
rustsecp256k1_v0_8_1_context_destroy(rustsecp256k1_v0_8_1_context_create(SECP256K1_CONTEXT_NONE));
|
||||
rustsecp256k1_v0_9_0_context_destroy(rustsecp256k1_v0_9_0_context_create(SECP256K1_CONTEXT_NONE));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -371,6 +384,7 @@ int main(int argc, char **argv) {
|
|||
if (d || have_flag(argc, argv, "field") || have_flag(argc, argv, "mul")) run_benchmark("field_mul", bench_field_mul, bench_setup, NULL, &data, 10, iters*10);
|
||||
if (d || have_flag(argc, argv, "field") || have_flag(argc, argv, "inverse")) run_benchmark("field_inverse", bench_field_inverse, bench_setup, NULL, &data, 10, iters);
|
||||
if (d || have_flag(argc, argv, "field") || have_flag(argc, argv, "inverse")) run_benchmark("field_inverse_var", bench_field_inverse_var, bench_setup, NULL, &data, 10, iters);
|
||||
if (d || have_flag(argc, argv, "field") || have_flag(argc, argv, "issquare")) run_benchmark("field_is_square_var", bench_field_is_square_var, bench_setup, NULL, &data, 10, iters);
|
||||
if (d || have_flag(argc, argv, "field") || have_flag(argc, argv, "sqrt")) run_benchmark("field_sqrt", bench_field_sqrt, bench_setup, NULL, &data, 10, iters);
|
||||
|
||||
if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "double")) run_benchmark("group_double_var", bench_group_double_var, bench_setup, NULL, &data, 10, iters*10);
|
||||
|
|
|
@ -0,0 +1,95 @@
|
|||
/***********************************************************************
|
||||
* Copyright (c) 2022 Pieter Wuille *
|
||||
* Distributed under the MIT software license, see the accompanying *
|
||||
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
|
||||
***********************************************************************/
|
||||
|
||||
/* The code here is inspired by Kris Kwiatkowski's approach in
|
||||
* https://github.com/kriskwiatkowski/pqc/blob/main/src/common/ct_check.h
|
||||
* to provide a general interface for memory-checking mechanisms, primarily
|
||||
* for constant-time checking.
|
||||
*/
|
||||
|
||||
/* These macros are defined by this header file:
|
||||
*
|
||||
* - SECP256K1_CHECKMEM_ENABLED:
|
||||
* - 1 if memory-checking integration is available, 0 otherwise.
|
||||
* This is just a compile-time macro. Use the next macro to check it is actually
|
||||
* available at runtime.
|
||||
* - SECP256K1_CHECKMEM_RUNNING():
|
||||
* - Acts like a function call, returning 1 if memory checking is available
|
||||
* at runtime.
|
||||
* - SECP256K1_CHECKMEM_CHECK(p, len):
|
||||
* - Assert or otherwise fail in case the len-byte memory block pointed to by p is
|
||||
* not considered entirely defined.
|
||||
* - SECP256K1_CHECKMEM_CHECK_VERIFY(p, len):
|
||||
* - Like SECP256K1_CHECKMEM_CHECK, but only works in VERIFY mode.
|
||||
* - SECP256K1_CHECKMEM_UNDEFINE(p, len):
|
||||
* - marks the len-byte memory block pointed to by p as undefined data (secret data,
|
||||
* in the context of constant-time checking).
|
||||
* - SECP256K1_CHECKMEM_DEFINE(p, len):
|
||||
* - marks the len-byte memory pointed to by p as defined data (public data, in the
|
||||
* context of constant-time checking).
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SECP256K1_CHECKMEM_H
|
||||
#define SECP256K1_CHECKMEM_H
|
||||
|
||||
/* Define a statement-like macro that ignores the arguments. */
|
||||
#define SECP256K1_CHECKMEM_NOOP(p, len) do { (void)(p); (void)(len); } while(0)
|
||||
|
||||
/* If compiling under msan, map the SECP256K1_CHECKMEM_* functionality to msan.
|
||||
* Choose this preferentially, even when VALGRIND is defined, as msan-compiled
|
||||
* binaries can't be run under valgrind anyway. */
|
||||
#if defined(__has_feature)
|
||||
# if __has_feature(memory_sanitizer)
|
||||
# include <sanitizer/msan_interface.h>
|
||||
# define SECP256K1_CHECKMEM_ENABLED 1
|
||||
# define SECP256K1_CHECKMEM_UNDEFINE(p, len) __msan_allocated_memory((p), (len))
|
||||
# define SECP256K1_CHECKMEM_DEFINE(p, len) __msan_unpoison((p), (len))
|
||||
# define SECP256K1_CHECKMEM_CHECK(p, len) __msan_check_mem_is_initialized((p), (len))
|
||||
# define SECP256K1_CHECKMEM_RUNNING() (1)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/* If valgrind integration is desired (through the VALGRIND define), implement the
|
||||
* SECP256K1_CHECKMEM_* macros using valgrind. */
|
||||
#if !defined SECP256K1_CHECKMEM_ENABLED
|
||||
# if defined VALGRIND
|
||||
# include <stddef.h>
|
||||
# if defined(__clang__) && defined(__APPLE__)
|
||||
# pragma clang diagnostic push
|
||||
# pragma clang diagnostic ignored "-Wreserved-identifier"
|
||||
# endif
|
||||
# include <valgrind/memcheck.h>
|
||||
# if defined(__clang__) && defined(__APPLE__)
|
||||
# pragma clang diagnostic pop
|
||||
# endif
|
||||
# define SECP256K1_CHECKMEM_ENABLED 1
|
||||
# define SECP256K1_CHECKMEM_UNDEFINE(p, len) VALGRIND_MAKE_MEM_UNDEFINED((p), (len))
|
||||
# define SECP256K1_CHECKMEM_DEFINE(p, len) VALGRIND_MAKE_MEM_DEFINED((p), (len))
|
||||
# define SECP256K1_CHECKMEM_CHECK(p, len) VALGRIND_CHECK_MEM_IS_DEFINED((p), (len))
|
||||
/* VALGRIND_MAKE_MEM_DEFINED returns 0 iff not running on memcheck.
|
||||
* This is more precise than the RUNNING_ON_VALGRIND macro, which
|
||||
* checks for valgrind in general instead of memcheck specifically. */
|
||||
# define SECP256K1_CHECKMEM_RUNNING() (VALGRIND_MAKE_MEM_DEFINED(NULL, 0) != 0)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/* As a fall-back, map these macros to dummy statements. */
|
||||
#if !defined SECP256K1_CHECKMEM_ENABLED
|
||||
# define SECP256K1_CHECKMEM_ENABLED 0
|
||||
# define SECP256K1_CHECKMEM_UNDEFINE(p, len) SECP256K1_CHECKMEM_NOOP((p), (len))
|
||||
# define SECP256K1_CHECKMEM_DEFINE(p, len) SECP256K1_CHECKMEM_NOOP((p), (len))
|
||||
# define SECP256K1_CHECKMEM_CHECK(p, len) SECP256K1_CHECKMEM_NOOP((p), (len))
|
||||
# define SECP256K1_CHECKMEM_RUNNING() (0)
|
||||
#endif
|
||||
|
||||
#if defined VERIFY
|
||||
#define SECP256K1_CHECKMEM_CHECK_VERIFY(p, len) SECP256K1_CHECKMEM_CHECK((p), (len))
|
||||
#else
|
||||
#define SECP256K1_CHECKMEM_CHECK_VERIFY(p, len) SECP256K1_CHECKMEM_NOOP((p), (len))
|
||||
#endif
|
||||
|
||||
#endif /* SECP256K1_CHECKMEM_H */
|
|
@ -0,0 +1,209 @@
|
|||
/***********************************************************************
|
||||
* Copyright (c) 2020 Gregory Maxwell *
|
||||
* Distributed under the MIT software license, see the accompanying *
|
||||
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
|
||||
***********************************************************************/
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#include "../include/secp256k1.h"
|
||||
#include "assumptions.h"
|
||||
#include "checkmem.h"
|
||||
|
||||
#if !SECP256K1_CHECKMEM_ENABLED
|
||||
# error "This tool cannot be compiled without memory-checking interface (valgrind or msan)"
|
||||
#endif
|
||||
|
||||
#ifdef ENABLE_MODULE_ECDH
|
||||
# include "../include/rustsecp256k1_v0_9_0_ecdh.h"
|
||||
#endif
|
||||
|
||||
#ifdef ENABLE_MODULE_RECOVERY
|
||||
# include "../include/rustsecp256k1_v0_9_0_recovery.h"
|
||||
#endif
|
||||
|
||||
#ifdef ENABLE_MODULE_EXTRAKEYS
|
||||
# include "../include/rustsecp256k1_v0_9_0_extrakeys.h"
|
||||
#endif
|
||||
|
||||
#ifdef ENABLE_MODULE_SCHNORRSIG
|
||||
#include "../include/secp256k1_schnorrsig.h"
|
||||
#endif
|
||||
|
||||
#ifdef ENABLE_MODULE_ELLSWIFT
|
||||
#include "../include/secp256k1_ellswift.h"
|
||||
#endif
|
||||
|
||||
static void run_tests(rustsecp256k1_v0_9_0_context *ctx, unsigned char *key);
|
||||
|
||||
int main(void) {
|
||||
rustsecp256k1_v0_9_0_context* ctx;
|
||||
unsigned char key[32];
|
||||
int ret, i;
|
||||
|
||||
if (!SECP256K1_CHECKMEM_RUNNING()) {
|
||||
fprintf(stderr, "This test can only usefully be run inside valgrind because it was not compiled under msan.\n");
|
||||
fprintf(stderr, "Usage: libtool --mode=execute valgrind ./ctime_tests\n");
|
||||
return 1;
|
||||
}
|
||||
ctx = rustsecp256k1_v0_9_0_context_create(SECP256K1_CONTEXT_DECLASSIFY);
|
||||
/** In theory, testing with a single secret input should be sufficient:
|
||||
* If control flow depended on secrets the tool would generate an error.
|
||||
*/
|
||||
for (i = 0; i < 32; i++) {
|
||||
key[i] = i + 65;
|
||||
}
|
||||
|
||||
run_tests(ctx, key);
|
||||
|
||||
/* Test context randomisation. Do this last because it leaves the context
|
||||
* tainted. */
|
||||
SECP256K1_CHECKMEM_UNDEFINE(key, 32);
|
||||
ret = rustsecp256k1_v0_9_0_context_randomize(ctx, key);
|
||||
SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret));
|
||||
CHECK(ret);
|
||||
|
||||
rustsecp256k1_v0_9_0_context_destroy(ctx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void run_tests(rustsecp256k1_v0_9_0_context *ctx, unsigned char *key) {
|
||||
rustsecp256k1_v0_9_0_ecdsa_signature signature;
|
||||
rustsecp256k1_v0_9_0_pubkey pubkey;
|
||||
size_t siglen = 74;
|
||||
size_t outputlen = 33;
|
||||
int i;
|
||||
int ret;
|
||||
unsigned char msg[32];
|
||||
unsigned char sig[74];
|
||||
unsigned char spubkey[33];
|
||||
#ifdef ENABLE_MODULE_RECOVERY
|
||||
rustsecp256k1_v0_9_0_ecdsa_recoverable_signature recoverable_signature;
|
||||
int recid;
|
||||
#endif
|
||||
#ifdef ENABLE_MODULE_EXTRAKEYS
|
||||
rustsecp256k1_v0_9_0_keypair keypair;
|
||||
#endif
|
||||
#ifdef ENABLE_MODULE_ELLSWIFT
|
||||
unsigned char ellswift[64];
|
||||
static const unsigned char prefix[64] = {'t', 'e', 's', 't'};
|
||||
#endif
|
||||
|
||||
for (i = 0; i < 32; i++) {
|
||||
msg[i] = i + 1;
|
||||
}
|
||||
|
||||
/* Test keygen. */
|
||||
SECP256K1_CHECKMEM_UNDEFINE(key, 32);
|
||||
ret = rustsecp256k1_v0_9_0_ec_pubkey_create(ctx, &pubkey, key);
|
||||
SECP256K1_CHECKMEM_DEFINE(&pubkey, sizeof(rustsecp256k1_v0_9_0_pubkey));
|
||||
SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret));
|
||||
CHECK(ret);
|
||||
CHECK(rustsecp256k1_v0_9_0_ec_pubkey_serialize(ctx, spubkey, &outputlen, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
|
||||
|
||||
/* Test signing. */
|
||||
SECP256K1_CHECKMEM_UNDEFINE(key, 32);
|
||||
ret = rustsecp256k1_v0_9_0_ecdsa_sign(ctx, &signature, msg, key, NULL, NULL);
|
||||
SECP256K1_CHECKMEM_DEFINE(&signature, sizeof(rustsecp256k1_v0_9_0_ecdsa_signature));
|
||||
SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret));
|
||||
CHECK(ret);
|
||||
CHECK(rustsecp256k1_v0_9_0_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature));
|
||||
|
||||
#ifdef ENABLE_MODULE_ECDH
|
||||
/* Test ECDH. */
|
||||
SECP256K1_CHECKMEM_UNDEFINE(key, 32);
|
||||
ret = rustsecp256k1_v0_9_0_ecdh(ctx, msg, &pubkey, key, NULL, NULL);
|
||||
SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret));
|
||||
CHECK(ret == 1);
|
||||
#endif
|
||||
|
||||
#ifdef ENABLE_MODULE_RECOVERY
|
||||
/* Test signing a recoverable signature. */
|
||||
SECP256K1_CHECKMEM_UNDEFINE(key, 32);
|
||||
ret = rustsecp256k1_v0_9_0_ecdsa_sign_recoverable(ctx, &recoverable_signature, msg, key, NULL, NULL);
|
||||
SECP256K1_CHECKMEM_DEFINE(&recoverable_signature, sizeof(recoverable_signature));
|
||||
SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret));
|
||||
CHECK(ret);
|
||||
CHECK(rustsecp256k1_v0_9_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &recoverable_signature));
|
||||
CHECK(recid >= 0 && recid <= 3);
|
||||
#endif
|
||||
|
||||
SECP256K1_CHECKMEM_UNDEFINE(key, 32);
|
||||
ret = rustsecp256k1_v0_9_0_ec_seckey_verify(ctx, key);
|
||||
SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret));
|
||||
CHECK(ret == 1);
|
||||
|
||||
SECP256K1_CHECKMEM_UNDEFINE(key, 32);
|
||||
ret = rustsecp256k1_v0_9_0_ec_seckey_negate(ctx, key);
|
||||
SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret));
|
||||
CHECK(ret == 1);
|
||||
|
||||
SECP256K1_CHECKMEM_UNDEFINE(key, 32);
|
||||
SECP256K1_CHECKMEM_UNDEFINE(msg, 32);
|
||||
ret = rustsecp256k1_v0_9_0_ec_seckey_tweak_add(ctx, key, msg);
|
||||
SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret));
|
||||
CHECK(ret == 1);
|
||||
|
||||
SECP256K1_CHECKMEM_UNDEFINE(key, 32);
|
||||
SECP256K1_CHECKMEM_UNDEFINE(msg, 32);
|
||||
ret = rustsecp256k1_v0_9_0_ec_seckey_tweak_mul(ctx, key, msg);
|
||||
SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret));
|
||||
CHECK(ret == 1);
|
||||
|
||||
/* Test keypair_create and keypair_xonly_tweak_add. */
|
||||
#ifdef ENABLE_MODULE_EXTRAKEYS
|
||||
SECP256K1_CHECKMEM_UNDEFINE(key, 32);
|
||||
ret = rustsecp256k1_v0_9_0_keypair_create(ctx, &keypair, key);
|
||||
SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret));
|
||||
CHECK(ret == 1);
|
||||
|
||||
/* The tweak is not treated as a secret in keypair_tweak_add */
|
||||
SECP256K1_CHECKMEM_DEFINE(msg, 32);
|
||||
ret = rustsecp256k1_v0_9_0_keypair_xonly_tweak_add(ctx, &keypair, msg);
|
||||
SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret));
|
||||
CHECK(ret == 1);
|
||||
|
||||
SECP256K1_CHECKMEM_UNDEFINE(key, 32);
|
||||
SECP256K1_CHECKMEM_UNDEFINE(&keypair, sizeof(keypair));
|
||||
ret = rustsecp256k1_v0_9_0_keypair_sec(ctx, key, &keypair);
|
||||
SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret));
|
||||
CHECK(ret == 1);
|
||||
#endif
|
||||
|
||||
#ifdef ENABLE_MODULE_SCHNORRSIG
|
||||
SECP256K1_CHECKMEM_UNDEFINE(key, 32);
|
||||
ret = rustsecp256k1_v0_9_0_keypair_create(ctx, &keypair, key);
|
||||
SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret));
|
||||
CHECK(ret == 1);
|
||||
ret = rustsecp256k1_v0_9_0_schnorrsig_sign32(ctx, sig, msg, &keypair, NULL);
|
||||
SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret));
|
||||
CHECK(ret == 1);
|
||||
#endif
|
||||
|
||||
#ifdef ENABLE_MODULE_ELLSWIFT
|
||||
SECP256K1_CHECKMEM_UNDEFINE(key, 32);
|
||||
ret = rustsecp256k1_v0_9_0_ellswift_create(ctx, ellswift, key, NULL);
|
||||
SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret));
|
||||
CHECK(ret == 1);
|
||||
|
||||
SECP256K1_CHECKMEM_UNDEFINE(key, 32);
|
||||
ret = rustsecp256k1_v0_9_0_ellswift_create(ctx, ellswift, key, ellswift);
|
||||
SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret));
|
||||
CHECK(ret == 1);
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
SECP256K1_CHECKMEM_UNDEFINE(key, 32);
|
||||
SECP256K1_CHECKMEM_DEFINE(&ellswift, sizeof(ellswift));
|
||||
ret = rustsecp256k1_v0_9_0_ellswift_xdh(ctx, msg, ellswift, ellswift, key, i, rustsecp256k1_v0_9_0_ellswift_xdh_hash_function_bip324, NULL);
|
||||
SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret));
|
||||
CHECK(ret == 1);
|
||||
|
||||
SECP256K1_CHECKMEM_UNDEFINE(key, 32);
|
||||
SECP256K1_CHECKMEM_DEFINE(&ellswift, sizeof(ellswift));
|
||||
ret = rustsecp256k1_v0_9_0_ellswift_xdh(ctx, msg, ellswift, ellswift, key, i, rustsecp256k1_v0_9_0_ellswift_xdh_hash_function_prefix, (void *)prefix);
|
||||
SECP256K1_CHECKMEM_DEFINE(&ret, sizeof(ret));
|
||||
CHECK(ret == 1);
|
||||
}
|
||||
|
||||
#endif
|
||||
}
|
|
@ -13,9 +13,9 @@
|
|||
#include "group.h"
|
||||
#include "ecmult.h"
|
||||
|
||||
static int rustsecp256k1_v0_8_1_ecdsa_sig_parse(rustsecp256k1_v0_8_1_scalar *r, rustsecp256k1_v0_8_1_scalar *s, const unsigned char *sig, size_t size);
|
||||
static int rustsecp256k1_v0_8_1_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_8_1_scalar *r, const rustsecp256k1_v0_8_1_scalar *s);
|
||||
static int rustsecp256k1_v0_8_1_ecdsa_sig_verify(const rustsecp256k1_v0_8_1_scalar* r, const rustsecp256k1_v0_8_1_scalar* s, const rustsecp256k1_v0_8_1_ge *pubkey, const rustsecp256k1_v0_8_1_scalar *message);
|
||||
static int rustsecp256k1_v0_8_1_ecdsa_sig_sign(const rustsecp256k1_v0_8_1_ecmult_gen_context *ctx, rustsecp256k1_v0_8_1_scalar* r, rustsecp256k1_v0_8_1_scalar* s, const rustsecp256k1_v0_8_1_scalar *seckey, const rustsecp256k1_v0_8_1_scalar *message, const rustsecp256k1_v0_8_1_scalar *nonce, int *recid);
|
||||
static int rustsecp256k1_v0_9_0_ecdsa_sig_parse(rustsecp256k1_v0_9_0_scalar *r, rustsecp256k1_v0_9_0_scalar *s, const unsigned char *sig, size_t size);
|
||||
static int rustsecp256k1_v0_9_0_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_9_0_scalar *r, const rustsecp256k1_v0_9_0_scalar *s);
|
||||
static int rustsecp256k1_v0_9_0_ecdsa_sig_verify(const rustsecp256k1_v0_9_0_scalar* r, const rustsecp256k1_v0_9_0_scalar* s, const rustsecp256k1_v0_9_0_ge *pubkey, const rustsecp256k1_v0_9_0_scalar *message);
|
||||
static int rustsecp256k1_v0_9_0_ecdsa_sig_sign(const rustsecp256k1_v0_9_0_ecmult_gen_context *ctx, rustsecp256k1_v0_9_0_scalar* r, rustsecp256k1_v0_9_0_scalar* s, const rustsecp256k1_v0_9_0_scalar *seckey, const rustsecp256k1_v0_9_0_scalar *message, const rustsecp256k1_v0_9_0_scalar *nonce, int *recid);
|
||||
|
||||
#endif /* SECP256K1_ECDSA_H */
|
||||
|
|
|
@ -16,37 +16,24 @@
|
|||
#include "ecdsa.h"
|
||||
|
||||
/** Group order for secp256k1 defined as 'n' in "Standards for Efficient Cryptography" (SEC2) 2.7.1
|
||||
* sage: for t in xrange(1023, -1, -1):
|
||||
* .. p = 2**256 - 2**32 - t
|
||||
* .. if p.is_prime():
|
||||
* .. print '%x'%p
|
||||
* .. break
|
||||
* 'fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f'
|
||||
* sage: a = 0
|
||||
* sage: b = 7
|
||||
* sage: F = FiniteField (p)
|
||||
* sage: '%x' % (EllipticCurve ([F (a), F (b)]).order())
|
||||
* 'fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141'
|
||||
* $ sage -c 'load("rustsecp256k1_v0_9_0_params.sage"); print(hex(N))'
|
||||
* 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141
|
||||
*/
|
||||
static const rustsecp256k1_v0_8_1_fe rustsecp256k1_v0_8_1_ecdsa_const_order_as_fe = SECP256K1_FE_CONST(
|
||||
static const rustsecp256k1_v0_9_0_fe rustsecp256k1_v0_9_0_ecdsa_const_order_as_fe = SECP256K1_FE_CONST(
|
||||
0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL,
|
||||
0xBAAEDCE6UL, 0xAF48A03BUL, 0xBFD25E8CUL, 0xD0364141UL
|
||||
);
|
||||
|
||||
/** Difference between field and order, values 'p' and 'n' values defined in
|
||||
* "Standards for Efficient Cryptography" (SEC2) 2.7.1.
|
||||
* sage: p = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
|
||||
* sage: a = 0
|
||||
* sage: b = 7
|
||||
* sage: F = FiniteField (p)
|
||||
* sage: '%x' % (p - EllipticCurve ([F (a), F (b)]).order())
|
||||
* '14551231950b75fc4402da1722fc9baee'
|
||||
* $ sage -c 'load("rustsecp256k1_v0_9_0_params.sage"); print(hex(P-N))'
|
||||
* 0x14551231950b75fc4402da1722fc9baee
|
||||
*/
|
||||
static const rustsecp256k1_v0_8_1_fe rustsecp256k1_v0_8_1_ecdsa_const_p_minus_order = SECP256K1_FE_CONST(
|
||||
static const rustsecp256k1_v0_9_0_fe rustsecp256k1_v0_9_0_ecdsa_const_p_minus_order = SECP256K1_FE_CONST(
|
||||
0, 0, 0, 1, 0x45512319UL, 0x50B75FC4UL, 0x402DA172UL, 0x2FC9BAEEUL
|
||||
);
|
||||
|
||||
static int rustsecp256k1_v0_8_1_der_read_len(size_t *len, const unsigned char **sigp, const unsigned char *sigend) {
|
||||
static int rustsecp256k1_v0_9_0_der_read_len(size_t *len, const unsigned char **sigp, const unsigned char *sigend) {
|
||||
size_t lenleft;
|
||||
unsigned char b1;
|
||||
VERIFY_CHECK(len != NULL);
|
||||
|
@ -99,7 +86,7 @@ static int rustsecp256k1_v0_8_1_der_read_len(size_t *len, const unsigned char **
|
|||
return 1;
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_der_parse_integer(rustsecp256k1_v0_8_1_scalar *r, const unsigned char **sig, const unsigned char *sigend) {
|
||||
static int rustsecp256k1_v0_9_0_der_parse_integer(rustsecp256k1_v0_9_0_scalar *r, const unsigned char **sig, const unsigned char *sigend) {
|
||||
int overflow = 0;
|
||||
unsigned char ra[32] = {0};
|
||||
size_t rlen;
|
||||
|
@ -109,7 +96,7 @@ static int rustsecp256k1_v0_8_1_der_parse_integer(rustsecp256k1_v0_8_1_scalar *r
|
|||
return 0;
|
||||
}
|
||||
(*sig)++;
|
||||
if (rustsecp256k1_v0_8_1_der_read_len(&rlen, sig, sigend) == 0) {
|
||||
if (rustsecp256k1_v0_9_0_der_read_len(&rlen, sig, sigend) == 0) {
|
||||
return 0;
|
||||
}
|
||||
if (rlen == 0 || rlen > (size_t)(sigend - *sig)) {
|
||||
|
@ -141,23 +128,23 @@ static int rustsecp256k1_v0_8_1_der_parse_integer(rustsecp256k1_v0_8_1_scalar *r
|
|||
}
|
||||
if (!overflow) {
|
||||
if (rlen) memcpy(ra + 32 - rlen, *sig, rlen);
|
||||
rustsecp256k1_v0_8_1_scalar_set_b32(r, ra, &overflow);
|
||||
rustsecp256k1_v0_9_0_scalar_set_b32(r, ra, &overflow);
|
||||
}
|
||||
if (overflow) {
|
||||
rustsecp256k1_v0_8_1_scalar_set_int(r, 0);
|
||||
rustsecp256k1_v0_9_0_scalar_set_int(r, 0);
|
||||
}
|
||||
(*sig) += rlen;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_ecdsa_sig_parse(rustsecp256k1_v0_8_1_scalar *rr, rustsecp256k1_v0_8_1_scalar *rs, const unsigned char *sig, size_t size) {
|
||||
static int rustsecp256k1_v0_9_0_ecdsa_sig_parse(rustsecp256k1_v0_9_0_scalar *rr, rustsecp256k1_v0_9_0_scalar *rs, const unsigned char *sig, size_t size) {
|
||||
const unsigned char *sigend = sig + size;
|
||||
size_t rlen;
|
||||
if (sig == sigend || *(sig++) != 0x30) {
|
||||
/* The encoding doesn't start with a constructed sequence (X.690-0207 8.9.1). */
|
||||
return 0;
|
||||
}
|
||||
if (rustsecp256k1_v0_8_1_der_read_len(&rlen, &sig, sigend) == 0) {
|
||||
if (rustsecp256k1_v0_9_0_der_read_len(&rlen, &sig, sigend) == 0) {
|
||||
return 0;
|
||||
}
|
||||
if (rlen != (size_t)(sigend - sig)) {
|
||||
|
@ -165,10 +152,10 @@ static int rustsecp256k1_v0_8_1_ecdsa_sig_parse(rustsecp256k1_v0_8_1_scalar *rr,
|
|||
return 0;
|
||||
}
|
||||
|
||||
if (!rustsecp256k1_v0_8_1_der_parse_integer(rr, &sig, sigend)) {
|
||||
if (!rustsecp256k1_v0_9_0_der_parse_integer(rr, &sig, sigend)) {
|
||||
return 0;
|
||||
}
|
||||
if (!rustsecp256k1_v0_8_1_der_parse_integer(rs, &sig, sigend)) {
|
||||
if (!rustsecp256k1_v0_9_0_der_parse_integer(rs, &sig, sigend)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -180,12 +167,12 @@ static int rustsecp256k1_v0_8_1_ecdsa_sig_parse(rustsecp256k1_v0_8_1_scalar *rr,
|
|||
return 1;
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_8_1_scalar* ar, const rustsecp256k1_v0_8_1_scalar* as) {
|
||||
static int rustsecp256k1_v0_9_0_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_9_0_scalar* ar, const rustsecp256k1_v0_9_0_scalar* as) {
|
||||
unsigned char r[33] = {0}, s[33] = {0};
|
||||
unsigned char *rp = r, *sp = s;
|
||||
size_t lenR = 33, lenS = 33;
|
||||
rustsecp256k1_v0_8_1_scalar_get_b32(&r[1], ar);
|
||||
rustsecp256k1_v0_8_1_scalar_get_b32(&s[1], as);
|
||||
rustsecp256k1_v0_9_0_scalar_get_b32(&r[1], ar);
|
||||
rustsecp256k1_v0_9_0_scalar_get_b32(&s[1], as);
|
||||
while (lenR > 1 && rp[0] == 0 && rp[1] < 0x80) { lenR--; rp++; }
|
||||
while (lenS > 1 && sp[0] == 0 && sp[1] < 0x80) { lenS--; sp++; }
|
||||
if (*size < 6+lenS+lenR) {
|
||||
|
@ -204,42 +191,43 @@ static int rustsecp256k1_v0_8_1_ecdsa_sig_serialize(unsigned char *sig, size_t *
|
|||
return 1;
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_ecdsa_sig_verify(const rustsecp256k1_v0_8_1_scalar *sigr, const rustsecp256k1_v0_8_1_scalar *sigs, const rustsecp256k1_v0_8_1_ge *pubkey, const rustsecp256k1_v0_8_1_scalar *message) {
|
||||
static int rustsecp256k1_v0_9_0_ecdsa_sig_verify(const rustsecp256k1_v0_9_0_scalar *sigr, const rustsecp256k1_v0_9_0_scalar *sigs, const rustsecp256k1_v0_9_0_ge *pubkey, const rustsecp256k1_v0_9_0_scalar *message) {
|
||||
unsigned char c[32];
|
||||
rustsecp256k1_v0_8_1_scalar sn, u1, u2;
|
||||
rustsecp256k1_v0_9_0_scalar sn, u1, u2;
|
||||
#if !defined(EXHAUSTIVE_TEST_ORDER)
|
||||
rustsecp256k1_v0_8_1_fe xr;
|
||||
rustsecp256k1_v0_9_0_fe xr;
|
||||
#endif
|
||||
rustsecp256k1_v0_8_1_gej pubkeyj;
|
||||
rustsecp256k1_v0_8_1_gej pr;
|
||||
rustsecp256k1_v0_9_0_gej pubkeyj;
|
||||
rustsecp256k1_v0_9_0_gej pr;
|
||||
|
||||
if (rustsecp256k1_v0_8_1_scalar_is_zero(sigr) || rustsecp256k1_v0_8_1_scalar_is_zero(sigs)) {
|
||||
if (rustsecp256k1_v0_9_0_scalar_is_zero(sigr) || rustsecp256k1_v0_9_0_scalar_is_zero(sigs)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
rustsecp256k1_v0_8_1_scalar_inverse_var(&sn, sigs);
|
||||
rustsecp256k1_v0_8_1_scalar_mul(&u1, &sn, message);
|
||||
rustsecp256k1_v0_8_1_scalar_mul(&u2, &sn, sigr);
|
||||
rustsecp256k1_v0_8_1_gej_set_ge(&pubkeyj, pubkey);
|
||||
rustsecp256k1_v0_8_1_ecmult(&pr, &pubkeyj, &u2, &u1);
|
||||
if (rustsecp256k1_v0_8_1_gej_is_infinity(&pr)) {
|
||||
rustsecp256k1_v0_9_0_scalar_inverse_var(&sn, sigs);
|
||||
rustsecp256k1_v0_9_0_scalar_mul(&u1, &sn, message);
|
||||
rustsecp256k1_v0_9_0_scalar_mul(&u2, &sn, sigr);
|
||||
rustsecp256k1_v0_9_0_gej_set_ge(&pubkeyj, pubkey);
|
||||
rustsecp256k1_v0_9_0_ecmult(&pr, &pubkeyj, &u2, &u1);
|
||||
if (rustsecp256k1_v0_9_0_gej_is_infinity(&pr)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if defined(EXHAUSTIVE_TEST_ORDER)
|
||||
{
|
||||
rustsecp256k1_v0_8_1_scalar computed_r;
|
||||
rustsecp256k1_v0_8_1_ge pr_ge;
|
||||
rustsecp256k1_v0_8_1_ge_set_gej(&pr_ge, &pr);
|
||||
rustsecp256k1_v0_8_1_fe_normalize(&pr_ge.x);
|
||||
rustsecp256k1_v0_9_0_scalar computed_r;
|
||||
rustsecp256k1_v0_9_0_ge pr_ge;
|
||||
rustsecp256k1_v0_9_0_ge_set_gej(&pr_ge, &pr);
|
||||
rustsecp256k1_v0_9_0_fe_normalize(&pr_ge.x);
|
||||
|
||||
rustsecp256k1_v0_8_1_fe_get_b32(c, &pr_ge.x);
|
||||
rustsecp256k1_v0_8_1_scalar_set_b32(&computed_r, c, NULL);
|
||||
return rustsecp256k1_v0_8_1_scalar_eq(sigr, &computed_r);
|
||||
rustsecp256k1_v0_9_0_fe_get_b32(c, &pr_ge.x);
|
||||
rustsecp256k1_v0_9_0_scalar_set_b32(&computed_r, c, NULL);
|
||||
return rustsecp256k1_v0_9_0_scalar_eq(sigr, &computed_r);
|
||||
}
|
||||
#else
|
||||
rustsecp256k1_v0_8_1_scalar_get_b32(c, sigr);
|
||||
rustsecp256k1_v0_8_1_fe_set_b32(&xr, c);
|
||||
rustsecp256k1_v0_9_0_scalar_get_b32(c, sigr);
|
||||
/* we can ignore the fe_set_b32_limit return value, because we know the input is in range */
|
||||
(void)rustsecp256k1_v0_9_0_fe_set_b32_limit(&xr, c);
|
||||
|
||||
/** We now have the recomputed R point in pr, and its claimed x coordinate (modulo n)
|
||||
* in xr. Naively, we would extract the x coordinate from pr (requiring a inversion modulo p),
|
||||
|
@ -255,18 +243,18 @@ static int rustsecp256k1_v0_8_1_ecdsa_sig_verify(const rustsecp256k1_v0_8_1_scal
|
|||
* <=> (xr * pr.z^2 mod p == pr.x) || (xr + n < p && (xr + n) * pr.z^2 mod p == pr.x)
|
||||
*
|
||||
* Thus, we can avoid the inversion, but we have to check both cases separately.
|
||||
* rustsecp256k1_v0_8_1_gej_eq_x implements the (xr * pr.z^2 mod p == pr.x) test.
|
||||
* rustsecp256k1_v0_9_0_gej_eq_x implements the (xr * pr.z^2 mod p == pr.x) test.
|
||||
*/
|
||||
if (rustsecp256k1_v0_8_1_gej_eq_x_var(&xr, &pr)) {
|
||||
if (rustsecp256k1_v0_9_0_gej_eq_x_var(&xr, &pr)) {
|
||||
/* xr * pr.z^2 mod p == pr.x, so the signature is valid. */
|
||||
return 1;
|
||||
}
|
||||
if (rustsecp256k1_v0_8_1_fe_cmp_var(&xr, &rustsecp256k1_v0_8_1_ecdsa_const_p_minus_order) >= 0) {
|
||||
if (rustsecp256k1_v0_9_0_fe_cmp_var(&xr, &rustsecp256k1_v0_9_0_ecdsa_const_p_minus_order) >= 0) {
|
||||
/* xr + n >= p, so we can skip testing the second case. */
|
||||
return 0;
|
||||
}
|
||||
rustsecp256k1_v0_8_1_fe_add(&xr, &rustsecp256k1_v0_8_1_ecdsa_const_order_as_fe);
|
||||
if (rustsecp256k1_v0_8_1_gej_eq_x_var(&xr, &pr)) {
|
||||
rustsecp256k1_v0_9_0_fe_add(&xr, &rustsecp256k1_v0_9_0_ecdsa_const_order_as_fe);
|
||||
if (rustsecp256k1_v0_9_0_gej_eq_x_var(&xr, &pr)) {
|
||||
/* (xr + n) * pr.z^2 mod p == pr.x, so the signature is valid. */
|
||||
return 1;
|
||||
}
|
||||
|
@ -274,42 +262,42 @@ static int rustsecp256k1_v0_8_1_ecdsa_sig_verify(const rustsecp256k1_v0_8_1_scal
|
|||
#endif
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_ecdsa_sig_sign(const rustsecp256k1_v0_8_1_ecmult_gen_context *ctx, rustsecp256k1_v0_8_1_scalar *sigr, rustsecp256k1_v0_8_1_scalar *sigs, const rustsecp256k1_v0_8_1_scalar *seckey, const rustsecp256k1_v0_8_1_scalar *message, const rustsecp256k1_v0_8_1_scalar *nonce, int *recid) {
|
||||
static int rustsecp256k1_v0_9_0_ecdsa_sig_sign(const rustsecp256k1_v0_9_0_ecmult_gen_context *ctx, rustsecp256k1_v0_9_0_scalar *sigr, rustsecp256k1_v0_9_0_scalar *sigs, const rustsecp256k1_v0_9_0_scalar *seckey, const rustsecp256k1_v0_9_0_scalar *message, const rustsecp256k1_v0_9_0_scalar *nonce, int *recid) {
|
||||
unsigned char b[32];
|
||||
rustsecp256k1_v0_8_1_gej rp;
|
||||
rustsecp256k1_v0_8_1_ge r;
|
||||
rustsecp256k1_v0_8_1_scalar n;
|
||||
rustsecp256k1_v0_9_0_gej rp;
|
||||
rustsecp256k1_v0_9_0_ge r;
|
||||
rustsecp256k1_v0_9_0_scalar n;
|
||||
int overflow = 0;
|
||||
int high;
|
||||
|
||||
rustsecp256k1_v0_8_1_ecmult_gen(ctx, &rp, nonce);
|
||||
rustsecp256k1_v0_8_1_ge_set_gej(&r, &rp);
|
||||
rustsecp256k1_v0_8_1_fe_normalize(&r.x);
|
||||
rustsecp256k1_v0_8_1_fe_normalize(&r.y);
|
||||
rustsecp256k1_v0_8_1_fe_get_b32(b, &r.x);
|
||||
rustsecp256k1_v0_8_1_scalar_set_b32(sigr, b, &overflow);
|
||||
rustsecp256k1_v0_9_0_ecmult_gen(ctx, &rp, nonce);
|
||||
rustsecp256k1_v0_9_0_ge_set_gej(&r, &rp);
|
||||
rustsecp256k1_v0_9_0_fe_normalize(&r.x);
|
||||
rustsecp256k1_v0_9_0_fe_normalize(&r.y);
|
||||
rustsecp256k1_v0_9_0_fe_get_b32(b, &r.x);
|
||||
rustsecp256k1_v0_9_0_scalar_set_b32(sigr, b, &overflow);
|
||||
if (recid) {
|
||||
/* The overflow condition is cryptographically unreachable as hitting it requires finding the discrete log
|
||||
* of some P where P.x >= order, and only 1 in about 2^127 points meet this criteria.
|
||||
*/
|
||||
*recid = (overflow << 1) | rustsecp256k1_v0_8_1_fe_is_odd(&r.y);
|
||||
*recid = (overflow << 1) | rustsecp256k1_v0_9_0_fe_is_odd(&r.y);
|
||||
}
|
||||
rustsecp256k1_v0_8_1_scalar_mul(&n, sigr, seckey);
|
||||
rustsecp256k1_v0_8_1_scalar_add(&n, &n, message);
|
||||
rustsecp256k1_v0_8_1_scalar_inverse(sigs, nonce);
|
||||
rustsecp256k1_v0_8_1_scalar_mul(sigs, sigs, &n);
|
||||
rustsecp256k1_v0_8_1_scalar_clear(&n);
|
||||
rustsecp256k1_v0_8_1_gej_clear(&rp);
|
||||
rustsecp256k1_v0_8_1_ge_clear(&r);
|
||||
high = rustsecp256k1_v0_8_1_scalar_is_high(sigs);
|
||||
rustsecp256k1_v0_8_1_scalar_cond_negate(sigs, high);
|
||||
rustsecp256k1_v0_9_0_scalar_mul(&n, sigr, seckey);
|
||||
rustsecp256k1_v0_9_0_scalar_add(&n, &n, message);
|
||||
rustsecp256k1_v0_9_0_scalar_inverse(sigs, nonce);
|
||||
rustsecp256k1_v0_9_0_scalar_mul(sigs, sigs, &n);
|
||||
rustsecp256k1_v0_9_0_scalar_clear(&n);
|
||||
rustsecp256k1_v0_9_0_gej_clear(&rp);
|
||||
rustsecp256k1_v0_9_0_ge_clear(&r);
|
||||
high = rustsecp256k1_v0_9_0_scalar_is_high(sigs);
|
||||
rustsecp256k1_v0_9_0_scalar_cond_negate(sigs, high);
|
||||
if (recid) {
|
||||
*recid ^= high;
|
||||
}
|
||||
/* P.x = order is on the curve, so technically sig->r could end up being zero, which would be an invalid signature.
|
||||
* This is cryptographically unreachable as hitting it requires finding the discrete log of P.x = N.
|
||||
*/
|
||||
return (int)(!rustsecp256k1_v0_8_1_scalar_is_zero(sigr)) & (int)(!rustsecp256k1_v0_8_1_scalar_is_zero(sigs));
|
||||
return (int)(!rustsecp256k1_v0_9_0_scalar_is_zero(sigr)) & (int)(!rustsecp256k1_v0_9_0_scalar_is_zero(sigs));
|
||||
}
|
||||
|
||||
#endif /* SECP256K1_ECDSA_IMPL_H */
|
||||
|
|
|
@ -14,12 +14,12 @@
|
|||
#include "ecmult.h"
|
||||
#include "ecmult_gen.h"
|
||||
|
||||
static int rustsecp256k1_v0_8_1_eckey_pubkey_parse(rustsecp256k1_v0_8_1_ge *elem, const unsigned char *pub, size_t size);
|
||||
static int rustsecp256k1_v0_8_1_eckey_pubkey_serialize(rustsecp256k1_v0_8_1_ge *elem, unsigned char *pub, size_t *size, int compressed);
|
||||
static int rustsecp256k1_v0_9_0_eckey_pubkey_parse(rustsecp256k1_v0_9_0_ge *elem, const unsigned char *pub, size_t size);
|
||||
static int rustsecp256k1_v0_9_0_eckey_pubkey_serialize(rustsecp256k1_v0_9_0_ge *elem, unsigned char *pub, size_t *size, int compressed);
|
||||
|
||||
static int rustsecp256k1_v0_8_1_eckey_privkey_tweak_add(rustsecp256k1_v0_8_1_scalar *key, const rustsecp256k1_v0_8_1_scalar *tweak);
|
||||
static int rustsecp256k1_v0_8_1_eckey_pubkey_tweak_add(rustsecp256k1_v0_8_1_ge *key, const rustsecp256k1_v0_8_1_scalar *tweak);
|
||||
static int rustsecp256k1_v0_8_1_eckey_privkey_tweak_mul(rustsecp256k1_v0_8_1_scalar *key, const rustsecp256k1_v0_8_1_scalar *tweak);
|
||||
static int rustsecp256k1_v0_8_1_eckey_pubkey_tweak_mul(rustsecp256k1_v0_8_1_ge *key, const rustsecp256k1_v0_8_1_scalar *tweak);
|
||||
static int rustsecp256k1_v0_9_0_eckey_privkey_tweak_add(rustsecp256k1_v0_9_0_scalar *key, const rustsecp256k1_v0_9_0_scalar *tweak);
|
||||
static int rustsecp256k1_v0_9_0_eckey_pubkey_tweak_add(rustsecp256k1_v0_9_0_ge *key, const rustsecp256k1_v0_9_0_scalar *tweak);
|
||||
static int rustsecp256k1_v0_9_0_eckey_privkey_tweak_mul(rustsecp256k1_v0_9_0_scalar *key, const rustsecp256k1_v0_9_0_scalar *tweak);
|
||||
static int rustsecp256k1_v0_9_0_eckey_pubkey_tweak_mul(rustsecp256k1_v0_9_0_ge *key, const rustsecp256k1_v0_9_0_scalar *tweak);
|
||||
|
||||
#endif /* SECP256K1_ECKEY_H */
|
||||
|
|
|
@ -14,82 +14,78 @@
|
|||
#include "group.h"
|
||||
#include "ecmult_gen.h"
|
||||
|
||||
static int rustsecp256k1_v0_8_1_eckey_pubkey_parse(rustsecp256k1_v0_8_1_ge *elem, const unsigned char *pub, size_t size) {
|
||||
static int rustsecp256k1_v0_9_0_eckey_pubkey_parse(rustsecp256k1_v0_9_0_ge *elem, const unsigned char *pub, size_t size) {
|
||||
if (size == 33 && (pub[0] == SECP256K1_TAG_PUBKEY_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_ODD)) {
|
||||
rustsecp256k1_v0_8_1_fe x;
|
||||
return rustsecp256k1_v0_8_1_fe_set_b32(&x, pub+1) && rustsecp256k1_v0_8_1_ge_set_xo_var(elem, &x, pub[0] == SECP256K1_TAG_PUBKEY_ODD);
|
||||
rustsecp256k1_v0_9_0_fe x;
|
||||
return rustsecp256k1_v0_9_0_fe_set_b32_limit(&x, pub+1) && rustsecp256k1_v0_9_0_ge_set_xo_var(elem, &x, pub[0] == SECP256K1_TAG_PUBKEY_ODD);
|
||||
} else if (size == 65 && (pub[0] == SECP256K1_TAG_PUBKEY_UNCOMPRESSED || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) {
|
||||
rustsecp256k1_v0_8_1_fe x, y;
|
||||
if (!rustsecp256k1_v0_8_1_fe_set_b32(&x, pub+1) || !rustsecp256k1_v0_8_1_fe_set_b32(&y, pub+33)) {
|
||||
rustsecp256k1_v0_9_0_fe x, y;
|
||||
if (!rustsecp256k1_v0_9_0_fe_set_b32_limit(&x, pub+1) || !rustsecp256k1_v0_9_0_fe_set_b32_limit(&y, pub+33)) {
|
||||
return 0;
|
||||
}
|
||||
rustsecp256k1_v0_8_1_ge_set_xy(elem, &x, &y);
|
||||
rustsecp256k1_v0_9_0_ge_set_xy(elem, &x, &y);
|
||||
if ((pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD) &&
|
||||
rustsecp256k1_v0_8_1_fe_is_odd(&y) != (pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) {
|
||||
rustsecp256k1_v0_9_0_fe_is_odd(&y) != (pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) {
|
||||
return 0;
|
||||
}
|
||||
return rustsecp256k1_v0_8_1_ge_is_valid_var(elem);
|
||||
return rustsecp256k1_v0_9_0_ge_is_valid_var(elem);
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_eckey_pubkey_serialize(rustsecp256k1_v0_8_1_ge *elem, unsigned char *pub, size_t *size, int compressed) {
|
||||
if (rustsecp256k1_v0_8_1_ge_is_infinity(elem)) {
|
||||
static int rustsecp256k1_v0_9_0_eckey_pubkey_serialize(rustsecp256k1_v0_9_0_ge *elem, unsigned char *pub, size_t *size, int compressed) {
|
||||
if (rustsecp256k1_v0_9_0_ge_is_infinity(elem)) {
|
||||
return 0;
|
||||
}
|
||||
rustsecp256k1_v0_8_1_fe_normalize_var(&elem->x);
|
||||
rustsecp256k1_v0_8_1_fe_normalize_var(&elem->y);
|
||||
rustsecp256k1_v0_8_1_fe_get_b32(&pub[1], &elem->x);
|
||||
rustsecp256k1_v0_9_0_fe_normalize_var(&elem->x);
|
||||
rustsecp256k1_v0_9_0_fe_normalize_var(&elem->y);
|
||||
rustsecp256k1_v0_9_0_fe_get_b32(&pub[1], &elem->x);
|
||||
if (compressed) {
|
||||
*size = 33;
|
||||
pub[0] = rustsecp256k1_v0_8_1_fe_is_odd(&elem->y) ? SECP256K1_TAG_PUBKEY_ODD : SECP256K1_TAG_PUBKEY_EVEN;
|
||||
pub[0] = rustsecp256k1_v0_9_0_fe_is_odd(&elem->y) ? SECP256K1_TAG_PUBKEY_ODD : SECP256K1_TAG_PUBKEY_EVEN;
|
||||
} else {
|
||||
*size = 65;
|
||||
pub[0] = SECP256K1_TAG_PUBKEY_UNCOMPRESSED;
|
||||
rustsecp256k1_v0_8_1_fe_get_b32(&pub[33], &elem->y);
|
||||
rustsecp256k1_v0_9_0_fe_get_b32(&pub[33], &elem->y);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_eckey_privkey_tweak_add(rustsecp256k1_v0_8_1_scalar *key, const rustsecp256k1_v0_8_1_scalar *tweak) {
|
||||
rustsecp256k1_v0_8_1_scalar_add(key, key, tweak);
|
||||
return !rustsecp256k1_v0_8_1_scalar_is_zero(key);
|
||||
static int rustsecp256k1_v0_9_0_eckey_privkey_tweak_add(rustsecp256k1_v0_9_0_scalar *key, const rustsecp256k1_v0_9_0_scalar *tweak) {
|
||||
rustsecp256k1_v0_9_0_scalar_add(key, key, tweak);
|
||||
return !rustsecp256k1_v0_9_0_scalar_is_zero(key);
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_eckey_pubkey_tweak_add(rustsecp256k1_v0_8_1_ge *key, const rustsecp256k1_v0_8_1_scalar *tweak) {
|
||||
rustsecp256k1_v0_8_1_gej pt;
|
||||
rustsecp256k1_v0_8_1_scalar one;
|
||||
rustsecp256k1_v0_8_1_gej_set_ge(&pt, key);
|
||||
rustsecp256k1_v0_8_1_scalar_set_int(&one, 1);
|
||||
rustsecp256k1_v0_8_1_ecmult(&pt, &pt, &one, tweak);
|
||||
static int rustsecp256k1_v0_9_0_eckey_pubkey_tweak_add(rustsecp256k1_v0_9_0_ge *key, const rustsecp256k1_v0_9_0_scalar *tweak) {
|
||||
rustsecp256k1_v0_9_0_gej pt;
|
||||
rustsecp256k1_v0_9_0_gej_set_ge(&pt, key);
|
||||
rustsecp256k1_v0_9_0_ecmult(&pt, &pt, &rustsecp256k1_v0_9_0_scalar_one, tweak);
|
||||
|
||||
if (rustsecp256k1_v0_8_1_gej_is_infinity(&pt)) {
|
||||
if (rustsecp256k1_v0_9_0_gej_is_infinity(&pt)) {
|
||||
return 0;
|
||||
}
|
||||
rustsecp256k1_v0_8_1_ge_set_gej(key, &pt);
|
||||
rustsecp256k1_v0_9_0_ge_set_gej(key, &pt);
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_eckey_privkey_tweak_mul(rustsecp256k1_v0_8_1_scalar *key, const rustsecp256k1_v0_8_1_scalar *tweak) {
|
||||
static int rustsecp256k1_v0_9_0_eckey_privkey_tweak_mul(rustsecp256k1_v0_9_0_scalar *key, const rustsecp256k1_v0_9_0_scalar *tweak) {
|
||||
int ret;
|
||||
ret = !rustsecp256k1_v0_8_1_scalar_is_zero(tweak);
|
||||
ret = !rustsecp256k1_v0_9_0_scalar_is_zero(tweak);
|
||||
|
||||
rustsecp256k1_v0_8_1_scalar_mul(key, key, tweak);
|
||||
rustsecp256k1_v0_9_0_scalar_mul(key, key, tweak);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_eckey_pubkey_tweak_mul(rustsecp256k1_v0_8_1_ge *key, const rustsecp256k1_v0_8_1_scalar *tweak) {
|
||||
rustsecp256k1_v0_8_1_scalar zero;
|
||||
rustsecp256k1_v0_8_1_gej pt;
|
||||
if (rustsecp256k1_v0_8_1_scalar_is_zero(tweak)) {
|
||||
static int rustsecp256k1_v0_9_0_eckey_pubkey_tweak_mul(rustsecp256k1_v0_9_0_ge *key, const rustsecp256k1_v0_9_0_scalar *tweak) {
|
||||
rustsecp256k1_v0_9_0_gej pt;
|
||||
if (rustsecp256k1_v0_9_0_scalar_is_zero(tweak)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
rustsecp256k1_v0_8_1_scalar_set_int(&zero, 0);
|
||||
rustsecp256k1_v0_8_1_gej_set_ge(&pt, key);
|
||||
rustsecp256k1_v0_8_1_ecmult(&pt, &pt, tweak, &zero);
|
||||
rustsecp256k1_v0_8_1_ge_set_gej(key, &pt);
|
||||
rustsecp256k1_v0_9_0_gej_set_ge(&pt, key);
|
||||
rustsecp256k1_v0_9_0_ecmult(&pt, &pt, tweak, &rustsecp256k1_v0_9_0_scalar_zero);
|
||||
rustsecp256k1_v0_9_0_ge_set_gej(key, &pt);
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
|
|
@ -41,9 +41,9 @@
|
|||
#define ECMULT_TABLE_SIZE(w) (1L << ((w)-2))
|
||||
|
||||
/** Double multiply: R = na*A + ng*G */
|
||||
static void rustsecp256k1_v0_8_1_ecmult(rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_gej *a, const rustsecp256k1_v0_8_1_scalar *na, const rustsecp256k1_v0_8_1_scalar *ng);
|
||||
static void rustsecp256k1_v0_9_0_ecmult(rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_gej *a, const rustsecp256k1_v0_9_0_scalar *na, const rustsecp256k1_v0_9_0_scalar *ng);
|
||||
|
||||
typedef int (rustsecp256k1_v0_8_1_ecmult_multi_callback)(rustsecp256k1_v0_8_1_scalar *sc, rustsecp256k1_v0_8_1_ge *pt, size_t idx, void *data);
|
||||
typedef int (rustsecp256k1_v0_9_0_ecmult_multi_callback)(rustsecp256k1_v0_9_0_scalar *sc, rustsecp256k1_v0_9_0_ge *pt, size_t idx, void *data);
|
||||
|
||||
/**
|
||||
* Multi-multiply: R = inp_g_sc * G + sum_i ni * Ai.
|
||||
|
@ -56,6 +56,6 @@ typedef int (rustsecp256k1_v0_8_1_ecmult_multi_callback)(rustsecp256k1_v0_8_1_sc
|
|||
* 0 if there is not enough scratch space for a single point or
|
||||
* callback returns 0
|
||||
*/
|
||||
static int rustsecp256k1_v0_8_1_ecmult_multi_var(const rustsecp256k1_v0_8_1_callback* error_callback, rustsecp256k1_v0_8_1_scratch *scratch, rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_scalar *inp_g_sc, rustsecp256k1_v0_8_1_ecmult_multi_callback cb, void *cbdata, size_t n);
|
||||
static int rustsecp256k1_v0_9_0_ecmult_multi_var(const rustsecp256k1_v0_9_0_callback* error_callback, rustsecp256k1_v0_9_0_scratch *scratch, rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_scalar *inp_g_sc, rustsecp256k1_v0_9_0_ecmult_multi_callback cb, void *cbdata, size_t n);
|
||||
|
||||
#endif /* SECP256K1_ECMULT_H */
|
||||
|
|
|
@ -8,9 +8,9 @@
|
|||
#define SECP256K1_ECMULT_COMPUTE_TABLE_H
|
||||
|
||||
/* Construct table of all odd multiples of gen in range 1..(2**(window_g-1)-1). */
|
||||
static void rustsecp256k1_v0_8_1_ecmult_compute_table(rustsecp256k1_v0_8_1_ge_storage* table, int window_g, const rustsecp256k1_v0_8_1_gej* gen);
|
||||
static void rustsecp256k1_v0_9_0_ecmult_compute_table(rustsecp256k1_v0_9_0_ge_storage* table, int window_g, const rustsecp256k1_v0_9_0_gej* gen);
|
||||
|
||||
/* Like rustsecp256k1_v0_8_1_ecmult_compute_table, but one for both gen and gen*2^128. */
|
||||
static void rustsecp256k1_v0_8_1_ecmult_compute_two_tables(rustsecp256k1_v0_8_1_ge_storage* table, rustsecp256k1_v0_8_1_ge_storage* table_128, int window_g, const rustsecp256k1_v0_8_1_ge* gen);
|
||||
/* Like rustsecp256k1_v0_9_0_ecmult_compute_table, but one for both gen and gen*2^128. */
|
||||
static void rustsecp256k1_v0_9_0_ecmult_compute_two_tables(rustsecp256k1_v0_9_0_ge_storage* table, rustsecp256k1_v0_9_0_ge_storage* table_128, int window_g, const rustsecp256k1_v0_9_0_ge* gen);
|
||||
|
||||
#endif /* SECP256K1_ECMULT_COMPUTE_TABLE_H */
|
||||
|
|
|
@ -13,37 +13,37 @@
|
|||
#include "ecmult.h"
|
||||
#include "util.h"
|
||||
|
||||
static void rustsecp256k1_v0_8_1_ecmult_compute_table(rustsecp256k1_v0_8_1_ge_storage* table, int window_g, const rustsecp256k1_v0_8_1_gej* gen) {
|
||||
rustsecp256k1_v0_8_1_gej gj;
|
||||
rustsecp256k1_v0_8_1_ge ge, dgen;
|
||||
static void rustsecp256k1_v0_9_0_ecmult_compute_table(rustsecp256k1_v0_9_0_ge_storage* table, int window_g, const rustsecp256k1_v0_9_0_gej* gen) {
|
||||
rustsecp256k1_v0_9_0_gej gj;
|
||||
rustsecp256k1_v0_9_0_ge ge, dgen;
|
||||
int j;
|
||||
|
||||
gj = *gen;
|
||||
rustsecp256k1_v0_8_1_ge_set_gej_var(&ge, &gj);
|
||||
rustsecp256k1_v0_8_1_ge_to_storage(&table[0], &ge);
|
||||
rustsecp256k1_v0_9_0_ge_set_gej_var(&ge, &gj);
|
||||
rustsecp256k1_v0_9_0_ge_to_storage(&table[0], &ge);
|
||||
|
||||
rustsecp256k1_v0_8_1_gej_double_var(&gj, gen, NULL);
|
||||
rustsecp256k1_v0_8_1_ge_set_gej_var(&dgen, &gj);
|
||||
rustsecp256k1_v0_9_0_gej_double_var(&gj, gen, NULL);
|
||||
rustsecp256k1_v0_9_0_ge_set_gej_var(&dgen, &gj);
|
||||
|
||||
for (j = 1; j < ECMULT_TABLE_SIZE(window_g); ++j) {
|
||||
rustsecp256k1_v0_8_1_gej_set_ge(&gj, &ge);
|
||||
rustsecp256k1_v0_8_1_gej_add_ge_var(&gj, &gj, &dgen, NULL);
|
||||
rustsecp256k1_v0_8_1_ge_set_gej_var(&ge, &gj);
|
||||
rustsecp256k1_v0_8_1_ge_to_storage(&table[j], &ge);
|
||||
rustsecp256k1_v0_9_0_gej_set_ge(&gj, &ge);
|
||||
rustsecp256k1_v0_9_0_gej_add_ge_var(&gj, &gj, &dgen, NULL);
|
||||
rustsecp256k1_v0_9_0_ge_set_gej_var(&ge, &gj);
|
||||
rustsecp256k1_v0_9_0_ge_to_storage(&table[j], &ge);
|
||||
}
|
||||
}
|
||||
|
||||
/* Like rustsecp256k1_v0_8_1_ecmult_compute_table, but one for both gen and gen*2^128. */
|
||||
static void rustsecp256k1_v0_8_1_ecmult_compute_two_tables(rustsecp256k1_v0_8_1_ge_storage* table, rustsecp256k1_v0_8_1_ge_storage* table_128, int window_g, const rustsecp256k1_v0_8_1_ge* gen) {
|
||||
rustsecp256k1_v0_8_1_gej gj;
|
||||
/* Like rustsecp256k1_v0_9_0_ecmult_compute_table, but one for both gen and gen*2^128. */
|
||||
static void rustsecp256k1_v0_9_0_ecmult_compute_two_tables(rustsecp256k1_v0_9_0_ge_storage* table, rustsecp256k1_v0_9_0_ge_storage* table_128, int window_g, const rustsecp256k1_v0_9_0_ge* gen) {
|
||||
rustsecp256k1_v0_9_0_gej gj;
|
||||
int i;
|
||||
|
||||
rustsecp256k1_v0_8_1_gej_set_ge(&gj, gen);
|
||||
rustsecp256k1_v0_8_1_ecmult_compute_table(table, window_g, &gj);
|
||||
rustsecp256k1_v0_9_0_gej_set_ge(&gj, gen);
|
||||
rustsecp256k1_v0_9_0_ecmult_compute_table(table, window_g, &gj);
|
||||
for (i = 0; i < 128; ++i) {
|
||||
rustsecp256k1_v0_8_1_gej_double_var(&gj, &gj, NULL);
|
||||
rustsecp256k1_v0_9_0_gej_double_var(&gj, &gj, NULL);
|
||||
}
|
||||
rustsecp256k1_v0_8_1_ecmult_compute_table(table_128, window_g, &gj);
|
||||
rustsecp256k1_v0_9_0_ecmult_compute_table(table_128, window_g, &gj);
|
||||
}
|
||||
|
||||
#endif /* SECP256K1_ECMULT_COMPUTE_TABLE_IMPL_H */
|
||||
|
|
|
@ -11,11 +11,28 @@
|
|||
#include "group.h"
|
||||
|
||||
/**
|
||||
* Multiply: R = q*A (in constant-time)
|
||||
* Here `bits` should be set to the maximum bitlength of the _absolute value_ of `q`, plus
|
||||
* one because we internally sometimes add 2 to the number during the WNAF conversion.
|
||||
* A must not be infinity.
|
||||
* Multiply: R = q*A (in constant-time for q)
|
||||
*/
|
||||
static void rustsecp256k1_v0_8_1_ecmult_const(rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_ge *a, const rustsecp256k1_v0_8_1_scalar *q, int bits);
|
||||
static void rustsecp256k1_v0_9_0_ecmult_const(rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_ge *a, const rustsecp256k1_v0_9_0_scalar *q);
|
||||
|
||||
/**
|
||||
* Same as rustsecp256k1_v0_9_0_ecmult_const, but takes in an x coordinate of the base point
|
||||
* only, specified as fraction n/d (numerator/denominator). Only the x coordinate of the result is
|
||||
* returned.
|
||||
*
|
||||
* If known_on_curve is 0, a verification is performed that n/d is a valid X
|
||||
* coordinate, and 0 is returned if not. Otherwise, 1 is returned.
|
||||
*
|
||||
* d being NULL is interpreted as d=1. If non-NULL, d must not be zero. q must not be zero.
|
||||
*
|
||||
* Constant time in the value of q, but not any other inputs.
|
||||
*/
|
||||
static int rustsecp256k1_v0_9_0_ecmult_const_xonly(
|
||||
rustsecp256k1_v0_9_0_fe *r,
|
||||
const rustsecp256k1_v0_9_0_fe *n,
|
||||
const rustsecp256k1_v0_9_0_fe *d,
|
||||
const rustsecp256k1_v0_9_0_scalar *q,
|
||||
int known_on_curve
|
||||
);
|
||||
|
||||
#endif /* SECP256K1_ECMULT_CONST_H */
|
||||
|
|
|
@ -18,26 +18,26 @@
|
|||
* coordinates as ge_storage points in pre, and stores the global Z in globalz.
|
||||
* It only operates on tables sized for WINDOW_A wnaf multiples.
|
||||
*/
|
||||
static void rustsecp256k1_v0_8_1_ecmult_odd_multiples_table_globalz_windowa(rustsecp256k1_v0_8_1_ge *pre, rustsecp256k1_v0_8_1_fe *globalz, const rustsecp256k1_v0_8_1_gej *a) {
|
||||
rustsecp256k1_v0_8_1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)];
|
||||
static void rustsecp256k1_v0_9_0_ecmult_odd_multiples_table_globalz_windowa(rustsecp256k1_v0_9_0_ge *pre, rustsecp256k1_v0_9_0_fe *globalz, const rustsecp256k1_v0_9_0_gej *a) {
|
||||
rustsecp256k1_v0_9_0_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)];
|
||||
|
||||
rustsecp256k1_v0_8_1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), pre, zr, globalz, a);
|
||||
rustsecp256k1_v0_8_1_ge_table_set_globalz(ECMULT_TABLE_SIZE(WINDOW_A), pre, zr);
|
||||
rustsecp256k1_v0_9_0_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), pre, zr, globalz, a);
|
||||
rustsecp256k1_v0_9_0_ge_table_set_globalz(ECMULT_TABLE_SIZE(WINDOW_A), pre, zr);
|
||||
}
|
||||
|
||||
/* This is like `ECMULT_TABLE_GET_GE` but is constant time */
|
||||
#define ECMULT_CONST_TABLE_GET_GE(r,pre,n,w) do { \
|
||||
int m = 0; \
|
||||
/* Extract the sign-bit for a constant time absolute-value. */ \
|
||||
int mask = (n) >> (sizeof(n) * CHAR_BIT - 1); \
|
||||
int volatile mask = (n) >> (sizeof(n) * CHAR_BIT - 1); \
|
||||
int abs_n = ((n) + mask) ^ mask; \
|
||||
int idx_n = abs_n >> 1; \
|
||||
rustsecp256k1_v0_8_1_fe neg_y; \
|
||||
rustsecp256k1_v0_9_0_fe neg_y; \
|
||||
VERIFY_CHECK(((n) & 1) == 1); \
|
||||
VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
|
||||
VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \
|
||||
VERIFY_SETUP(rustsecp256k1_v0_8_1_fe_clear(&(r)->x)); \
|
||||
VERIFY_SETUP(rustsecp256k1_v0_8_1_fe_clear(&(r)->y)); \
|
||||
VERIFY_SETUP(rustsecp256k1_v0_9_0_fe_clear(&(r)->x)); \
|
||||
VERIFY_SETUP(rustsecp256k1_v0_9_0_fe_clear(&(r)->y)); \
|
||||
/* Unconditionally set r->x = (pre)[m].x. r->y = (pre)[m].y. because it's either the correct one \
|
||||
* or will get replaced in the later iterations, this is needed to make sure `r` is initialized. */ \
|
||||
(r)->x = (pre)[m].x; \
|
||||
|
@ -45,12 +45,12 @@ static void rustsecp256k1_v0_8_1_ecmult_odd_multiples_table_globalz_windowa(rust
|
|||
for (m = 1; m < ECMULT_TABLE_SIZE(w); m++) { \
|
||||
/* This loop is used to avoid secret data in array indices. See
|
||||
* the comment in ecmult_gen_impl.h for rationale. */ \
|
||||
rustsecp256k1_v0_8_1_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \
|
||||
rustsecp256k1_v0_8_1_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \
|
||||
rustsecp256k1_v0_9_0_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \
|
||||
rustsecp256k1_v0_9_0_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \
|
||||
} \
|
||||
(r)->infinity = 0; \
|
||||
rustsecp256k1_v0_8_1_fe_negate(&neg_y, &(r)->y, 1); \
|
||||
rustsecp256k1_v0_8_1_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \
|
||||
rustsecp256k1_v0_9_0_fe_negate(&neg_y, &(r)->y, 1); \
|
||||
rustsecp256k1_v0_9_0_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \
|
||||
} while(0)
|
||||
|
||||
/** Convert a number to WNAF notation.
|
||||
|
@ -66,7 +66,7 @@ static void rustsecp256k1_v0_8_1_ecmult_odd_multiples_table_globalz_windowa(rust
|
|||
*
|
||||
* Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on pp. 335
|
||||
*/
|
||||
static int rustsecp256k1_v0_8_1_wnaf_const(int *wnaf, const rustsecp256k1_v0_8_1_scalar *scalar, int w, int size) {
|
||||
static int rustsecp256k1_v0_9_0_wnaf_const(int *wnaf, const rustsecp256k1_v0_9_0_scalar *scalar, int w, int size) {
|
||||
int global_sign;
|
||||
int skew;
|
||||
int word = 0;
|
||||
|
@ -76,7 +76,7 @@ static int rustsecp256k1_v0_8_1_wnaf_const(int *wnaf, const rustsecp256k1_v0_8_1
|
|||
int u;
|
||||
|
||||
int flip;
|
||||
rustsecp256k1_v0_8_1_scalar s = *scalar;
|
||||
rustsecp256k1_v0_9_0_scalar s = *scalar;
|
||||
|
||||
VERIFY_CHECK(w > 0);
|
||||
VERIFY_CHECK(size > 0);
|
||||
|
@ -93,18 +93,18 @@ static int rustsecp256k1_v0_8_1_wnaf_const(int *wnaf, const rustsecp256k1_v0_8_1
|
|||
* particular, to ensure that the outputs from the endomorphism-split fit into
|
||||
* 128 bits). If we negate, the parity of our number flips, affecting whether
|
||||
* we want to add to the scalar to ensure that it's odd. */
|
||||
flip = rustsecp256k1_v0_8_1_scalar_is_high(&s);
|
||||
skew = flip ^ rustsecp256k1_v0_8_1_scalar_is_even(&s);
|
||||
rustsecp256k1_v0_8_1_scalar_cadd_bit(&s, 0, skew);
|
||||
global_sign = rustsecp256k1_v0_8_1_scalar_cond_negate(&s, flip);
|
||||
flip = rustsecp256k1_v0_9_0_scalar_is_high(&s);
|
||||
skew = flip ^ rustsecp256k1_v0_9_0_scalar_is_even(&s);
|
||||
rustsecp256k1_v0_9_0_scalar_cadd_bit(&s, 0, skew);
|
||||
global_sign = rustsecp256k1_v0_9_0_scalar_cond_negate(&s, flip);
|
||||
|
||||
/* 4 */
|
||||
u_last = rustsecp256k1_v0_8_1_scalar_shr_int(&s, w);
|
||||
u_last = rustsecp256k1_v0_9_0_scalar_shr_int(&s, w);
|
||||
do {
|
||||
int even;
|
||||
|
||||
/* 4.1 4.4 */
|
||||
u = rustsecp256k1_v0_8_1_scalar_shr_int(&s, w);
|
||||
u = rustsecp256k1_v0_9_0_scalar_shr_int(&s, w);
|
||||
/* 4.2 */
|
||||
even = ((u & 1) == 0);
|
||||
/* In contrast to the original algorithm, u_last is always > 0 and
|
||||
|
@ -125,39 +125,36 @@ static int rustsecp256k1_v0_8_1_wnaf_const(int *wnaf, const rustsecp256k1_v0_8_1
|
|||
} while (word * w < size);
|
||||
wnaf[word] = u * global_sign;
|
||||
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_scalar_is_zero(&s));
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_scalar_is_zero(&s));
|
||||
VERIFY_CHECK(word == WNAF_SIZE_BITS(size, w));
|
||||
return skew;
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_ecmult_const(rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_ge *a, const rustsecp256k1_v0_8_1_scalar *scalar, int size) {
|
||||
rustsecp256k1_v0_8_1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
|
||||
rustsecp256k1_v0_8_1_ge tmpa;
|
||||
rustsecp256k1_v0_8_1_fe Z;
|
||||
static void rustsecp256k1_v0_9_0_ecmult_const(rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_ge *a, const rustsecp256k1_v0_9_0_scalar *scalar) {
|
||||
rustsecp256k1_v0_9_0_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
|
||||
rustsecp256k1_v0_9_0_ge tmpa;
|
||||
rustsecp256k1_v0_9_0_fe Z;
|
||||
|
||||
int skew_1;
|
||||
rustsecp256k1_v0_8_1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
|
||||
rustsecp256k1_v0_9_0_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
|
||||
int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)];
|
||||
int skew_lam;
|
||||
rustsecp256k1_v0_8_1_scalar q_1, q_lam;
|
||||
rustsecp256k1_v0_9_0_scalar q_1, q_lam;
|
||||
int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];
|
||||
|
||||
int i;
|
||||
|
||||
/* build wnaf representation for q. */
|
||||
int rsize = size;
|
||||
if (size > 128) {
|
||||
rsize = 128;
|
||||
/* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
|
||||
rustsecp256k1_v0_8_1_scalar_split_lambda(&q_1, &q_lam, scalar);
|
||||
skew_1 = rustsecp256k1_v0_8_1_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128);
|
||||
skew_lam = rustsecp256k1_v0_8_1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128);
|
||||
} else
|
||||
{
|
||||
skew_1 = rustsecp256k1_v0_8_1_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size);
|
||||
skew_lam = 0;
|
||||
if (rustsecp256k1_v0_9_0_ge_is_infinity(a)) {
|
||||
rustsecp256k1_v0_9_0_gej_set_infinity(r);
|
||||
return;
|
||||
}
|
||||
|
||||
/* build wnaf representation for q. */
|
||||
/* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
|
||||
rustsecp256k1_v0_9_0_scalar_split_lambda(&q_1, &q_lam, scalar);
|
||||
skew_1 = rustsecp256k1_v0_9_0_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128);
|
||||
skew_lam = rustsecp256k1_v0_9_0_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128);
|
||||
|
||||
/* Calculate odd multiples of a.
|
||||
* All multiples are brought to the same Z 'denominator', which is stored
|
||||
* in Z. Due to secp256k1' isomorphism we can do all operations pretending
|
||||
|
@ -165,67 +162,193 @@ static void rustsecp256k1_v0_8_1_ecmult_const(rustsecp256k1_v0_8_1_gej *r, const
|
|||
* the Z coordinate of the result once at the end.
|
||||
*/
|
||||
VERIFY_CHECK(!a->infinity);
|
||||
rustsecp256k1_v0_8_1_gej_set_ge(r, a);
|
||||
rustsecp256k1_v0_8_1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r);
|
||||
rustsecp256k1_v0_9_0_gej_set_ge(r, a);
|
||||
rustsecp256k1_v0_9_0_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r);
|
||||
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
|
||||
rustsecp256k1_v0_8_1_fe_normalize_weak(&pre_a[i].y);
|
||||
rustsecp256k1_v0_9_0_fe_normalize_weak(&pre_a[i].y);
|
||||
}
|
||||
if (size > 128) {
|
||||
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
|
||||
rustsecp256k1_v0_8_1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
|
||||
}
|
||||
|
||||
rustsecp256k1_v0_9_0_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
|
||||
}
|
||||
|
||||
/* first loop iteration (separated out so we can directly set r, rather
|
||||
* than having it start at infinity, get doubled several times, then have
|
||||
* its new value added to it) */
|
||||
i = wnaf_1[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
|
||||
i = wnaf_1[WNAF_SIZE_BITS(128, WINDOW_A - 1)];
|
||||
VERIFY_CHECK(i != 0);
|
||||
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
|
||||
rustsecp256k1_v0_8_1_gej_set_ge(r, &tmpa);
|
||||
if (size > 128) {
|
||||
i = wnaf_lam[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
|
||||
rustsecp256k1_v0_9_0_gej_set_ge(r, &tmpa);
|
||||
i = wnaf_lam[WNAF_SIZE_BITS(128, WINDOW_A - 1)];
|
||||
VERIFY_CHECK(i != 0);
|
||||
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A);
|
||||
rustsecp256k1_v0_8_1_gej_add_ge(r, r, &tmpa);
|
||||
}
|
||||
rustsecp256k1_v0_9_0_gej_add_ge(r, r, &tmpa);
|
||||
/* remaining loop iterations */
|
||||
for (i = WNAF_SIZE_BITS(rsize, WINDOW_A - 1) - 1; i >= 0; i--) {
|
||||
for (i = WNAF_SIZE_BITS(128, WINDOW_A - 1) - 1; i >= 0; i--) {
|
||||
int n;
|
||||
int j;
|
||||
for (j = 0; j < WINDOW_A - 1; ++j) {
|
||||
rustsecp256k1_v0_8_1_gej_double(r, r);
|
||||
rustsecp256k1_v0_9_0_gej_double(r, r);
|
||||
}
|
||||
|
||||
n = wnaf_1[i];
|
||||
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
|
||||
VERIFY_CHECK(n != 0);
|
||||
rustsecp256k1_v0_8_1_gej_add_ge(r, r, &tmpa);
|
||||
if (size > 128) {
|
||||
rustsecp256k1_v0_9_0_gej_add_ge(r, r, &tmpa);
|
||||
n = wnaf_lam[i];
|
||||
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A);
|
||||
VERIFY_CHECK(n != 0);
|
||||
rustsecp256k1_v0_8_1_gej_add_ge(r, r, &tmpa);
|
||||
}
|
||||
rustsecp256k1_v0_9_0_gej_add_ge(r, r, &tmpa);
|
||||
}
|
||||
|
||||
{
|
||||
/* Correct for wNAF skew */
|
||||
rustsecp256k1_v0_8_1_gej tmpj;
|
||||
rustsecp256k1_v0_9_0_gej tmpj;
|
||||
|
||||
rustsecp256k1_v0_8_1_ge_neg(&tmpa, &pre_a[0]);
|
||||
rustsecp256k1_v0_8_1_gej_add_ge(&tmpj, r, &tmpa);
|
||||
rustsecp256k1_v0_8_1_gej_cmov(r, &tmpj, skew_1);
|
||||
rustsecp256k1_v0_9_0_ge_neg(&tmpa, &pre_a[0]);
|
||||
rustsecp256k1_v0_9_0_gej_add_ge(&tmpj, r, &tmpa);
|
||||
rustsecp256k1_v0_9_0_gej_cmov(r, &tmpj, skew_1);
|
||||
|
||||
if (size > 128) {
|
||||
rustsecp256k1_v0_8_1_ge_neg(&tmpa, &pre_a_lam[0]);
|
||||
rustsecp256k1_v0_8_1_gej_add_ge(&tmpj, r, &tmpa);
|
||||
rustsecp256k1_v0_8_1_gej_cmov(r, &tmpj, skew_lam);
|
||||
rustsecp256k1_v0_9_0_ge_neg(&tmpa, &pre_a_lam[0]);
|
||||
rustsecp256k1_v0_9_0_gej_add_ge(&tmpj, r, &tmpa);
|
||||
rustsecp256k1_v0_9_0_gej_cmov(r, &tmpj, skew_lam);
|
||||
}
|
||||
|
||||
rustsecp256k1_v0_9_0_fe_mul(&r->z, &r->z, &Z);
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_9_0_ecmult_const_xonly(rustsecp256k1_v0_9_0_fe* r, const rustsecp256k1_v0_9_0_fe *n, const rustsecp256k1_v0_9_0_fe *d, const rustsecp256k1_v0_9_0_scalar *q, int known_on_curve) {
|
||||
|
||||
/* This algorithm is a generalization of Peter Dettman's technique for
|
||||
* avoiding the square root in a random-basepoint x-only multiplication
|
||||
* on a Weierstrass curve:
|
||||
* https://mailarchive.ietf.org/arch/msg/cfrg/7DyYY6gg32wDgHAhgSb6XxMDlJA/
|
||||
*
|
||||
*
|
||||
* === Background: the effective affine technique ===
|
||||
*
|
||||
* Let phi_u be the isomorphism that maps (x, y) on secp256k1 curve y^2 = x^3 + 7 to
|
||||
* x' = u^2*x, y' = u^3*y on curve y'^2 = x'^3 + u^6*7. This new curve has the same order as
|
||||
* the original (it is isomorphic), but moreover, has the same addition/doubling formulas, as
|
||||
* the curve b=7 coefficient does not appear in those formulas (or at least does not appear in
|
||||
* the formulas implemented in this codebase, both affine and Jacobian). See also Example 9.5.2
|
||||
* in https://www.math.auckland.ac.nz/~sgal018/crypto-book/ch9.pdf.
|
||||
*
|
||||
* This means any linear combination of secp256k1 points can be computed by applying phi_u
|
||||
* (with non-zero u) on all input points (including the generator, if used), computing the
|
||||
* linear combination on the isomorphic curve (using the same group laws), and then applying
|
||||
* phi_u^{-1} to get back to secp256k1.
|
||||
*
|
||||
* Switching to Jacobian coordinates, note that phi_u applied to (X, Y, Z) is simply
|
||||
* (X, Y, Z/u). Thus, if we want to compute (X1, Y1, Z) + (X2, Y2, Z), with identical Z
|
||||
* coordinates, we can use phi_Z to transform it to (X1, Y1, 1) + (X2, Y2, 1) on an isomorphic
|
||||
* curve where the affine addition formula can be used instead.
|
||||
* If (X3, Y3, Z3) = (X1, Y1) + (X2, Y2) on that curve, then our answer on secp256k1 is
|
||||
* (X3, Y3, Z3*Z).
|
||||
*
|
||||
* This is the effective affine technique: if we have a linear combination of group elements
|
||||
* to compute, and all those group elements have the same Z coordinate, we can simply pretend
|
||||
* that all those Z coordinates are 1, perform the computation that way, and then multiply the
|
||||
* original Z coordinate back in.
|
||||
*
|
||||
* The technique works on any a=0 short Weierstrass curve. It is possible to generalize it to
|
||||
* other curves too, but there the isomorphic curves will have different 'a' coefficients,
|
||||
* which typically does affect the group laws.
|
||||
*
|
||||
*
|
||||
* === Avoiding the square root for x-only point multiplication ===
|
||||
*
|
||||
* In this function, we want to compute the X coordinate of q*(n/d, y), for
|
||||
* y = sqrt((n/d)^3 + 7). Its negation would also be a valid Y coordinate, but by convention
|
||||
* we pick whatever sqrt returns (which we assume to be a deterministic function).
|
||||
*
|
||||
* Let g = y^2*d^3 = n^3 + 7*d^3. This also means y = sqrt(g/d^3).
|
||||
* Further let v = sqrt(d*g), which must exist as d*g = y^2*d^4 = (y*d^2)^2.
|
||||
*
|
||||
* The input point (n/d, y) also has Jacobian coordinates:
|
||||
*
|
||||
* (n/d, y, 1)
|
||||
* = (n/d * v^2, y * v^3, v)
|
||||
* = (n/d * d*g, y * sqrt(d^3*g^3), v)
|
||||
* = (n/d * d*g, sqrt(y^2 * d^3*g^3), v)
|
||||
* = (n*g, sqrt(g/d^3 * d^3*g^3), v)
|
||||
* = (n*g, sqrt(g^4), v)
|
||||
* = (n*g, g^2, v)
|
||||
*
|
||||
* It is easy to verify that both (n*g, g^2, v) and its negation (n*g, -g^2, v) have affine X
|
||||
* coordinate n/d, and this holds even when the square root function doesn't have a
|
||||
* deterministic sign. We choose the (n*g, g^2, v) version.
|
||||
*
|
||||
* Now switch to the effective affine curve using phi_v, where the input point has coordinates
|
||||
* (n*g, g^2). Compute (X, Y, Z) = q * (n*g, g^2) there.
|
||||
*
|
||||
* Back on secp256k1, that means q * (n*g, g^2, v) = (X, Y, v*Z). This last point has affine X
|
||||
* coordinate X / (v^2*Z^2) = X / (d*g*Z^2). Determining the affine Y coordinate would involve
|
||||
* a square root, but as long as we only care about the resulting X coordinate, no square root
|
||||
* is needed anywhere in this computation.
|
||||
*/
|
||||
|
||||
rustsecp256k1_v0_9_0_fe g, i;
|
||||
rustsecp256k1_v0_9_0_ge p;
|
||||
rustsecp256k1_v0_9_0_gej rj;
|
||||
|
||||
/* Compute g = (n^3 + B*d^3). */
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&g, n);
|
||||
rustsecp256k1_v0_9_0_fe_mul(&g, &g, n);
|
||||
if (d) {
|
||||
rustsecp256k1_v0_9_0_fe b;
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(!rustsecp256k1_v0_9_0_fe_normalizes_to_zero(d));
|
||||
#endif
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&b, d);
|
||||
VERIFY_CHECK(SECP256K1_B <= 8); /* magnitude of b will be <= 8 after the next call */
|
||||
rustsecp256k1_v0_9_0_fe_mul_int(&b, SECP256K1_B);
|
||||
rustsecp256k1_v0_9_0_fe_mul(&b, &b, d);
|
||||
rustsecp256k1_v0_9_0_fe_add(&g, &b);
|
||||
if (!known_on_curve) {
|
||||
/* We need to determine whether (n/d)^3 + 7 is square.
|
||||
*
|
||||
* is_square((n/d)^3 + 7)
|
||||
* <=> is_square(((n/d)^3 + 7) * d^4)
|
||||
* <=> is_square((n^3 + 7*d^3) * d)
|
||||
* <=> is_square(g * d)
|
||||
*/
|
||||
rustsecp256k1_v0_9_0_fe c;
|
||||
rustsecp256k1_v0_9_0_fe_mul(&c, &g, d);
|
||||
if (!rustsecp256k1_v0_9_0_fe_is_square_var(&c)) return 0;
|
||||
}
|
||||
} else {
|
||||
rustsecp256k1_v0_9_0_fe_add_int(&g, SECP256K1_B);
|
||||
if (!known_on_curve) {
|
||||
/* g at this point equals x^3 + 7. Test if it is square. */
|
||||
if (!rustsecp256k1_v0_9_0_fe_is_square_var(&g)) return 0;
|
||||
}
|
||||
}
|
||||
|
||||
rustsecp256k1_v0_8_1_fe_mul(&r->z, &r->z, &Z);
|
||||
/* Compute base point P = (n*g, g^2), the effective affine version of (n*g, g^2, v), which has
|
||||
* corresponding affine X coordinate n/d. */
|
||||
rustsecp256k1_v0_9_0_fe_mul(&p.x, &g, n);
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&p.y, &g);
|
||||
p.infinity = 0;
|
||||
|
||||
/* Perform x-only EC multiplication of P with q. */
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(!rustsecp256k1_v0_9_0_scalar_is_zero(q));
|
||||
#endif
|
||||
rustsecp256k1_v0_9_0_ecmult_const(&rj, &p, q);
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(!rustsecp256k1_v0_9_0_gej_is_infinity(&rj));
|
||||
#endif
|
||||
|
||||
/* The resulting (X, Y, Z) point on the effective-affine isomorphic curve corresponds to
|
||||
* (X, Y, Z*v) on the secp256k1 curve. The affine version of that has X coordinate
|
||||
* (X / (Z^2*d*g)). */
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&i, &rj.z);
|
||||
rustsecp256k1_v0_9_0_fe_mul(&i, &i, &g);
|
||||
if (d) rustsecp256k1_v0_9_0_fe_mul(&i, &i, d);
|
||||
rustsecp256k1_v0_9_0_fe_inv(&i, &i);
|
||||
rustsecp256k1_v0_9_0_fe_mul(r, &rj.x, &i);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
#endif /* SECP256K1_ECMULT_CONST_IMPL_H */
|
||||
|
|
|
@ -33,16 +33,16 @@ typedef struct {
|
|||
int built;
|
||||
|
||||
/* Blinding values used when computing (n-b)G + bG. */
|
||||
rustsecp256k1_v0_8_1_scalar blind; /* -b */
|
||||
rustsecp256k1_v0_8_1_gej initial; /* bG */
|
||||
} rustsecp256k1_v0_8_1_ecmult_gen_context;
|
||||
rustsecp256k1_v0_9_0_scalar blind; /* -b */
|
||||
rustsecp256k1_v0_9_0_gej initial; /* bG */
|
||||
} rustsecp256k1_v0_9_0_ecmult_gen_context;
|
||||
|
||||
static void rustsecp256k1_v0_8_1_ecmult_gen_context_build(rustsecp256k1_v0_8_1_ecmult_gen_context* ctx);
|
||||
static void rustsecp256k1_v0_8_1_ecmult_gen_context_clear(rustsecp256k1_v0_8_1_ecmult_gen_context* ctx);
|
||||
static void rustsecp256k1_v0_9_0_ecmult_gen_context_build(rustsecp256k1_v0_9_0_ecmult_gen_context* ctx);
|
||||
static void rustsecp256k1_v0_9_0_ecmult_gen_context_clear(rustsecp256k1_v0_9_0_ecmult_gen_context* ctx);
|
||||
|
||||
/** Multiply with the generator: R = a*G */
|
||||
static void rustsecp256k1_v0_8_1_ecmult_gen(const rustsecp256k1_v0_8_1_ecmult_gen_context* ctx, rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_scalar *a);
|
||||
static void rustsecp256k1_v0_9_0_ecmult_gen(const rustsecp256k1_v0_9_0_ecmult_gen_context* ctx, rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_scalar *a);
|
||||
|
||||
static void rustsecp256k1_v0_8_1_ecmult_gen_blind(rustsecp256k1_v0_8_1_ecmult_gen_context *ctx, const unsigned char *seed32);
|
||||
static void rustsecp256k1_v0_9_0_ecmult_gen_blind(rustsecp256k1_v0_9_0_ecmult_gen_context *ctx, const unsigned char *seed32);
|
||||
|
||||
#endif /* SECP256K1_ECMULT_GEN_H */
|
||||
|
|
|
@ -9,6 +9,6 @@
|
|||
|
||||
#include "ecmult_gen.h"
|
||||
|
||||
static void rustsecp256k1_v0_8_1_ecmult_gen_compute_table(rustsecp256k1_v0_8_1_ge_storage* table, const rustsecp256k1_v0_8_1_ge* gen, int bits);
|
||||
static void rustsecp256k1_v0_9_0_ecmult_gen_compute_table(rustsecp256k1_v0_9_0_ge_storage* table, const rustsecp256k1_v0_9_0_ge* gen, int bits);
|
||||
|
||||
#endif /* SECP256K1_ECMULT_GEN_COMPUTE_TABLE_H */
|
||||
|
|
|
@ -13,66 +13,69 @@
|
|||
#include "ecmult_gen.h"
|
||||
#include "util.h"
|
||||
|
||||
static void rustsecp256k1_v0_8_1_ecmult_gen_compute_table(rustsecp256k1_v0_8_1_ge_storage* table, const rustsecp256k1_v0_8_1_ge* gen, int bits) {
|
||||
static void rustsecp256k1_v0_9_0_ecmult_gen_compute_table(rustsecp256k1_v0_9_0_ge_storage* table, const rustsecp256k1_v0_9_0_ge* gen, int bits) {
|
||||
int g = ECMULT_GEN_PREC_G(bits);
|
||||
int n = ECMULT_GEN_PREC_N(bits);
|
||||
|
||||
rustsecp256k1_v0_8_1_ge* prec = checked_malloc(&default_error_callback, n * g * sizeof(*prec));
|
||||
rustsecp256k1_v0_8_1_gej gj;
|
||||
rustsecp256k1_v0_8_1_gej nums_gej;
|
||||
rustsecp256k1_v0_9_0_ge* prec = checked_malloc(&default_error_callback, n * g * sizeof(*prec));
|
||||
rustsecp256k1_v0_9_0_gej gj;
|
||||
rustsecp256k1_v0_9_0_gej nums_gej;
|
||||
int i, j;
|
||||
|
||||
VERIFY_CHECK(g > 0);
|
||||
VERIFY_CHECK(n > 0);
|
||||
|
||||
/* get the generator */
|
||||
rustsecp256k1_v0_8_1_gej_set_ge(&gj, gen);
|
||||
rustsecp256k1_v0_9_0_gej_set_ge(&gj, gen);
|
||||
|
||||
/* Construct a group element with no known corresponding scalar (nothing up my sleeve). */
|
||||
{
|
||||
static const unsigned char nums_b32[33] = "The scalar for this x is unknown";
|
||||
rustsecp256k1_v0_8_1_fe nums_x;
|
||||
rustsecp256k1_v0_8_1_ge nums_ge;
|
||||
rustsecp256k1_v0_9_0_fe nums_x;
|
||||
rustsecp256k1_v0_9_0_ge nums_ge;
|
||||
int r;
|
||||
r = rustsecp256k1_v0_8_1_fe_set_b32(&nums_x, nums_b32);
|
||||
r = rustsecp256k1_v0_9_0_fe_set_b32_limit(&nums_x, nums_b32);
|
||||
(void)r;
|
||||
VERIFY_CHECK(r);
|
||||
r = rustsecp256k1_v0_8_1_ge_set_xo_var(&nums_ge, &nums_x, 0);
|
||||
r = rustsecp256k1_v0_9_0_ge_set_xo_var(&nums_ge, &nums_x, 0);
|
||||
(void)r;
|
||||
VERIFY_CHECK(r);
|
||||
rustsecp256k1_v0_8_1_gej_set_ge(&nums_gej, &nums_ge);
|
||||
rustsecp256k1_v0_9_0_gej_set_ge(&nums_gej, &nums_ge);
|
||||
/* Add G to make the bits in x uniformly distributed. */
|
||||
rustsecp256k1_v0_8_1_gej_add_ge_var(&nums_gej, &nums_gej, gen, NULL);
|
||||
rustsecp256k1_v0_9_0_gej_add_ge_var(&nums_gej, &nums_gej, gen, NULL);
|
||||
}
|
||||
|
||||
/* compute prec. */
|
||||
{
|
||||
rustsecp256k1_v0_8_1_gej gbase;
|
||||
rustsecp256k1_v0_8_1_gej numsbase;
|
||||
rustsecp256k1_v0_8_1_gej* precj = checked_malloc(&default_error_callback, n * g * sizeof(*precj)); /* Jacobian versions of prec. */
|
||||
rustsecp256k1_v0_9_0_gej gbase;
|
||||
rustsecp256k1_v0_9_0_gej numsbase;
|
||||
rustsecp256k1_v0_9_0_gej* precj = checked_malloc(&default_error_callback, n * g * sizeof(*precj)); /* Jacobian versions of prec. */
|
||||
gbase = gj; /* PREC_G^j * G */
|
||||
numsbase = nums_gej; /* 2^j * nums. */
|
||||
for (j = 0; j < n; j++) {
|
||||
/* Set precj[j*PREC_G .. j*PREC_G+(PREC_G-1)] to (numsbase, numsbase + gbase, ..., numsbase + (PREC_G-1)*gbase). */
|
||||
precj[j*g] = numsbase;
|
||||
for (i = 1; i < g; i++) {
|
||||
rustsecp256k1_v0_8_1_gej_add_var(&precj[j*g + i], &precj[j*g + i - 1], &gbase, NULL);
|
||||
rustsecp256k1_v0_9_0_gej_add_var(&precj[j*g + i], &precj[j*g + i - 1], &gbase, NULL);
|
||||
}
|
||||
/* Multiply gbase by PREC_G. */
|
||||
for (i = 0; i < bits; i++) {
|
||||
rustsecp256k1_v0_8_1_gej_double_var(&gbase, &gbase, NULL);
|
||||
rustsecp256k1_v0_9_0_gej_double_var(&gbase, &gbase, NULL);
|
||||
}
|
||||
/* Multiply numbase by 2. */
|
||||
rustsecp256k1_v0_8_1_gej_double_var(&numsbase, &numsbase, NULL);
|
||||
rustsecp256k1_v0_9_0_gej_double_var(&numsbase, &numsbase, NULL);
|
||||
if (j == n - 2) {
|
||||
/* In the last iteration, numsbase is (1 - 2^j) * nums instead. */
|
||||
rustsecp256k1_v0_8_1_gej_neg(&numsbase, &numsbase);
|
||||
rustsecp256k1_v0_8_1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL);
|
||||
rustsecp256k1_v0_9_0_gej_neg(&numsbase, &numsbase);
|
||||
rustsecp256k1_v0_9_0_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL);
|
||||
}
|
||||
}
|
||||
rustsecp256k1_v0_8_1_ge_set_all_gej_var(prec, precj, n * g);
|
||||
rustsecp256k1_v0_9_0_ge_set_all_gej_var(prec, precj, n * g);
|
||||
free(precj);
|
||||
}
|
||||
for (j = 0; j < n; j++) {
|
||||
for (i = 0; i < g; i++) {
|
||||
rustsecp256k1_v0_8_1_ge_to_storage(&table[j*g + i], &prec[j*g + i]);
|
||||
rustsecp256k1_v0_9_0_ge_to_storage(&table[j*g + i], &prec[j*g + i]);
|
||||
}
|
||||
}
|
||||
free(prec);
|
||||
|
|
|
@ -14,19 +14,19 @@
|
|||
#include "hash_impl.h"
|
||||
#include "precomputed_ecmult_gen.h"
|
||||
|
||||
static void rustsecp256k1_v0_8_1_ecmult_gen_context_build(rustsecp256k1_v0_8_1_ecmult_gen_context *ctx) {
|
||||
rustsecp256k1_v0_8_1_ecmult_gen_blind(ctx, NULL);
|
||||
static void rustsecp256k1_v0_9_0_ecmult_gen_context_build(rustsecp256k1_v0_9_0_ecmult_gen_context *ctx) {
|
||||
rustsecp256k1_v0_9_0_ecmult_gen_blind(ctx, NULL);
|
||||
ctx->built = 1;
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_ecmult_gen_context_is_built(const rustsecp256k1_v0_8_1_ecmult_gen_context* ctx) {
|
||||
static int rustsecp256k1_v0_9_0_ecmult_gen_context_is_built(const rustsecp256k1_v0_9_0_ecmult_gen_context* ctx) {
|
||||
return ctx->built;
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_ecmult_gen_context_clear(rustsecp256k1_v0_8_1_ecmult_gen_context *ctx) {
|
||||
static void rustsecp256k1_v0_9_0_ecmult_gen_context_clear(rustsecp256k1_v0_9_0_ecmult_gen_context *ctx) {
|
||||
ctx->built = 0;
|
||||
rustsecp256k1_v0_8_1_scalar_clear(&ctx->blind);
|
||||
rustsecp256k1_v0_8_1_gej_clear(&ctx->initial);
|
||||
rustsecp256k1_v0_9_0_scalar_clear(&ctx->blind);
|
||||
rustsecp256k1_v0_9_0_gej_clear(&ctx->initial);
|
||||
}
|
||||
|
||||
/* For accelerating the computation of a*G:
|
||||
|
@ -40,25 +40,25 @@ static void rustsecp256k1_v0_8_1_ecmult_gen_context_clear(rustsecp256k1_v0_8_1_e
|
|||
* precomputed (call it prec(i, n_i)). The formula now becomes sum(prec(i, n_i), i=0 ... PREC_N-1).
|
||||
* None of the resulting prec group elements have a known scalar, and neither do any of
|
||||
* the intermediate sums while computing a*G.
|
||||
* The prec values are stored in rustsecp256k1_v0_8_1_ecmult_gen_prec_table[i][n_i] = n_i * (PREC_G)^i * G + U_i.
|
||||
* The prec values are stored in rustsecp256k1_v0_9_0_ecmult_gen_prec_table[i][n_i] = n_i * (PREC_G)^i * G + U_i.
|
||||
*/
|
||||
static void rustsecp256k1_v0_8_1_ecmult_gen(const rustsecp256k1_v0_8_1_ecmult_gen_context *ctx, rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_scalar *gn) {
|
||||
static void rustsecp256k1_v0_9_0_ecmult_gen(const rustsecp256k1_v0_9_0_ecmult_gen_context *ctx, rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_scalar *gn) {
|
||||
int bits = ECMULT_GEN_PREC_BITS;
|
||||
int g = ECMULT_GEN_PREC_G(bits);
|
||||
int n = ECMULT_GEN_PREC_N(bits);
|
||||
|
||||
rustsecp256k1_v0_8_1_ge add;
|
||||
rustsecp256k1_v0_8_1_ge_storage adds;
|
||||
rustsecp256k1_v0_8_1_scalar gnb;
|
||||
rustsecp256k1_v0_9_0_ge add;
|
||||
rustsecp256k1_v0_9_0_ge_storage adds;
|
||||
rustsecp256k1_v0_9_0_scalar gnb;
|
||||
int i, j, n_i;
|
||||
|
||||
memset(&adds, 0, sizeof(adds));
|
||||
*r = ctx->initial;
|
||||
/* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. */
|
||||
rustsecp256k1_v0_8_1_scalar_add(&gnb, gn, &ctx->blind);
|
||||
rustsecp256k1_v0_9_0_scalar_add(&gnb, gn, &ctx->blind);
|
||||
add.infinity = 0;
|
||||
for (i = 0; i < n; i++) {
|
||||
n_i = rustsecp256k1_v0_8_1_scalar_get_bits(&gnb, i * bits, bits);
|
||||
n_i = rustsecp256k1_v0_9_0_scalar_get_bits(&gnb, i * bits, bits);
|
||||
for (j = 0; j < g; j++) {
|
||||
/** This uses a conditional move to avoid any secret data in array indexes.
|
||||
* _Any_ use of secret indexes has been demonstrated to result in timing
|
||||
|
@ -70,64 +70,61 @@ static void rustsecp256k1_v0_8_1_ecmult_gen(const rustsecp256k1_v0_8_1_ecmult_ge
|
|||
* by Dag Arne Osvik, Adi Shamir, and Eran Tromer
|
||||
* (https://www.tau.ac.il/~tromer/papers/cache.pdf)
|
||||
*/
|
||||
rustsecp256k1_v0_8_1_ge_storage_cmov(&adds, &rustsecp256k1_v0_8_1_ecmult_gen_prec_table[i][j], j == n_i);
|
||||
rustsecp256k1_v0_9_0_ge_storage_cmov(&adds, &rustsecp256k1_v0_9_0_ecmult_gen_prec_table[i][j], j == n_i);
|
||||
}
|
||||
rustsecp256k1_v0_8_1_ge_from_storage(&add, &adds);
|
||||
rustsecp256k1_v0_8_1_gej_add_ge(r, r, &add);
|
||||
rustsecp256k1_v0_9_0_ge_from_storage(&add, &adds);
|
||||
rustsecp256k1_v0_9_0_gej_add_ge(r, r, &add);
|
||||
}
|
||||
n_i = 0;
|
||||
rustsecp256k1_v0_8_1_ge_clear(&add);
|
||||
rustsecp256k1_v0_8_1_scalar_clear(&gnb);
|
||||
rustsecp256k1_v0_9_0_ge_clear(&add);
|
||||
rustsecp256k1_v0_9_0_scalar_clear(&gnb);
|
||||
}
|
||||
|
||||
/* Setup blinding values for rustsecp256k1_v0_8_1_ecmult_gen. */
|
||||
static void rustsecp256k1_v0_8_1_ecmult_gen_blind(rustsecp256k1_v0_8_1_ecmult_gen_context *ctx, const unsigned char *seed32) {
|
||||
rustsecp256k1_v0_8_1_scalar b;
|
||||
rustsecp256k1_v0_8_1_gej gb;
|
||||
rustsecp256k1_v0_8_1_fe s;
|
||||
/* Setup blinding values for rustsecp256k1_v0_9_0_ecmult_gen. */
|
||||
static void rustsecp256k1_v0_9_0_ecmult_gen_blind(rustsecp256k1_v0_9_0_ecmult_gen_context *ctx, const unsigned char *seed32) {
|
||||
rustsecp256k1_v0_9_0_scalar b;
|
||||
rustsecp256k1_v0_9_0_gej gb;
|
||||
rustsecp256k1_v0_9_0_fe s;
|
||||
unsigned char nonce32[32];
|
||||
rustsecp256k1_v0_8_1_rfc6979_hmac_sha256 rng;
|
||||
int overflow;
|
||||
rustsecp256k1_v0_9_0_rfc6979_hmac_sha256 rng;
|
||||
unsigned char keydata[64];
|
||||
if (seed32 == NULL) {
|
||||
/* When seed is NULL, reset the initial point and blinding value. */
|
||||
rustsecp256k1_v0_8_1_gej_set_ge(&ctx->initial, &rustsecp256k1_v0_8_1_ge_const_g);
|
||||
rustsecp256k1_v0_8_1_gej_neg(&ctx->initial, &ctx->initial);
|
||||
rustsecp256k1_v0_8_1_scalar_set_int(&ctx->blind, 1);
|
||||
rustsecp256k1_v0_9_0_gej_set_ge(&ctx->initial, &rustsecp256k1_v0_9_0_ge_const_g);
|
||||
rustsecp256k1_v0_9_0_gej_neg(&ctx->initial, &ctx->initial);
|
||||
rustsecp256k1_v0_9_0_scalar_set_int(&ctx->blind, 1);
|
||||
return;
|
||||
}
|
||||
/* The prior blinding value (if not reset) is chained forward by including it in the hash. */
|
||||
rustsecp256k1_v0_8_1_scalar_get_b32(keydata, &ctx->blind);
|
||||
rustsecp256k1_v0_9_0_scalar_get_b32(keydata, &ctx->blind);
|
||||
/** Using a CSPRNG allows a failure free interface, avoids needing large amounts of random data,
|
||||
* and guards against weak or adversarial seeds. This is a simpler and safer interface than
|
||||
* asking the caller for blinding values directly and expecting them to retry on failure.
|
||||
*/
|
||||
VERIFY_CHECK(seed32 != NULL);
|
||||
memcpy(keydata + 32, seed32, 32);
|
||||
rustsecp256k1_v0_8_1_rfc6979_hmac_sha256_initialize(&rng, keydata, 64);
|
||||
rustsecp256k1_v0_9_0_rfc6979_hmac_sha256_initialize(&rng, keydata, 64);
|
||||
memset(keydata, 0, sizeof(keydata));
|
||||
/* Accept unobservably small non-uniformity. */
|
||||
rustsecp256k1_v0_8_1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
|
||||
overflow = !rustsecp256k1_v0_8_1_fe_set_b32(&s, nonce32);
|
||||
overflow |= rustsecp256k1_v0_8_1_fe_is_zero(&s);
|
||||
rustsecp256k1_v0_8_1_fe_cmov(&s, &rustsecp256k1_v0_8_1_fe_one, overflow);
|
||||
rustsecp256k1_v0_9_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
|
||||
rustsecp256k1_v0_9_0_fe_set_b32_mod(&s, nonce32);
|
||||
rustsecp256k1_v0_9_0_fe_cmov(&s, &rustsecp256k1_v0_9_0_fe_one, rustsecp256k1_v0_9_0_fe_normalizes_to_zero(&s));
|
||||
/* Randomize the projection to defend against multiplier sidechannels.
|
||||
Do this before our own call to rustsecp256k1_v0_8_1_ecmult_gen below. */
|
||||
rustsecp256k1_v0_8_1_gej_rescale(&ctx->initial, &s);
|
||||
rustsecp256k1_v0_8_1_fe_clear(&s);
|
||||
rustsecp256k1_v0_8_1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
|
||||
rustsecp256k1_v0_8_1_scalar_set_b32(&b, nonce32, NULL);
|
||||
Do this before our own call to rustsecp256k1_v0_9_0_ecmult_gen below. */
|
||||
rustsecp256k1_v0_9_0_gej_rescale(&ctx->initial, &s);
|
||||
rustsecp256k1_v0_9_0_fe_clear(&s);
|
||||
rustsecp256k1_v0_9_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
|
||||
rustsecp256k1_v0_9_0_scalar_set_b32(&b, nonce32, NULL);
|
||||
/* A blinding value of 0 works, but would undermine the projection hardening. */
|
||||
rustsecp256k1_v0_8_1_scalar_cmov(&b, &rustsecp256k1_v0_8_1_scalar_one, rustsecp256k1_v0_8_1_scalar_is_zero(&b));
|
||||
rustsecp256k1_v0_8_1_rfc6979_hmac_sha256_finalize(&rng);
|
||||
rustsecp256k1_v0_9_0_scalar_cmov(&b, &rustsecp256k1_v0_9_0_scalar_one, rustsecp256k1_v0_9_0_scalar_is_zero(&b));
|
||||
rustsecp256k1_v0_9_0_rfc6979_hmac_sha256_finalize(&rng);
|
||||
memset(nonce32, 0, 32);
|
||||
/* The random projection in ctx->initial ensures that gb will have a random projection. */
|
||||
rustsecp256k1_v0_8_1_ecmult_gen(ctx, &gb, &b);
|
||||
rustsecp256k1_v0_8_1_scalar_negate(&b, &b);
|
||||
rustsecp256k1_v0_9_0_ecmult_gen(ctx, &gb, &b);
|
||||
rustsecp256k1_v0_9_0_scalar_negate(&b, &b);
|
||||
ctx->blind = b;
|
||||
ctx->initial = gb;
|
||||
rustsecp256k1_v0_8_1_scalar_clear(&b);
|
||||
rustsecp256k1_v0_8_1_gej_clear(&gb);
|
||||
rustsecp256k1_v0_9_0_scalar_clear(&b);
|
||||
rustsecp256k1_v0_9_0_gej_clear(&gb);
|
||||
}
|
||||
|
||||
#endif /* SECP256K1_ECMULT_GEN_IMPL_H */
|
||||
|
|
|
@ -33,8 +33,8 @@
|
|||
/** Larger values for ECMULT_WINDOW_SIZE result in possibly better
|
||||
* performance at the cost of an exponentially larger precomputed
|
||||
* table. The exact table size is
|
||||
* (1 << (WINDOW_G - 2)) * sizeof(rustsecp256k1_v0_8_1_ge_storage) bytes,
|
||||
* where sizeof(rustsecp256k1_v0_8_1_ge_storage) is typically 64 bytes but can
|
||||
* (1 << (WINDOW_G - 2)) * sizeof(rustsecp256k1_v0_9_0_ge_storage) bytes,
|
||||
* where sizeof(rustsecp256k1_v0_9_0_ge_storage) is typically 64 bytes but can
|
||||
* be larger due to platform-specific padding and alignment.
|
||||
* Two tables of this size are used (due to the endomorphism
|
||||
* optimization).
|
||||
|
@ -70,14 +70,14 @@
|
|||
* Lastly the zr[0] value, which isn't used above, is set so that:
|
||||
* - a.z = z(pre_a[0]) / zr[0]
|
||||
*/
|
||||
static void rustsecp256k1_v0_8_1_ecmult_odd_multiples_table(int n, rustsecp256k1_v0_8_1_ge *pre_a, rustsecp256k1_v0_8_1_fe *zr, rustsecp256k1_v0_8_1_fe *z, const rustsecp256k1_v0_8_1_gej *a) {
|
||||
rustsecp256k1_v0_8_1_gej d, ai;
|
||||
rustsecp256k1_v0_8_1_ge d_ge;
|
||||
static void rustsecp256k1_v0_9_0_ecmult_odd_multiples_table(int n, rustsecp256k1_v0_9_0_ge *pre_a, rustsecp256k1_v0_9_0_fe *zr, rustsecp256k1_v0_9_0_fe *z, const rustsecp256k1_v0_9_0_gej *a) {
|
||||
rustsecp256k1_v0_9_0_gej d, ai;
|
||||
rustsecp256k1_v0_9_0_ge d_ge;
|
||||
int i;
|
||||
|
||||
VERIFY_CHECK(!a->infinity);
|
||||
|
||||
rustsecp256k1_v0_8_1_gej_double_var(&d, a, NULL);
|
||||
rustsecp256k1_v0_9_0_gej_double_var(&d, a, NULL);
|
||||
|
||||
/*
|
||||
* Perform the additions using an isomorphic curve Y^2 = X^3 + 7*C^6 where C := d.z.
|
||||
|
@ -90,62 +90,65 @@ static void rustsecp256k1_v0_8_1_ecmult_odd_multiples_table(int n, rustsecp256k1
|
|||
*
|
||||
* The group addition functions work correctly on these isomorphic curves.
|
||||
* In particular phi(d) is easy to represent in affine coordinates under this isomorphism.
|
||||
* This lets us use the faster rustsecp256k1_v0_8_1_gej_add_ge_var group addition function that we wouldn't be able to use otherwise.
|
||||
* This lets us use the faster rustsecp256k1_v0_9_0_gej_add_ge_var group addition function that we wouldn't be able to use otherwise.
|
||||
*/
|
||||
rustsecp256k1_v0_8_1_ge_set_xy(&d_ge, &d.x, &d.y);
|
||||
rustsecp256k1_v0_8_1_ge_set_gej_zinv(&pre_a[0], a, &d.z);
|
||||
rustsecp256k1_v0_8_1_gej_set_ge(&ai, &pre_a[0]);
|
||||
rustsecp256k1_v0_9_0_ge_set_xy(&d_ge, &d.x, &d.y);
|
||||
rustsecp256k1_v0_9_0_ge_set_gej_zinv(&pre_a[0], a, &d.z);
|
||||
rustsecp256k1_v0_9_0_gej_set_ge(&ai, &pre_a[0]);
|
||||
ai.z = a->z;
|
||||
|
||||
/* pre_a[0] is the point (a.x*C^2, a.y*C^3, a.z*C) which is equvalent to a.
|
||||
/* pre_a[0] is the point (a.x*C^2, a.y*C^3, a.z*C) which is equivalent to a.
|
||||
* Set zr[0] to C, which is the ratio between the omitted z(pre_a[0]) value and a.z.
|
||||
*/
|
||||
zr[0] = d.z;
|
||||
|
||||
for (i = 1; i < n; i++) {
|
||||
rustsecp256k1_v0_8_1_gej_add_ge_var(&ai, &ai, &d_ge, &zr[i]);
|
||||
rustsecp256k1_v0_8_1_ge_set_xy(&pre_a[i], &ai.x, &ai.y);
|
||||
rustsecp256k1_v0_9_0_gej_add_ge_var(&ai, &ai, &d_ge, &zr[i]);
|
||||
rustsecp256k1_v0_9_0_ge_set_xy(&pre_a[i], &ai.x, &ai.y);
|
||||
}
|
||||
|
||||
/* Multiply the last z-coordinate by C to undo the isomorphism.
|
||||
* Since the z-coordinates of the pre_a values are implied by the zr array of z-coordinate ratios,
|
||||
* undoing the isomorphism here undoes the isomorphism for all pre_a values.
|
||||
*/
|
||||
rustsecp256k1_v0_8_1_fe_mul(z, &ai.z, &d.z);
|
||||
rustsecp256k1_v0_9_0_fe_mul(z, &ai.z, &d.z);
|
||||
}
|
||||
|
||||
#define SECP256K1_ECMULT_TABLE_VERIFY(n,w) \
|
||||
VERIFY_CHECK(((n) & 1) == 1); \
|
||||
VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_ecmult_table_verify(int n, int w) {
|
||||
(void)n;
|
||||
(void)w;
|
||||
VERIFY_CHECK(((n) & 1) == 1);
|
||||
VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1));
|
||||
VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1));
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_ecmult_table_get_ge(rustsecp256k1_v0_8_1_ge *r, const rustsecp256k1_v0_8_1_ge *pre, int n, int w) {
|
||||
SECP256K1_ECMULT_TABLE_VERIFY(n,w)
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_ecmult_table_get_ge(rustsecp256k1_v0_9_0_ge *r, const rustsecp256k1_v0_9_0_ge *pre, int n, int w) {
|
||||
rustsecp256k1_v0_9_0_ecmult_table_verify(n,w);
|
||||
if (n > 0) {
|
||||
*r = pre[(n-1)/2];
|
||||
} else {
|
||||
*r = pre[(-n-1)/2];
|
||||
rustsecp256k1_v0_8_1_fe_negate(&(r->y), &(r->y), 1);
|
||||
rustsecp256k1_v0_9_0_fe_negate(&(r->y), &(r->y), 1);
|
||||
}
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_ecmult_table_get_ge_lambda(rustsecp256k1_v0_8_1_ge *r, const rustsecp256k1_v0_8_1_ge *pre, const rustsecp256k1_v0_8_1_fe *x, int n, int w) {
|
||||
SECP256K1_ECMULT_TABLE_VERIFY(n,w)
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_ecmult_table_get_ge_lambda(rustsecp256k1_v0_9_0_ge *r, const rustsecp256k1_v0_9_0_ge *pre, const rustsecp256k1_v0_9_0_fe *x, int n, int w) {
|
||||
rustsecp256k1_v0_9_0_ecmult_table_verify(n,w);
|
||||
if (n > 0) {
|
||||
rustsecp256k1_v0_8_1_ge_set_xy(r, &x[(n-1)/2], &pre[(n-1)/2].y);
|
||||
rustsecp256k1_v0_9_0_ge_set_xy(r, &x[(n-1)/2], &pre[(n-1)/2].y);
|
||||
} else {
|
||||
rustsecp256k1_v0_8_1_ge_set_xy(r, &x[(-n-1)/2], &pre[(-n-1)/2].y);
|
||||
rustsecp256k1_v0_8_1_fe_negate(&(r->y), &(r->y), 1);
|
||||
rustsecp256k1_v0_9_0_ge_set_xy(r, &x[(-n-1)/2], &pre[(-n-1)/2].y);
|
||||
rustsecp256k1_v0_9_0_fe_negate(&(r->y), &(r->y), 1);
|
||||
}
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_ecmult_table_get_ge_storage(rustsecp256k1_v0_8_1_ge *r, const rustsecp256k1_v0_8_1_ge_storage *pre, int n, int w) {
|
||||
SECP256K1_ECMULT_TABLE_VERIFY(n,w)
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_ecmult_table_get_ge_storage(rustsecp256k1_v0_9_0_ge *r, const rustsecp256k1_v0_9_0_ge_storage *pre, int n, int w) {
|
||||
rustsecp256k1_v0_9_0_ecmult_table_verify(n,w);
|
||||
if (n > 0) {
|
||||
rustsecp256k1_v0_8_1_ge_from_storage(r, &pre[(n-1)/2]);
|
||||
rustsecp256k1_v0_9_0_ge_from_storage(r, &pre[(n-1)/2]);
|
||||
} else {
|
||||
rustsecp256k1_v0_8_1_ge_from_storage(r, &pre[(-n-1)/2]);
|
||||
rustsecp256k1_v0_8_1_fe_negate(&(r->y), &(r->y), 1);
|
||||
rustsecp256k1_v0_9_0_ge_from_storage(r, &pre[(-n-1)/2]);
|
||||
rustsecp256k1_v0_9_0_fe_negate(&(r->y), &(r->y), 1);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -156,8 +159,8 @@ SECP256K1_INLINE static void rustsecp256k1_v0_8_1_ecmult_table_get_ge_storage(ru
|
|||
* - the number of set values in wnaf is returned. This number is at most 256, and at most one more
|
||||
* than the number of bits in the (absolute value) of the input.
|
||||
*/
|
||||
static int rustsecp256k1_v0_8_1_ecmult_wnaf(int *wnaf, int len, const rustsecp256k1_v0_8_1_scalar *a, int w) {
|
||||
rustsecp256k1_v0_8_1_scalar s;
|
||||
static int rustsecp256k1_v0_9_0_ecmult_wnaf(int *wnaf, int len, const rustsecp256k1_v0_9_0_scalar *a, int w) {
|
||||
rustsecp256k1_v0_9_0_scalar s;
|
||||
int last_set_bit = -1;
|
||||
int bit = 0;
|
||||
int sign = 1;
|
||||
|
@ -171,15 +174,15 @@ static int rustsecp256k1_v0_8_1_ecmult_wnaf(int *wnaf, int len, const rustsecp25
|
|||
memset(wnaf, 0, len * sizeof(wnaf[0]));
|
||||
|
||||
s = *a;
|
||||
if (rustsecp256k1_v0_8_1_scalar_get_bits(&s, 255, 1)) {
|
||||
rustsecp256k1_v0_8_1_scalar_negate(&s, &s);
|
||||
if (rustsecp256k1_v0_9_0_scalar_get_bits(&s, 255, 1)) {
|
||||
rustsecp256k1_v0_9_0_scalar_negate(&s, &s);
|
||||
sign = -1;
|
||||
}
|
||||
|
||||
while (bit < len) {
|
||||
int now;
|
||||
int word;
|
||||
if (rustsecp256k1_v0_8_1_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) {
|
||||
if (rustsecp256k1_v0_9_0_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) {
|
||||
bit++;
|
||||
continue;
|
||||
}
|
||||
|
@ -189,7 +192,7 @@ static int rustsecp256k1_v0_8_1_ecmult_wnaf(int *wnaf, int len, const rustsecp25
|
|||
now = len - bit;
|
||||
}
|
||||
|
||||
word = rustsecp256k1_v0_8_1_scalar_get_bits_var(&s, bit, now) + carry;
|
||||
word = rustsecp256k1_v0_9_0_scalar_get_bits_var(&s, bit, now) + carry;
|
||||
|
||||
carry = (word >> (w-1)) & 1;
|
||||
word -= carry << w;
|
||||
|
@ -206,7 +209,7 @@ static int rustsecp256k1_v0_8_1_ecmult_wnaf(int *wnaf, int len, const rustsecp25
|
|||
VERIFY_CHECK(carry == 0);
|
||||
|
||||
while (verify_bit < 256) {
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_scalar_get_bits(&s, verify_bit, 1) == 0);
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_scalar_get_bits(&s, verify_bit, 1) == 0);
|
||||
verify_bit++;
|
||||
}
|
||||
}
|
||||
|
@ -214,25 +217,25 @@ static int rustsecp256k1_v0_8_1_ecmult_wnaf(int *wnaf, int len, const rustsecp25
|
|||
return last_set_bit + 1;
|
||||
}
|
||||
|
||||
struct rustsecp256k1_v0_8_1_strauss_point_state {
|
||||
struct rustsecp256k1_v0_9_0_strauss_point_state {
|
||||
int wnaf_na_1[129];
|
||||
int wnaf_na_lam[129];
|
||||
int bits_na_1;
|
||||
int bits_na_lam;
|
||||
};
|
||||
|
||||
struct rustsecp256k1_v0_8_1_strauss_state {
|
||||
struct rustsecp256k1_v0_9_0_strauss_state {
|
||||
/* aux is used to hold z-ratios, and then used to hold pre_a[i].x * BETA values. */
|
||||
rustsecp256k1_v0_8_1_fe* aux;
|
||||
rustsecp256k1_v0_8_1_ge* pre_a;
|
||||
struct rustsecp256k1_v0_8_1_strauss_point_state* ps;
|
||||
rustsecp256k1_v0_9_0_fe* aux;
|
||||
rustsecp256k1_v0_9_0_ge* pre_a;
|
||||
struct rustsecp256k1_v0_9_0_strauss_point_state* ps;
|
||||
};
|
||||
|
||||
static void rustsecp256k1_v0_8_1_ecmult_strauss_wnaf(const struct rustsecp256k1_v0_8_1_strauss_state *state, rustsecp256k1_v0_8_1_gej *r, size_t num, const rustsecp256k1_v0_8_1_gej *a, const rustsecp256k1_v0_8_1_scalar *na, const rustsecp256k1_v0_8_1_scalar *ng) {
|
||||
rustsecp256k1_v0_8_1_ge tmpa;
|
||||
rustsecp256k1_v0_8_1_fe Z;
|
||||
static void rustsecp256k1_v0_9_0_ecmult_strauss_wnaf(const struct rustsecp256k1_v0_9_0_strauss_state *state, rustsecp256k1_v0_9_0_gej *r, size_t num, const rustsecp256k1_v0_9_0_gej *a, const rustsecp256k1_v0_9_0_scalar *na, const rustsecp256k1_v0_9_0_scalar *ng) {
|
||||
rustsecp256k1_v0_9_0_ge tmpa;
|
||||
rustsecp256k1_v0_9_0_fe Z;
|
||||
/* Split G factors. */
|
||||
rustsecp256k1_v0_8_1_scalar ng_1, ng_128;
|
||||
rustsecp256k1_v0_9_0_scalar ng_1, ng_128;
|
||||
int wnaf_ng_1[129];
|
||||
int bits_ng_1 = 0;
|
||||
int wnaf_ng_128[129];
|
||||
|
@ -242,19 +245,19 @@ static void rustsecp256k1_v0_8_1_ecmult_strauss_wnaf(const struct rustsecp256k1_
|
|||
size_t np;
|
||||
size_t no = 0;
|
||||
|
||||
rustsecp256k1_v0_8_1_fe_set_int(&Z, 1);
|
||||
rustsecp256k1_v0_9_0_fe_set_int(&Z, 1);
|
||||
for (np = 0; np < num; ++np) {
|
||||
rustsecp256k1_v0_8_1_gej tmp;
|
||||
rustsecp256k1_v0_8_1_scalar na_1, na_lam;
|
||||
if (rustsecp256k1_v0_8_1_scalar_is_zero(&na[np]) || rustsecp256k1_v0_8_1_gej_is_infinity(&a[np])) {
|
||||
rustsecp256k1_v0_9_0_gej tmp;
|
||||
rustsecp256k1_v0_9_0_scalar na_1, na_lam;
|
||||
if (rustsecp256k1_v0_9_0_scalar_is_zero(&na[np]) || rustsecp256k1_v0_9_0_gej_is_infinity(&a[np])) {
|
||||
continue;
|
||||
}
|
||||
/* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */
|
||||
rustsecp256k1_v0_8_1_scalar_split_lambda(&na_1, &na_lam, &na[np]);
|
||||
rustsecp256k1_v0_9_0_scalar_split_lambda(&na_1, &na_lam, &na[np]);
|
||||
|
||||
/* build wnaf representation for na_1 and na_lam. */
|
||||
state->ps[no].bits_na_1 = rustsecp256k1_v0_8_1_ecmult_wnaf(state->ps[no].wnaf_na_1, 129, &na_1, WINDOW_A);
|
||||
state->ps[no].bits_na_lam = rustsecp256k1_v0_8_1_ecmult_wnaf(state->ps[no].wnaf_na_lam, 129, &na_lam, WINDOW_A);
|
||||
state->ps[no].bits_na_1 = rustsecp256k1_v0_9_0_ecmult_wnaf(state->ps[no].wnaf_na_1, 129, &na_1, WINDOW_A);
|
||||
state->ps[no].bits_na_lam = rustsecp256k1_v0_9_0_ecmult_wnaf(state->ps[no].wnaf_na_lam, 129, &na_lam, WINDOW_A);
|
||||
VERIFY_CHECK(state->ps[no].bits_na_1 <= 129);
|
||||
VERIFY_CHECK(state->ps[no].bits_na_lam <= 129);
|
||||
if (state->ps[no].bits_na_1 > bits) {
|
||||
|
@ -271,38 +274,37 @@ static void rustsecp256k1_v0_8_1_ecmult_strauss_wnaf(const struct rustsecp256k1_
|
|||
* the Z coordinate of the result once at the end.
|
||||
* The exception is the precomputed G table points, which are actually
|
||||
* affine. Compared to the base used for other points, they have a Z ratio
|
||||
* of 1/Z, so we can use rustsecp256k1_v0_8_1_gej_add_zinv_var, which uses the same
|
||||
* of 1/Z, so we can use rustsecp256k1_v0_9_0_gej_add_zinv_var, which uses the same
|
||||
* isomorphism to efficiently add with a known Z inverse.
|
||||
*/
|
||||
tmp = a[np];
|
||||
if (no) {
|
||||
#ifdef VERIFY
|
||||
rustsecp256k1_v0_8_1_fe_normalize_var(&Z);
|
||||
#endif
|
||||
rustsecp256k1_v0_8_1_gej_rescale(&tmp, &Z);
|
||||
rustsecp256k1_v0_9_0_gej_rescale(&tmp, &Z);
|
||||
}
|
||||
rustsecp256k1_v0_8_1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->pre_a + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &Z, &tmp);
|
||||
if (no) rustsecp256k1_v0_8_1_fe_mul(state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &(a[np].z));
|
||||
rustsecp256k1_v0_9_0_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->pre_a + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &Z, &tmp);
|
||||
if (no) rustsecp256k1_v0_9_0_fe_mul(state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &(a[np].z));
|
||||
|
||||
++no;
|
||||
}
|
||||
|
||||
/* Bring them to the same Z denominator. */
|
||||
rustsecp256k1_v0_8_1_ge_table_set_globalz(ECMULT_TABLE_SIZE(WINDOW_A) * no, state->pre_a, state->aux);
|
||||
if (no) {
|
||||
rustsecp256k1_v0_9_0_ge_table_set_globalz(ECMULT_TABLE_SIZE(WINDOW_A) * no, state->pre_a, state->aux);
|
||||
}
|
||||
|
||||
for (np = 0; np < no; ++np) {
|
||||
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
|
||||
rustsecp256k1_v0_8_1_fe_mul(&state->aux[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i].x, &rustsecp256k1_v0_8_1_const_beta);
|
||||
rustsecp256k1_v0_9_0_fe_mul(&state->aux[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i].x, &rustsecp256k1_v0_9_0_const_beta);
|
||||
}
|
||||
}
|
||||
|
||||
if (ng) {
|
||||
/* split ng into ng_1 and ng_128 (where gn = gn_1 + gn_128*2^128, and gn_1 and gn_128 are ~128 bit) */
|
||||
rustsecp256k1_v0_8_1_scalar_split_128(&ng_1, &ng_128, ng);
|
||||
rustsecp256k1_v0_9_0_scalar_split_128(&ng_1, &ng_128, ng);
|
||||
|
||||
/* Build wnaf representation for ng_1 and ng_128 */
|
||||
bits_ng_1 = rustsecp256k1_v0_8_1_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G);
|
||||
bits_ng_128 = rustsecp256k1_v0_8_1_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G);
|
||||
bits_ng_1 = rustsecp256k1_v0_9_0_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G);
|
||||
bits_ng_128 = rustsecp256k1_v0_9_0_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G);
|
||||
if (bits_ng_1 > bits) {
|
||||
bits = bits_ng_1;
|
||||
}
|
||||
|
@ -311,61 +313,61 @@ static void rustsecp256k1_v0_8_1_ecmult_strauss_wnaf(const struct rustsecp256k1_
|
|||
}
|
||||
}
|
||||
|
||||
rustsecp256k1_v0_8_1_gej_set_infinity(r);
|
||||
rustsecp256k1_v0_9_0_gej_set_infinity(r);
|
||||
|
||||
for (i = bits - 1; i >= 0; i--) {
|
||||
int n;
|
||||
rustsecp256k1_v0_8_1_gej_double_var(r, r, NULL);
|
||||
rustsecp256k1_v0_9_0_gej_double_var(r, r, NULL);
|
||||
for (np = 0; np < no; ++np) {
|
||||
if (i < state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) {
|
||||
rustsecp256k1_v0_8_1_ecmult_table_get_ge(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
|
||||
rustsecp256k1_v0_8_1_gej_add_ge_var(r, r, &tmpa, NULL);
|
||||
rustsecp256k1_v0_9_0_ecmult_table_get_ge(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
|
||||
rustsecp256k1_v0_9_0_gej_add_ge_var(r, r, &tmpa, NULL);
|
||||
}
|
||||
if (i < state->ps[np].bits_na_lam && (n = state->ps[np].wnaf_na_lam[i])) {
|
||||
rustsecp256k1_v0_8_1_ecmult_table_get_ge_lambda(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
|
||||
rustsecp256k1_v0_8_1_gej_add_ge_var(r, r, &tmpa, NULL);
|
||||
rustsecp256k1_v0_9_0_ecmult_table_get_ge_lambda(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
|
||||
rustsecp256k1_v0_9_0_gej_add_ge_var(r, r, &tmpa, NULL);
|
||||
}
|
||||
}
|
||||
if (i < bits_ng_1 && (n = wnaf_ng_1[i])) {
|
||||
rustsecp256k1_v0_8_1_ecmult_table_get_ge_storage(&tmpa, rustsecp256k1_v0_8_1_pre_g, n, WINDOW_G);
|
||||
rustsecp256k1_v0_8_1_gej_add_zinv_var(r, r, &tmpa, &Z);
|
||||
rustsecp256k1_v0_9_0_ecmult_table_get_ge_storage(&tmpa, rustsecp256k1_v0_9_0_pre_g, n, WINDOW_G);
|
||||
rustsecp256k1_v0_9_0_gej_add_zinv_var(r, r, &tmpa, &Z);
|
||||
}
|
||||
if (i < bits_ng_128 && (n = wnaf_ng_128[i])) {
|
||||
rustsecp256k1_v0_8_1_ecmult_table_get_ge_storage(&tmpa, rustsecp256k1_v0_8_1_pre_g_128, n, WINDOW_G);
|
||||
rustsecp256k1_v0_8_1_gej_add_zinv_var(r, r, &tmpa, &Z);
|
||||
rustsecp256k1_v0_9_0_ecmult_table_get_ge_storage(&tmpa, rustsecp256k1_v0_9_0_pre_g_128, n, WINDOW_G);
|
||||
rustsecp256k1_v0_9_0_gej_add_zinv_var(r, r, &tmpa, &Z);
|
||||
}
|
||||
}
|
||||
|
||||
if (!r->infinity) {
|
||||
rustsecp256k1_v0_8_1_fe_mul(&r->z, &r->z, &Z);
|
||||
rustsecp256k1_v0_9_0_fe_mul(&r->z, &r->z, &Z);
|
||||
}
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_ecmult(rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_gej *a, const rustsecp256k1_v0_8_1_scalar *na, const rustsecp256k1_v0_8_1_scalar *ng) {
|
||||
rustsecp256k1_v0_8_1_fe aux[ECMULT_TABLE_SIZE(WINDOW_A)];
|
||||
rustsecp256k1_v0_8_1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
|
||||
struct rustsecp256k1_v0_8_1_strauss_point_state ps[1];
|
||||
struct rustsecp256k1_v0_8_1_strauss_state state;
|
||||
static void rustsecp256k1_v0_9_0_ecmult(rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_gej *a, const rustsecp256k1_v0_9_0_scalar *na, const rustsecp256k1_v0_9_0_scalar *ng) {
|
||||
rustsecp256k1_v0_9_0_fe aux[ECMULT_TABLE_SIZE(WINDOW_A)];
|
||||
rustsecp256k1_v0_9_0_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
|
||||
struct rustsecp256k1_v0_9_0_strauss_point_state ps[1];
|
||||
struct rustsecp256k1_v0_9_0_strauss_state state;
|
||||
|
||||
state.aux = aux;
|
||||
state.pre_a = pre_a;
|
||||
state.ps = ps;
|
||||
rustsecp256k1_v0_8_1_ecmult_strauss_wnaf(&state, r, 1, a, na, ng);
|
||||
rustsecp256k1_v0_9_0_ecmult_strauss_wnaf(&state, r, 1, a, na, ng);
|
||||
}
|
||||
|
||||
static size_t rustsecp256k1_v0_8_1_strauss_scratch_size(size_t n_points) {
|
||||
static const size_t point_size = (sizeof(rustsecp256k1_v0_8_1_ge) + sizeof(rustsecp256k1_v0_8_1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct rustsecp256k1_v0_8_1_strauss_point_state) + sizeof(rustsecp256k1_v0_8_1_gej) + sizeof(rustsecp256k1_v0_8_1_scalar);
|
||||
static size_t rustsecp256k1_v0_9_0_strauss_scratch_size(size_t n_points) {
|
||||
static const size_t point_size = (sizeof(rustsecp256k1_v0_9_0_ge) + sizeof(rustsecp256k1_v0_9_0_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct rustsecp256k1_v0_9_0_strauss_point_state) + sizeof(rustsecp256k1_v0_9_0_gej) + sizeof(rustsecp256k1_v0_9_0_scalar);
|
||||
return n_points*point_size;
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_ecmult_strauss_batch(const rustsecp256k1_v0_8_1_callback* error_callback, rustsecp256k1_v0_8_1_scratch *scratch, rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_scalar *inp_g_sc, rustsecp256k1_v0_8_1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
|
||||
rustsecp256k1_v0_8_1_gej* points;
|
||||
rustsecp256k1_v0_8_1_scalar* scalars;
|
||||
struct rustsecp256k1_v0_8_1_strauss_state state;
|
||||
static int rustsecp256k1_v0_9_0_ecmult_strauss_batch(const rustsecp256k1_v0_9_0_callback* error_callback, rustsecp256k1_v0_9_0_scratch *scratch, rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_scalar *inp_g_sc, rustsecp256k1_v0_9_0_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
|
||||
rustsecp256k1_v0_9_0_gej* points;
|
||||
rustsecp256k1_v0_9_0_scalar* scalars;
|
||||
struct rustsecp256k1_v0_9_0_strauss_state state;
|
||||
size_t i;
|
||||
const size_t scratch_checkpoint = rustsecp256k1_v0_8_1_scratch_checkpoint(error_callback, scratch);
|
||||
const size_t scratch_checkpoint = rustsecp256k1_v0_9_0_scratch_checkpoint(error_callback, scratch);
|
||||
|
||||
rustsecp256k1_v0_8_1_gej_set_infinity(r);
|
||||
rustsecp256k1_v0_9_0_gej_set_infinity(r);
|
||||
if (inp_g_sc == NULL && n_points == 0) {
|
||||
return 1;
|
||||
}
|
||||
|
@ -373,37 +375,37 @@ static int rustsecp256k1_v0_8_1_ecmult_strauss_batch(const rustsecp256k1_v0_8_1_
|
|||
/* We allocate STRAUSS_SCRATCH_OBJECTS objects on the scratch space. If these
|
||||
* allocations change, make sure to update the STRAUSS_SCRATCH_OBJECTS
|
||||
* constant and strauss_scratch_size accordingly. */
|
||||
points = (rustsecp256k1_v0_8_1_gej*)rustsecp256k1_v0_8_1_scratch_alloc(error_callback, scratch, n_points * sizeof(rustsecp256k1_v0_8_1_gej));
|
||||
scalars = (rustsecp256k1_v0_8_1_scalar*)rustsecp256k1_v0_8_1_scratch_alloc(error_callback, scratch, n_points * sizeof(rustsecp256k1_v0_8_1_scalar));
|
||||
state.aux = (rustsecp256k1_v0_8_1_fe*)rustsecp256k1_v0_8_1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_8_1_fe));
|
||||
state.pre_a = (rustsecp256k1_v0_8_1_ge*)rustsecp256k1_v0_8_1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_8_1_ge));
|
||||
state.ps = (struct rustsecp256k1_v0_8_1_strauss_point_state*)rustsecp256k1_v0_8_1_scratch_alloc(error_callback, scratch, n_points * sizeof(struct rustsecp256k1_v0_8_1_strauss_point_state));
|
||||
points = (rustsecp256k1_v0_9_0_gej*)rustsecp256k1_v0_9_0_scratch_alloc(error_callback, scratch, n_points * sizeof(rustsecp256k1_v0_9_0_gej));
|
||||
scalars = (rustsecp256k1_v0_9_0_scalar*)rustsecp256k1_v0_9_0_scratch_alloc(error_callback, scratch, n_points * sizeof(rustsecp256k1_v0_9_0_scalar));
|
||||
state.aux = (rustsecp256k1_v0_9_0_fe*)rustsecp256k1_v0_9_0_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_9_0_fe));
|
||||
state.pre_a = (rustsecp256k1_v0_9_0_ge*)rustsecp256k1_v0_9_0_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_9_0_ge));
|
||||
state.ps = (struct rustsecp256k1_v0_9_0_strauss_point_state*)rustsecp256k1_v0_9_0_scratch_alloc(error_callback, scratch, n_points * sizeof(struct rustsecp256k1_v0_9_0_strauss_point_state));
|
||||
|
||||
if (points == NULL || scalars == NULL || state.aux == NULL || state.pre_a == NULL || state.ps == NULL) {
|
||||
rustsecp256k1_v0_8_1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
|
||||
rustsecp256k1_v0_9_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
|
||||
return 0;
|
||||
}
|
||||
|
||||
for (i = 0; i < n_points; i++) {
|
||||
rustsecp256k1_v0_8_1_ge point;
|
||||
rustsecp256k1_v0_9_0_ge point;
|
||||
if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) {
|
||||
rustsecp256k1_v0_8_1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
|
||||
rustsecp256k1_v0_9_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
|
||||
return 0;
|
||||
}
|
||||
rustsecp256k1_v0_8_1_gej_set_ge(&points[i], &point);
|
||||
rustsecp256k1_v0_9_0_gej_set_ge(&points[i], &point);
|
||||
}
|
||||
rustsecp256k1_v0_8_1_ecmult_strauss_wnaf(&state, r, n_points, points, scalars, inp_g_sc);
|
||||
rustsecp256k1_v0_8_1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
|
||||
rustsecp256k1_v0_9_0_ecmult_strauss_wnaf(&state, r, n_points, points, scalars, inp_g_sc);
|
||||
rustsecp256k1_v0_9_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Wrapper for rustsecp256k1_v0_8_1_ecmult_multi_func interface */
|
||||
static int rustsecp256k1_v0_8_1_ecmult_strauss_batch_single(const rustsecp256k1_v0_8_1_callback* error_callback, rustsecp256k1_v0_8_1_scratch *scratch, rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_scalar *inp_g_sc, rustsecp256k1_v0_8_1_ecmult_multi_callback cb, void *cbdata, size_t n) {
|
||||
return rustsecp256k1_v0_8_1_ecmult_strauss_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0);
|
||||
/* Wrapper for rustsecp256k1_v0_9_0_ecmult_multi_func interface */
|
||||
static int rustsecp256k1_v0_9_0_ecmult_strauss_batch_single(const rustsecp256k1_v0_9_0_callback* error_callback, rustsecp256k1_v0_9_0_scratch *scratch, rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_scalar *inp_g_sc, rustsecp256k1_v0_9_0_ecmult_multi_callback cb, void *cbdata, size_t n) {
|
||||
return rustsecp256k1_v0_9_0_ecmult_strauss_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0);
|
||||
}
|
||||
|
||||
static size_t rustsecp256k1_v0_8_1_strauss_max_points(const rustsecp256k1_v0_8_1_callback* error_callback, rustsecp256k1_v0_8_1_scratch *scratch) {
|
||||
return rustsecp256k1_v0_8_1_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / rustsecp256k1_v0_8_1_strauss_scratch_size(1);
|
||||
static size_t rustsecp256k1_v0_9_0_strauss_max_points(const rustsecp256k1_v0_9_0_callback* error_callback, rustsecp256k1_v0_9_0_scratch *scratch) {
|
||||
return rustsecp256k1_v0_9_0_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / rustsecp256k1_v0_9_0_strauss_scratch_size(1);
|
||||
}
|
||||
|
||||
/** Convert a number to WNAF notation.
|
||||
|
@ -413,25 +415,25 @@ static size_t rustsecp256k1_v0_8_1_strauss_max_points(const rustsecp256k1_v0_8_1
|
|||
* - the number of words set is always WNAF_SIZE(w)
|
||||
* - the returned skew is 0 or 1
|
||||
*/
|
||||
static int rustsecp256k1_v0_8_1_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_8_1_scalar *s, int w) {
|
||||
static int rustsecp256k1_v0_9_0_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_9_0_scalar *s, int w) {
|
||||
int skew = 0;
|
||||
int pos;
|
||||
int max_pos;
|
||||
int last_w;
|
||||
const rustsecp256k1_v0_8_1_scalar *work = s;
|
||||
const rustsecp256k1_v0_9_0_scalar *work = s;
|
||||
|
||||
if (rustsecp256k1_v0_8_1_scalar_is_zero(s)) {
|
||||
if (rustsecp256k1_v0_9_0_scalar_is_zero(s)) {
|
||||
for (pos = 0; pos < WNAF_SIZE(w); pos++) {
|
||||
wnaf[pos] = 0;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (rustsecp256k1_v0_8_1_scalar_is_even(s)) {
|
||||
if (rustsecp256k1_v0_9_0_scalar_is_even(s)) {
|
||||
skew = 1;
|
||||
}
|
||||
|
||||
wnaf[0] = rustsecp256k1_v0_8_1_scalar_get_bits_var(work, 0, w) + skew;
|
||||
wnaf[0] = rustsecp256k1_v0_9_0_scalar_get_bits_var(work, 0, w) + skew;
|
||||
/* Compute last window size. Relevant when window size doesn't divide the
|
||||
* number of bits in the scalar */
|
||||
last_w = WNAF_BITS - (WNAF_SIZE(w) - 1) * w;
|
||||
|
@ -439,7 +441,7 @@ static int rustsecp256k1_v0_8_1_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_8_1
|
|||
/* Store the position of the first nonzero word in max_pos to allow
|
||||
* skipping leading zeros when calculating the wnaf. */
|
||||
for (pos = WNAF_SIZE(w) - 1; pos > 0; pos--) {
|
||||
int val = rustsecp256k1_v0_8_1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
|
||||
int val = rustsecp256k1_v0_9_0_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
|
||||
if(val != 0) {
|
||||
break;
|
||||
}
|
||||
|
@ -449,7 +451,7 @@ static int rustsecp256k1_v0_8_1_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_8_1
|
|||
pos = 1;
|
||||
|
||||
while (pos <= max_pos) {
|
||||
int val = rustsecp256k1_v0_8_1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
|
||||
int val = rustsecp256k1_v0_9_0_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
|
||||
if ((val & 1) == 0) {
|
||||
wnaf[pos - 1] -= (1 << w);
|
||||
wnaf[pos] = (val + 1);
|
||||
|
@ -475,14 +477,14 @@ static int rustsecp256k1_v0_8_1_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_8_1
|
|||
return skew;
|
||||
}
|
||||
|
||||
struct rustsecp256k1_v0_8_1_pippenger_point_state {
|
||||
struct rustsecp256k1_v0_9_0_pippenger_point_state {
|
||||
int skew_na;
|
||||
size_t input_pos;
|
||||
};
|
||||
|
||||
struct rustsecp256k1_v0_8_1_pippenger_state {
|
||||
struct rustsecp256k1_v0_9_0_pippenger_state {
|
||||
int *wnaf_na;
|
||||
struct rustsecp256k1_v0_8_1_pippenger_point_state* ps;
|
||||
struct rustsecp256k1_v0_9_0_pippenger_point_state* ps;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -492,7 +494,7 @@ struct rustsecp256k1_v0_8_1_pippenger_state {
|
|||
* to the point's wnaf[i]. Second, the buckets are added together such that
|
||||
* r += 1*bucket[0] + 3*bucket[1] + 5*bucket[2] + ...
|
||||
*/
|
||||
static int rustsecp256k1_v0_8_1_ecmult_pippenger_wnaf(rustsecp256k1_v0_8_1_gej *buckets, int bucket_window, struct rustsecp256k1_v0_8_1_pippenger_state *state, rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_scalar *sc, const rustsecp256k1_v0_8_1_ge *pt, size_t num) {
|
||||
static int rustsecp256k1_v0_9_0_ecmult_pippenger_wnaf(rustsecp256k1_v0_9_0_gej *buckets, int bucket_window, struct rustsecp256k1_v0_9_0_pippenger_state *state, rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_scalar *sc, const rustsecp256k1_v0_9_0_ge *pt, size_t num) {
|
||||
size_t n_wnaf = WNAF_SIZE(bucket_window+1);
|
||||
size_t np;
|
||||
size_t no = 0;
|
||||
|
@ -500,55 +502,55 @@ static int rustsecp256k1_v0_8_1_ecmult_pippenger_wnaf(rustsecp256k1_v0_8_1_gej *
|
|||
int j;
|
||||
|
||||
for (np = 0; np < num; ++np) {
|
||||
if (rustsecp256k1_v0_8_1_scalar_is_zero(&sc[np]) || rustsecp256k1_v0_8_1_ge_is_infinity(&pt[np])) {
|
||||
if (rustsecp256k1_v0_9_0_scalar_is_zero(&sc[np]) || rustsecp256k1_v0_9_0_ge_is_infinity(&pt[np])) {
|
||||
continue;
|
||||
}
|
||||
state->ps[no].input_pos = np;
|
||||
state->ps[no].skew_na = rustsecp256k1_v0_8_1_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1);
|
||||
state->ps[no].skew_na = rustsecp256k1_v0_9_0_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1);
|
||||
no++;
|
||||
}
|
||||
rustsecp256k1_v0_8_1_gej_set_infinity(r);
|
||||
rustsecp256k1_v0_9_0_gej_set_infinity(r);
|
||||
|
||||
if (no == 0) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
for (i = n_wnaf - 1; i >= 0; i--) {
|
||||
rustsecp256k1_v0_8_1_gej running_sum;
|
||||
rustsecp256k1_v0_9_0_gej running_sum;
|
||||
|
||||
for(j = 0; j < ECMULT_TABLE_SIZE(bucket_window+2); j++) {
|
||||
rustsecp256k1_v0_8_1_gej_set_infinity(&buckets[j]);
|
||||
rustsecp256k1_v0_9_0_gej_set_infinity(&buckets[j]);
|
||||
}
|
||||
|
||||
for (np = 0; np < no; ++np) {
|
||||
int n = state->wnaf_na[np*n_wnaf + i];
|
||||
struct rustsecp256k1_v0_8_1_pippenger_point_state point_state = state->ps[np];
|
||||
rustsecp256k1_v0_8_1_ge tmp;
|
||||
struct rustsecp256k1_v0_9_0_pippenger_point_state point_state = state->ps[np];
|
||||
rustsecp256k1_v0_9_0_ge tmp;
|
||||
int idx;
|
||||
|
||||
if (i == 0) {
|
||||
/* correct for wnaf skew */
|
||||
int skew = point_state.skew_na;
|
||||
if (skew) {
|
||||
rustsecp256k1_v0_8_1_ge_neg(&tmp, &pt[point_state.input_pos]);
|
||||
rustsecp256k1_v0_8_1_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL);
|
||||
rustsecp256k1_v0_9_0_ge_neg(&tmp, &pt[point_state.input_pos]);
|
||||
rustsecp256k1_v0_9_0_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL);
|
||||
}
|
||||
}
|
||||
if (n > 0) {
|
||||
idx = (n - 1)/2;
|
||||
rustsecp256k1_v0_8_1_gej_add_ge_var(&buckets[idx], &buckets[idx], &pt[point_state.input_pos], NULL);
|
||||
rustsecp256k1_v0_9_0_gej_add_ge_var(&buckets[idx], &buckets[idx], &pt[point_state.input_pos], NULL);
|
||||
} else if (n < 0) {
|
||||
idx = -(n + 1)/2;
|
||||
rustsecp256k1_v0_8_1_ge_neg(&tmp, &pt[point_state.input_pos]);
|
||||
rustsecp256k1_v0_8_1_gej_add_ge_var(&buckets[idx], &buckets[idx], &tmp, NULL);
|
||||
rustsecp256k1_v0_9_0_ge_neg(&tmp, &pt[point_state.input_pos]);
|
||||
rustsecp256k1_v0_9_0_gej_add_ge_var(&buckets[idx], &buckets[idx], &tmp, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
for(j = 0; j < bucket_window; j++) {
|
||||
rustsecp256k1_v0_8_1_gej_double_var(r, r, NULL);
|
||||
rustsecp256k1_v0_9_0_gej_double_var(r, r, NULL);
|
||||
}
|
||||
|
||||
rustsecp256k1_v0_8_1_gej_set_infinity(&running_sum);
|
||||
rustsecp256k1_v0_9_0_gej_set_infinity(&running_sum);
|
||||
/* Accumulate the sum: bucket[0] + 3*bucket[1] + 5*bucket[2] + 7*bucket[3] + ...
|
||||
* = bucket[0] + bucket[1] + bucket[2] + bucket[3] + ...
|
||||
* + 2 * (bucket[1] + 2*bucket[2] + 3*bucket[3] + ...)
|
||||
|
@ -558,13 +560,13 @@ static int rustsecp256k1_v0_8_1_ecmult_pippenger_wnaf(rustsecp256k1_v0_8_1_gej *
|
|||
* The doubling is done implicitly by deferring the final window doubling (of 'r').
|
||||
*/
|
||||
for(j = ECMULT_TABLE_SIZE(bucket_window+2) - 1; j > 0; j--) {
|
||||
rustsecp256k1_v0_8_1_gej_add_var(&running_sum, &running_sum, &buckets[j], NULL);
|
||||
rustsecp256k1_v0_8_1_gej_add_var(r, r, &running_sum, NULL);
|
||||
rustsecp256k1_v0_9_0_gej_add_var(&running_sum, &running_sum, &buckets[j], NULL);
|
||||
rustsecp256k1_v0_9_0_gej_add_var(r, r, &running_sum, NULL);
|
||||
}
|
||||
|
||||
rustsecp256k1_v0_8_1_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL);
|
||||
rustsecp256k1_v0_8_1_gej_double_var(r, r, NULL);
|
||||
rustsecp256k1_v0_8_1_gej_add_var(r, r, &running_sum, NULL);
|
||||
rustsecp256k1_v0_9_0_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL);
|
||||
rustsecp256k1_v0_9_0_gej_double_var(r, r, NULL);
|
||||
rustsecp256k1_v0_9_0_gej_add_var(r, r, &running_sum, NULL);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
@ -573,7 +575,7 @@ static int rustsecp256k1_v0_8_1_ecmult_pippenger_wnaf(rustsecp256k1_v0_8_1_gej *
|
|||
* Returns optimal bucket_window (number of bits of a scalar represented by a
|
||||
* set of buckets) for a given number of points.
|
||||
*/
|
||||
static int rustsecp256k1_v0_8_1_pippenger_bucket_window(size_t n) {
|
||||
static int rustsecp256k1_v0_9_0_pippenger_bucket_window(size_t n) {
|
||||
if (n <= 1) {
|
||||
return 1;
|
||||
} else if (n <= 4) {
|
||||
|
@ -602,7 +604,7 @@ static int rustsecp256k1_v0_8_1_pippenger_bucket_window(size_t n) {
|
|||
/**
|
||||
* Returns the maximum optimal number of points for a bucket_window.
|
||||
*/
|
||||
static size_t rustsecp256k1_v0_8_1_pippenger_bucket_window_inv(int bucket_window) {
|
||||
static size_t rustsecp256k1_v0_9_0_pippenger_bucket_window_inv(int bucket_window) {
|
||||
switch(bucket_window) {
|
||||
case 1: return 1;
|
||||
case 2: return 4;
|
||||
|
@ -621,18 +623,18 @@ static size_t rustsecp256k1_v0_8_1_pippenger_bucket_window_inv(int bucket_window
|
|||
}
|
||||
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_ecmult_endo_split(rustsecp256k1_v0_8_1_scalar *s1, rustsecp256k1_v0_8_1_scalar *s2, rustsecp256k1_v0_8_1_ge *p1, rustsecp256k1_v0_8_1_ge *p2) {
|
||||
rustsecp256k1_v0_8_1_scalar tmp = *s1;
|
||||
rustsecp256k1_v0_8_1_scalar_split_lambda(s1, s2, &tmp);
|
||||
rustsecp256k1_v0_8_1_ge_mul_lambda(p2, p1);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_ecmult_endo_split(rustsecp256k1_v0_9_0_scalar *s1, rustsecp256k1_v0_9_0_scalar *s2, rustsecp256k1_v0_9_0_ge *p1, rustsecp256k1_v0_9_0_ge *p2) {
|
||||
rustsecp256k1_v0_9_0_scalar tmp = *s1;
|
||||
rustsecp256k1_v0_9_0_scalar_split_lambda(s1, s2, &tmp);
|
||||
rustsecp256k1_v0_9_0_ge_mul_lambda(p2, p1);
|
||||
|
||||
if (rustsecp256k1_v0_8_1_scalar_is_high(s1)) {
|
||||
rustsecp256k1_v0_8_1_scalar_negate(s1, s1);
|
||||
rustsecp256k1_v0_8_1_ge_neg(p1, p1);
|
||||
if (rustsecp256k1_v0_9_0_scalar_is_high(s1)) {
|
||||
rustsecp256k1_v0_9_0_scalar_negate(s1, s1);
|
||||
rustsecp256k1_v0_9_0_ge_neg(p1, p1);
|
||||
}
|
||||
if (rustsecp256k1_v0_8_1_scalar_is_high(s2)) {
|
||||
rustsecp256k1_v0_8_1_scalar_negate(s2, s2);
|
||||
rustsecp256k1_v0_8_1_ge_neg(p2, p2);
|
||||
if (rustsecp256k1_v0_9_0_scalar_is_high(s2)) {
|
||||
rustsecp256k1_v0_9_0_scalar_negate(s2, s2);
|
||||
rustsecp256k1_v0_9_0_ge_neg(p2, p2);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -640,91 +642,91 @@ SECP256K1_INLINE static void rustsecp256k1_v0_8_1_ecmult_endo_split(rustsecp256k
|
|||
* Returns the scratch size required for a given number of points (excluding
|
||||
* base point G) without considering alignment.
|
||||
*/
|
||||
static size_t rustsecp256k1_v0_8_1_pippenger_scratch_size(size_t n_points, int bucket_window) {
|
||||
static size_t rustsecp256k1_v0_9_0_pippenger_scratch_size(size_t n_points, int bucket_window) {
|
||||
size_t entries = 2*n_points + 2;
|
||||
size_t entry_size = sizeof(rustsecp256k1_v0_8_1_ge) + sizeof(rustsecp256k1_v0_8_1_scalar) + sizeof(struct rustsecp256k1_v0_8_1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
|
||||
return (sizeof(rustsecp256k1_v0_8_1_gej) << bucket_window) + sizeof(struct rustsecp256k1_v0_8_1_pippenger_state) + entries * entry_size;
|
||||
size_t entry_size = sizeof(rustsecp256k1_v0_9_0_ge) + sizeof(rustsecp256k1_v0_9_0_scalar) + sizeof(struct rustsecp256k1_v0_9_0_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
|
||||
return (sizeof(rustsecp256k1_v0_9_0_gej) << bucket_window) + sizeof(struct rustsecp256k1_v0_9_0_pippenger_state) + entries * entry_size;
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_ecmult_pippenger_batch(const rustsecp256k1_v0_8_1_callback* error_callback, rustsecp256k1_v0_8_1_scratch *scratch, rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_scalar *inp_g_sc, rustsecp256k1_v0_8_1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
|
||||
const size_t scratch_checkpoint = rustsecp256k1_v0_8_1_scratch_checkpoint(error_callback, scratch);
|
||||
static int rustsecp256k1_v0_9_0_ecmult_pippenger_batch(const rustsecp256k1_v0_9_0_callback* error_callback, rustsecp256k1_v0_9_0_scratch *scratch, rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_scalar *inp_g_sc, rustsecp256k1_v0_9_0_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
|
||||
const size_t scratch_checkpoint = rustsecp256k1_v0_9_0_scratch_checkpoint(error_callback, scratch);
|
||||
/* Use 2(n+1) with the endomorphism, when calculating batch
|
||||
* sizes. The reason for +1 is that we add the G scalar to the list of
|
||||
* other scalars. */
|
||||
size_t entries = 2*n_points + 2;
|
||||
rustsecp256k1_v0_8_1_ge *points;
|
||||
rustsecp256k1_v0_8_1_scalar *scalars;
|
||||
rustsecp256k1_v0_8_1_gej *buckets;
|
||||
struct rustsecp256k1_v0_8_1_pippenger_state *state_space;
|
||||
rustsecp256k1_v0_9_0_ge *points;
|
||||
rustsecp256k1_v0_9_0_scalar *scalars;
|
||||
rustsecp256k1_v0_9_0_gej *buckets;
|
||||
struct rustsecp256k1_v0_9_0_pippenger_state *state_space;
|
||||
size_t idx = 0;
|
||||
size_t point_idx = 0;
|
||||
int i, j;
|
||||
int bucket_window;
|
||||
|
||||
rustsecp256k1_v0_8_1_gej_set_infinity(r);
|
||||
rustsecp256k1_v0_9_0_gej_set_infinity(r);
|
||||
if (inp_g_sc == NULL && n_points == 0) {
|
||||
return 1;
|
||||
}
|
||||
bucket_window = rustsecp256k1_v0_8_1_pippenger_bucket_window(n_points);
|
||||
bucket_window = rustsecp256k1_v0_9_0_pippenger_bucket_window(n_points);
|
||||
|
||||
/* We allocate PIPPENGER_SCRATCH_OBJECTS objects on the scratch space. If
|
||||
* these allocations change, make sure to update the
|
||||
* PIPPENGER_SCRATCH_OBJECTS constant and pippenger_scratch_size
|
||||
* accordingly. */
|
||||
points = (rustsecp256k1_v0_8_1_ge *) rustsecp256k1_v0_8_1_scratch_alloc(error_callback, scratch, entries * sizeof(*points));
|
||||
scalars = (rustsecp256k1_v0_8_1_scalar *) rustsecp256k1_v0_8_1_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars));
|
||||
state_space = (struct rustsecp256k1_v0_8_1_pippenger_state *) rustsecp256k1_v0_8_1_scratch_alloc(error_callback, scratch, sizeof(*state_space));
|
||||
points = (rustsecp256k1_v0_9_0_ge *) rustsecp256k1_v0_9_0_scratch_alloc(error_callback, scratch, entries * sizeof(*points));
|
||||
scalars = (rustsecp256k1_v0_9_0_scalar *) rustsecp256k1_v0_9_0_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars));
|
||||
state_space = (struct rustsecp256k1_v0_9_0_pippenger_state *) rustsecp256k1_v0_9_0_scratch_alloc(error_callback, scratch, sizeof(*state_space));
|
||||
if (points == NULL || scalars == NULL || state_space == NULL) {
|
||||
rustsecp256k1_v0_8_1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
|
||||
rustsecp256k1_v0_9_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
|
||||
return 0;
|
||||
}
|
||||
state_space->ps = (struct rustsecp256k1_v0_8_1_pippenger_point_state *) rustsecp256k1_v0_8_1_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps));
|
||||
state_space->wnaf_na = (int *) rustsecp256k1_v0_8_1_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
|
||||
buckets = (rustsecp256k1_v0_8_1_gej *) rustsecp256k1_v0_8_1_scratch_alloc(error_callback, scratch, (1<<bucket_window) * sizeof(*buckets));
|
||||
state_space->ps = (struct rustsecp256k1_v0_9_0_pippenger_point_state *) rustsecp256k1_v0_9_0_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps));
|
||||
state_space->wnaf_na = (int *) rustsecp256k1_v0_9_0_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
|
||||
buckets = (rustsecp256k1_v0_9_0_gej *) rustsecp256k1_v0_9_0_scratch_alloc(error_callback, scratch, ((size_t)1 << bucket_window) * sizeof(*buckets));
|
||||
if (state_space->ps == NULL || state_space->wnaf_na == NULL || buckets == NULL) {
|
||||
rustsecp256k1_v0_8_1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
|
||||
rustsecp256k1_v0_9_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (inp_g_sc != NULL) {
|
||||
scalars[0] = *inp_g_sc;
|
||||
points[0] = rustsecp256k1_v0_8_1_ge_const_g;
|
||||
points[0] = rustsecp256k1_v0_9_0_ge_const_g;
|
||||
idx++;
|
||||
rustsecp256k1_v0_8_1_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]);
|
||||
rustsecp256k1_v0_9_0_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]);
|
||||
idx++;
|
||||
}
|
||||
|
||||
while (point_idx < n_points) {
|
||||
if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) {
|
||||
rustsecp256k1_v0_8_1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
|
||||
rustsecp256k1_v0_9_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
|
||||
return 0;
|
||||
}
|
||||
idx++;
|
||||
rustsecp256k1_v0_8_1_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]);
|
||||
rustsecp256k1_v0_9_0_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]);
|
||||
idx++;
|
||||
point_idx++;
|
||||
}
|
||||
|
||||
rustsecp256k1_v0_8_1_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx);
|
||||
rustsecp256k1_v0_9_0_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx);
|
||||
|
||||
/* Clear data */
|
||||
for(i = 0; (size_t)i < idx; i++) {
|
||||
rustsecp256k1_v0_8_1_scalar_clear(&scalars[i]);
|
||||
rustsecp256k1_v0_9_0_scalar_clear(&scalars[i]);
|
||||
state_space->ps[i].skew_na = 0;
|
||||
for(j = 0; j < WNAF_SIZE(bucket_window+1); j++) {
|
||||
state_space->wnaf_na[i * WNAF_SIZE(bucket_window+1) + j] = 0;
|
||||
}
|
||||
}
|
||||
for(i = 0; i < 1<<bucket_window; i++) {
|
||||
rustsecp256k1_v0_8_1_gej_clear(&buckets[i]);
|
||||
rustsecp256k1_v0_9_0_gej_clear(&buckets[i]);
|
||||
}
|
||||
rustsecp256k1_v0_8_1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
|
||||
rustsecp256k1_v0_9_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Wrapper for rustsecp256k1_v0_8_1_ecmult_multi_func interface */
|
||||
static int rustsecp256k1_v0_8_1_ecmult_pippenger_batch_single(const rustsecp256k1_v0_8_1_callback* error_callback, rustsecp256k1_v0_8_1_scratch *scratch, rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_scalar *inp_g_sc, rustsecp256k1_v0_8_1_ecmult_multi_callback cb, void *cbdata, size_t n) {
|
||||
return rustsecp256k1_v0_8_1_ecmult_pippenger_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0);
|
||||
/* Wrapper for rustsecp256k1_v0_9_0_ecmult_multi_func interface */
|
||||
static int rustsecp256k1_v0_9_0_ecmult_pippenger_batch_single(const rustsecp256k1_v0_9_0_callback* error_callback, rustsecp256k1_v0_9_0_scratch *scratch, rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_scalar *inp_g_sc, rustsecp256k1_v0_9_0_ecmult_multi_callback cb, void *cbdata, size_t n) {
|
||||
return rustsecp256k1_v0_9_0_ecmult_pippenger_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -732,20 +734,20 @@ static int rustsecp256k1_v0_8_1_ecmult_pippenger_batch_single(const rustsecp256k
|
|||
* a given scratch space. The function ensures that fewer points may also be
|
||||
* used.
|
||||
*/
|
||||
static size_t rustsecp256k1_v0_8_1_pippenger_max_points(const rustsecp256k1_v0_8_1_callback* error_callback, rustsecp256k1_v0_8_1_scratch *scratch) {
|
||||
size_t max_alloc = rustsecp256k1_v0_8_1_scratch_max_allocation(error_callback, scratch, PIPPENGER_SCRATCH_OBJECTS);
|
||||
static size_t rustsecp256k1_v0_9_0_pippenger_max_points(const rustsecp256k1_v0_9_0_callback* error_callback, rustsecp256k1_v0_9_0_scratch *scratch) {
|
||||
size_t max_alloc = rustsecp256k1_v0_9_0_scratch_max_allocation(error_callback, scratch, PIPPENGER_SCRATCH_OBJECTS);
|
||||
int bucket_window;
|
||||
size_t res = 0;
|
||||
|
||||
for (bucket_window = 1; bucket_window <= PIPPENGER_MAX_BUCKET_WINDOW; bucket_window++) {
|
||||
size_t n_points;
|
||||
size_t max_points = rustsecp256k1_v0_8_1_pippenger_bucket_window_inv(bucket_window);
|
||||
size_t max_points = rustsecp256k1_v0_9_0_pippenger_bucket_window_inv(bucket_window);
|
||||
size_t space_for_points;
|
||||
size_t space_overhead;
|
||||
size_t entry_size = sizeof(rustsecp256k1_v0_8_1_ge) + sizeof(rustsecp256k1_v0_8_1_scalar) + sizeof(struct rustsecp256k1_v0_8_1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
|
||||
size_t entry_size = sizeof(rustsecp256k1_v0_9_0_ge) + sizeof(rustsecp256k1_v0_9_0_scalar) + sizeof(struct rustsecp256k1_v0_9_0_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
|
||||
|
||||
entry_size = 2*entry_size;
|
||||
space_overhead = (sizeof(rustsecp256k1_v0_8_1_gej) << bucket_window) + entry_size + sizeof(struct rustsecp256k1_v0_8_1_pippenger_state);
|
||||
space_overhead = (sizeof(rustsecp256k1_v0_9_0_gej) << bucket_window) + entry_size + sizeof(struct rustsecp256k1_v0_9_0_pippenger_state);
|
||||
if (space_overhead > max_alloc) {
|
||||
break;
|
||||
}
|
||||
|
@ -768,34 +770,32 @@ static size_t rustsecp256k1_v0_8_1_pippenger_max_points(const rustsecp256k1_v0_8
|
|||
|
||||
/* Computes ecmult_multi by simply multiplying and adding each point. Does not
|
||||
* require a scratch space */
|
||||
static int rustsecp256k1_v0_8_1_ecmult_multi_simple_var(rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_scalar *inp_g_sc, rustsecp256k1_v0_8_1_ecmult_multi_callback cb, void *cbdata, size_t n_points) {
|
||||
static int rustsecp256k1_v0_9_0_ecmult_multi_simple_var(rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_scalar *inp_g_sc, rustsecp256k1_v0_9_0_ecmult_multi_callback cb, void *cbdata, size_t n_points) {
|
||||
size_t point_idx;
|
||||
rustsecp256k1_v0_8_1_scalar szero;
|
||||
rustsecp256k1_v0_8_1_gej tmpj;
|
||||
rustsecp256k1_v0_9_0_gej tmpj;
|
||||
|
||||
rustsecp256k1_v0_8_1_scalar_set_int(&szero, 0);
|
||||
rustsecp256k1_v0_8_1_gej_set_infinity(r);
|
||||
rustsecp256k1_v0_8_1_gej_set_infinity(&tmpj);
|
||||
rustsecp256k1_v0_9_0_gej_set_infinity(r);
|
||||
rustsecp256k1_v0_9_0_gej_set_infinity(&tmpj);
|
||||
/* r = inp_g_sc*G */
|
||||
rustsecp256k1_v0_8_1_ecmult(r, &tmpj, &szero, inp_g_sc);
|
||||
rustsecp256k1_v0_9_0_ecmult(r, &tmpj, &rustsecp256k1_v0_9_0_scalar_zero, inp_g_sc);
|
||||
for (point_idx = 0; point_idx < n_points; point_idx++) {
|
||||
rustsecp256k1_v0_8_1_ge point;
|
||||
rustsecp256k1_v0_8_1_gej pointj;
|
||||
rustsecp256k1_v0_8_1_scalar scalar;
|
||||
rustsecp256k1_v0_9_0_ge point;
|
||||
rustsecp256k1_v0_9_0_gej pointj;
|
||||
rustsecp256k1_v0_9_0_scalar scalar;
|
||||
if (!cb(&scalar, &point, point_idx, cbdata)) {
|
||||
return 0;
|
||||
}
|
||||
/* r += scalar*point */
|
||||
rustsecp256k1_v0_8_1_gej_set_ge(&pointj, &point);
|
||||
rustsecp256k1_v0_8_1_ecmult(&tmpj, &pointj, &scalar, NULL);
|
||||
rustsecp256k1_v0_8_1_gej_add_var(r, r, &tmpj, NULL);
|
||||
rustsecp256k1_v0_9_0_gej_set_ge(&pointj, &point);
|
||||
rustsecp256k1_v0_9_0_ecmult(&tmpj, &pointj, &scalar, NULL);
|
||||
rustsecp256k1_v0_9_0_gej_add_var(r, r, &tmpj, NULL);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Compute the number of batches and the batch size given the maximum batch size and the
|
||||
* total number of points */
|
||||
static int rustsecp256k1_v0_8_1_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) {
|
||||
static int rustsecp256k1_v0_9_0_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) {
|
||||
if (max_n_batch_points == 0) {
|
||||
return 0;
|
||||
}
|
||||
|
@ -813,50 +813,48 @@ static int rustsecp256k1_v0_8_1_ecmult_multi_batch_size_helper(size_t *n_batches
|
|||
return 1;
|
||||
}
|
||||
|
||||
typedef int (*rustsecp256k1_v0_8_1_ecmult_multi_func)(const rustsecp256k1_v0_8_1_callback* error_callback, rustsecp256k1_v0_8_1_scratch*, rustsecp256k1_v0_8_1_gej*, const rustsecp256k1_v0_8_1_scalar*, rustsecp256k1_v0_8_1_ecmult_multi_callback cb, void*, size_t);
|
||||
static int rustsecp256k1_v0_8_1_ecmult_multi_var(const rustsecp256k1_v0_8_1_callback* error_callback, rustsecp256k1_v0_8_1_scratch *scratch, rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_scalar *inp_g_sc, rustsecp256k1_v0_8_1_ecmult_multi_callback cb, void *cbdata, size_t n) {
|
||||
typedef int (*rustsecp256k1_v0_9_0_ecmult_multi_func)(const rustsecp256k1_v0_9_0_callback* error_callback, rustsecp256k1_v0_9_0_scratch*, rustsecp256k1_v0_9_0_gej*, const rustsecp256k1_v0_9_0_scalar*, rustsecp256k1_v0_9_0_ecmult_multi_callback cb, void*, size_t);
|
||||
static int rustsecp256k1_v0_9_0_ecmult_multi_var(const rustsecp256k1_v0_9_0_callback* error_callback, rustsecp256k1_v0_9_0_scratch *scratch, rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_scalar *inp_g_sc, rustsecp256k1_v0_9_0_ecmult_multi_callback cb, void *cbdata, size_t n) {
|
||||
size_t i;
|
||||
|
||||
int (*f)(const rustsecp256k1_v0_8_1_callback* error_callback, rustsecp256k1_v0_8_1_scratch*, rustsecp256k1_v0_8_1_gej*, const rustsecp256k1_v0_8_1_scalar*, rustsecp256k1_v0_8_1_ecmult_multi_callback cb, void*, size_t, size_t);
|
||||
int (*f)(const rustsecp256k1_v0_9_0_callback* error_callback, rustsecp256k1_v0_9_0_scratch*, rustsecp256k1_v0_9_0_gej*, const rustsecp256k1_v0_9_0_scalar*, rustsecp256k1_v0_9_0_ecmult_multi_callback cb, void*, size_t, size_t);
|
||||
size_t n_batches;
|
||||
size_t n_batch_points;
|
||||
|
||||
rustsecp256k1_v0_8_1_gej_set_infinity(r);
|
||||
rustsecp256k1_v0_9_0_gej_set_infinity(r);
|
||||
if (inp_g_sc == NULL && n == 0) {
|
||||
return 1;
|
||||
} else if (n == 0) {
|
||||
rustsecp256k1_v0_8_1_scalar szero;
|
||||
rustsecp256k1_v0_8_1_scalar_set_int(&szero, 0);
|
||||
rustsecp256k1_v0_8_1_ecmult(r, r, &szero, inp_g_sc);
|
||||
rustsecp256k1_v0_9_0_ecmult(r, r, &rustsecp256k1_v0_9_0_scalar_zero, inp_g_sc);
|
||||
return 1;
|
||||
}
|
||||
if (scratch == NULL) {
|
||||
return rustsecp256k1_v0_8_1_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
|
||||
return rustsecp256k1_v0_9_0_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
|
||||
}
|
||||
|
||||
/* Compute the batch sizes for Pippenger's algorithm given a scratch space. If it's greater than
|
||||
* a threshold use Pippenger's algorithm. Otherwise use Strauss' algorithm.
|
||||
* As a first step check if there's enough space for Pippenger's algo (which requires less space
|
||||
* than Strauss' algo) and if not, use the simple algorithm. */
|
||||
if (!rustsecp256k1_v0_8_1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, rustsecp256k1_v0_8_1_pippenger_max_points(error_callback, scratch), n)) {
|
||||
return rustsecp256k1_v0_8_1_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
|
||||
if (!rustsecp256k1_v0_9_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, rustsecp256k1_v0_9_0_pippenger_max_points(error_callback, scratch), n)) {
|
||||
return rustsecp256k1_v0_9_0_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
|
||||
}
|
||||
if (n_batch_points >= ECMULT_PIPPENGER_THRESHOLD) {
|
||||
f = rustsecp256k1_v0_8_1_ecmult_pippenger_batch;
|
||||
f = rustsecp256k1_v0_9_0_ecmult_pippenger_batch;
|
||||
} else {
|
||||
if (!rustsecp256k1_v0_8_1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, rustsecp256k1_v0_8_1_strauss_max_points(error_callback, scratch), n)) {
|
||||
return rustsecp256k1_v0_8_1_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
|
||||
if (!rustsecp256k1_v0_9_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, rustsecp256k1_v0_9_0_strauss_max_points(error_callback, scratch), n)) {
|
||||
return rustsecp256k1_v0_9_0_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
|
||||
}
|
||||
f = rustsecp256k1_v0_8_1_ecmult_strauss_batch;
|
||||
f = rustsecp256k1_v0_9_0_ecmult_strauss_batch;
|
||||
}
|
||||
for(i = 0; i < n_batches; i++) {
|
||||
size_t nbp = n < n_batch_points ? n : n_batch_points;
|
||||
size_t offset = n_batch_points*i;
|
||||
rustsecp256k1_v0_8_1_gej tmp;
|
||||
rustsecp256k1_v0_9_0_gej tmp;
|
||||
if (!f(error_callback, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {
|
||||
return 0;
|
||||
}
|
||||
rustsecp256k1_v0_8_1_gej_add_var(r, r, &tmp, NULL);
|
||||
rustsecp256k1_v0_9_0_gej_add_var(r, r, &tmp, NULL);
|
||||
n -= nbp;
|
||||
}
|
||||
return 1;
|
||||
|
|
|
@ -7,23 +7,36 @@
|
|||
#ifndef SECP256K1_FIELD_H
|
||||
#define SECP256K1_FIELD_H
|
||||
|
||||
/** Field element module.
|
||||
*
|
||||
* Field elements can be represented in several ways, but code accessing
|
||||
* it (and implementations) need to take certain properties into account:
|
||||
* - Each field element can be normalized or not.
|
||||
* - Each field element has a magnitude, which represents how far away
|
||||
* its representation is away from normalization. Normalized elements
|
||||
* always have a magnitude of 0 or 1, but a magnitude of 1 doesn't
|
||||
* imply normality.
|
||||
*/
|
||||
|
||||
#if defined HAVE_CONFIG_H
|
||||
#include "libsecp256k1-config.h"
|
||||
#endif
|
||||
|
||||
#include "util.h"
|
||||
|
||||
/* This file defines the generic interface for working with rustsecp256k1_v0_9_0_fe
|
||||
* objects, which represent field elements (integers modulo 2^256 - 2^32 - 977).
|
||||
*
|
||||
* The actual definition of the rustsecp256k1_v0_9_0_fe type depends on the chosen field
|
||||
* implementation; see the field_5x52.h and field_10x26.h files for details.
|
||||
*
|
||||
* All rustsecp256k1_v0_9_0_fe objects have implicit properties that determine what
|
||||
* operations are permitted on it. These are purely a function of what
|
||||
* rustsecp256k1_v0_9_0_fe_ operations are applied on it, generally (implicitly) fixed at
|
||||
* compile time, and do not depend on the chosen field implementation. Despite
|
||||
* that, what these properties actually entail for the field representation
|
||||
* values depends on the chosen field implementation. These properties are:
|
||||
* - magnitude: an integer in [0,32]
|
||||
* - normalized: 0 or 1; normalized=1 implies magnitude <= 1.
|
||||
*
|
||||
* In VERIFY mode, they are materialized explicitly as fields in the struct,
|
||||
* allowing run-time verification of these properties. In that case, the field
|
||||
* implementation also provides a rustsecp256k1_v0_9_0_fe_verify routine to verify that
|
||||
* these fields match the run-time value and perform internal consistency
|
||||
* checks. */
|
||||
#ifdef VERIFY
|
||||
# define SECP256K1_FE_VERIFY_FIELDS \
|
||||
int magnitude; \
|
||||
int normalized;
|
||||
#else
|
||||
# define SECP256K1_FE_VERIFY_FIELDS
|
||||
#endif
|
||||
|
||||
#if defined(SECP256K1_WIDEMUL_INT128)
|
||||
#include "field_5x52.h"
|
||||
#elif defined(SECP256K1_WIDEMUL_INT64)
|
||||
|
@ -32,111 +45,308 @@
|
|||
#error "Please select wide multiplication implementation"
|
||||
#endif
|
||||
|
||||
static const rustsecp256k1_v0_8_1_fe rustsecp256k1_v0_8_1_fe_one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1);
|
||||
static const rustsecp256k1_v0_8_1_fe rustsecp256k1_v0_8_1_const_beta = SECP256K1_FE_CONST(
|
||||
#ifdef VERIFY
|
||||
/* Magnitude and normalized value for constants. */
|
||||
#define SECP256K1_FE_VERIFY_CONST(d7, d6, d5, d4, d3, d2, d1, d0) \
|
||||
/* Magnitude is 0 for constant 0; 1 otherwise. */ \
|
||||
, (((d7) | (d6) | (d5) | (d4) | (d3) | (d2) | (d1) | (d0)) != 0) \
|
||||
/* Normalized is 1 unless sum(d_i<<(32*i) for i=0..7) exceeds field modulus. */ \
|
||||
, (!(((d7) & (d6) & (d5) & (d4) & (d3) & (d2)) == 0xfffffffful && ((d1) == 0xfffffffful || ((d1) == 0xfffffffe && (d0 >= 0xfffffc2f)))))
|
||||
#else
|
||||
#define SECP256K1_FE_VERIFY_CONST(d7, d6, d5, d4, d3, d2, d1, d0)
|
||||
#endif
|
||||
|
||||
/** This expands to an initializer for a rustsecp256k1_v0_9_0_fe valued sum((i*32) * d_i, i=0..7) mod p.
|
||||
*
|
||||
* It has magnitude 1, unless d_i are all 0, in which case the magnitude is 0.
|
||||
* It is normalized, unless sum(2^(i*32) * d_i, i=0..7) >= p.
|
||||
*
|
||||
* SECP256K1_FE_CONST_INNER is provided by the implementation.
|
||||
*/
|
||||
#define SECP256K1_FE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {SECP256K1_FE_CONST_INNER((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0)) SECP256K1_FE_VERIFY_CONST((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0)) }
|
||||
|
||||
static const rustsecp256k1_v0_9_0_fe rustsecp256k1_v0_9_0_fe_one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1);
|
||||
static const rustsecp256k1_v0_9_0_fe rustsecp256k1_v0_9_0_const_beta = SECP256K1_FE_CONST(
|
||||
0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul,
|
||||
0x9cf04975ul, 0x12f58995ul, 0xc1396c28ul, 0x719501eeul
|
||||
);
|
||||
|
||||
/** Normalize a field element. This brings the field element to a canonical representation, reduces
|
||||
* its magnitude to 1, and reduces it modulo field size `p`.
|
||||
#ifndef VERIFY
|
||||
/* In non-VERIFY mode, we #define the fe operations to be identical to their
|
||||
* internal field implementation, to avoid the potential overhead of a
|
||||
* function call (even though presumably inlinable). */
|
||||
# define rustsecp256k1_v0_9_0_fe_normalize rustsecp256k1_v0_9_0_fe_impl_normalize
|
||||
# define rustsecp256k1_v0_9_0_fe_normalize_weak rustsecp256k1_v0_9_0_fe_impl_normalize_weak
|
||||
# define rustsecp256k1_v0_9_0_fe_normalize_var rustsecp256k1_v0_9_0_fe_impl_normalize_var
|
||||
# define rustsecp256k1_v0_9_0_fe_normalizes_to_zero rustsecp256k1_v0_9_0_fe_impl_normalizes_to_zero
|
||||
# define rustsecp256k1_v0_9_0_fe_normalizes_to_zero_var rustsecp256k1_v0_9_0_fe_impl_normalizes_to_zero_var
|
||||
# define rustsecp256k1_v0_9_0_fe_set_int rustsecp256k1_v0_9_0_fe_impl_set_int
|
||||
# define rustsecp256k1_v0_9_0_fe_clear rustsecp256k1_v0_9_0_fe_impl_clear
|
||||
# define rustsecp256k1_v0_9_0_fe_is_zero rustsecp256k1_v0_9_0_fe_impl_is_zero
|
||||
# define rustsecp256k1_v0_9_0_fe_is_odd rustsecp256k1_v0_9_0_fe_impl_is_odd
|
||||
# define rustsecp256k1_v0_9_0_fe_cmp_var rustsecp256k1_v0_9_0_fe_impl_cmp_var
|
||||
# define rustsecp256k1_v0_9_0_fe_set_b32_mod rustsecp256k1_v0_9_0_fe_impl_set_b32_mod
|
||||
# define rustsecp256k1_v0_9_0_fe_set_b32_limit rustsecp256k1_v0_9_0_fe_impl_set_b32_limit
|
||||
# define rustsecp256k1_v0_9_0_fe_get_b32 rustsecp256k1_v0_9_0_fe_impl_get_b32
|
||||
# define rustsecp256k1_v0_9_0_fe_negate_unchecked rustsecp256k1_v0_9_0_fe_impl_negate_unchecked
|
||||
# define rustsecp256k1_v0_9_0_fe_mul_int_unchecked rustsecp256k1_v0_9_0_fe_impl_mul_int_unchecked
|
||||
# define rustsecp256k1_v0_9_0_fe_add rustsecp256k1_v0_9_0_fe_impl_add
|
||||
# define rustsecp256k1_v0_9_0_fe_mul rustsecp256k1_v0_9_0_fe_impl_mul
|
||||
# define rustsecp256k1_v0_9_0_fe_sqr rustsecp256k1_v0_9_0_fe_impl_sqr
|
||||
# define rustsecp256k1_v0_9_0_fe_cmov rustsecp256k1_v0_9_0_fe_impl_cmov
|
||||
# define rustsecp256k1_v0_9_0_fe_to_storage rustsecp256k1_v0_9_0_fe_impl_to_storage
|
||||
# define rustsecp256k1_v0_9_0_fe_from_storage rustsecp256k1_v0_9_0_fe_impl_from_storage
|
||||
# define rustsecp256k1_v0_9_0_fe_inv rustsecp256k1_v0_9_0_fe_impl_inv
|
||||
# define rustsecp256k1_v0_9_0_fe_inv_var rustsecp256k1_v0_9_0_fe_impl_inv_var
|
||||
# define rustsecp256k1_v0_9_0_fe_get_bounds rustsecp256k1_v0_9_0_fe_impl_get_bounds
|
||||
# define rustsecp256k1_v0_9_0_fe_half rustsecp256k1_v0_9_0_fe_impl_half
|
||||
# define rustsecp256k1_v0_9_0_fe_add_int rustsecp256k1_v0_9_0_fe_impl_add_int
|
||||
# define rustsecp256k1_v0_9_0_fe_is_square_var rustsecp256k1_v0_9_0_fe_impl_is_square_var
|
||||
#endif /* !defined(VERIFY) */
|
||||
|
||||
/** Normalize a field element.
|
||||
*
|
||||
* On input, r must be a valid field element.
|
||||
* On output, r represents the same value but has normalized=1 and magnitude=1.
|
||||
*/
|
||||
static void rustsecp256k1_v0_8_1_fe_normalize(rustsecp256k1_v0_8_1_fe *r);
|
||||
static void rustsecp256k1_v0_9_0_fe_normalize(rustsecp256k1_v0_9_0_fe *r);
|
||||
|
||||
/** Weakly normalize a field element: reduce its magnitude to 1, but don't fully normalize. */
|
||||
static void rustsecp256k1_v0_8_1_fe_normalize_weak(rustsecp256k1_v0_8_1_fe *r);
|
||||
|
||||
/** Normalize a field element, without constant-time guarantee. */
|
||||
static void rustsecp256k1_v0_8_1_fe_normalize_var(rustsecp256k1_v0_8_1_fe *r);
|
||||
|
||||
/** Verify whether a field element represents zero i.e. would normalize to a zero value. */
|
||||
static int rustsecp256k1_v0_8_1_fe_normalizes_to_zero(const rustsecp256k1_v0_8_1_fe *r);
|
||||
|
||||
/** Verify whether a field element represents zero i.e. would normalize to a zero value,
|
||||
* without constant-time guarantee. */
|
||||
static int rustsecp256k1_v0_8_1_fe_normalizes_to_zero_var(const rustsecp256k1_v0_8_1_fe *r);
|
||||
|
||||
/** Set a field element equal to a small (not greater than 0x7FFF), non-negative integer.
|
||||
* Resulting field element is normalized; it has magnitude 0 if a == 0, and magnitude 1 otherwise.
|
||||
/** Give a field element magnitude 1.
|
||||
*
|
||||
* On input, r must be a valid field element.
|
||||
* On output, r represents the same value but has magnitude=1. Normalized is unchanged.
|
||||
*/
|
||||
static void rustsecp256k1_v0_8_1_fe_set_int(rustsecp256k1_v0_8_1_fe *r, int a);
|
||||
static void rustsecp256k1_v0_9_0_fe_normalize_weak(rustsecp256k1_v0_9_0_fe *r);
|
||||
|
||||
/** Sets a field element equal to zero, initializing all fields. */
|
||||
static void rustsecp256k1_v0_8_1_fe_clear(rustsecp256k1_v0_8_1_fe *a);
|
||||
/** Normalize a field element, without constant-time guarantee.
|
||||
*
|
||||
* Identical in behavior to rustsecp256k1_v0_9_0_fe_normalize, but not constant time in r.
|
||||
*/
|
||||
static void rustsecp256k1_v0_9_0_fe_normalize_var(rustsecp256k1_v0_9_0_fe *r);
|
||||
|
||||
/** Verify whether a field element is zero. Requires the input to be normalized. */
|
||||
static int rustsecp256k1_v0_8_1_fe_is_zero(const rustsecp256k1_v0_8_1_fe *a);
|
||||
/** Determine whether r represents field element 0.
|
||||
*
|
||||
* On input, r must be a valid field element.
|
||||
* Returns whether r = 0 (mod p).
|
||||
*/
|
||||
static int rustsecp256k1_v0_9_0_fe_normalizes_to_zero(const rustsecp256k1_v0_9_0_fe *r);
|
||||
|
||||
/** Check the "oddness" of a field element. Requires the input to be normalized. */
|
||||
static int rustsecp256k1_v0_8_1_fe_is_odd(const rustsecp256k1_v0_8_1_fe *a);
|
||||
/** Determine whether r represents field element 0, without constant-time guarantee.
|
||||
*
|
||||
* Identical in behavior to rustsecp256k1_v0_9_0_normalizes_to_zero, but not constant time in r.
|
||||
*/
|
||||
static int rustsecp256k1_v0_9_0_fe_normalizes_to_zero_var(const rustsecp256k1_v0_9_0_fe *r);
|
||||
|
||||
/** Compare two field elements. Requires magnitude-1 inputs. */
|
||||
static int rustsecp256k1_v0_8_1_fe_equal(const rustsecp256k1_v0_8_1_fe *a, const rustsecp256k1_v0_8_1_fe *b);
|
||||
/** Set a field element to an integer in range [0,0x7FFF].
|
||||
*
|
||||
* On input, r does not need to be initialized, a must be in [0,0x7FFF].
|
||||
* On output, r represents value a, is normalized and has magnitude (a!=0).
|
||||
*/
|
||||
static void rustsecp256k1_v0_9_0_fe_set_int(rustsecp256k1_v0_9_0_fe *r, int a);
|
||||
|
||||
/** Same as rustsecp256k1_v0_8_1_fe_equal, but may be variable time. */
|
||||
static int rustsecp256k1_v0_8_1_fe_equal_var(const rustsecp256k1_v0_8_1_fe *a, const rustsecp256k1_v0_8_1_fe *b);
|
||||
/** Set a field element to 0.
|
||||
*
|
||||
* On input, a does not need to be initialized.
|
||||
* On output, a represents 0, is normalized and has magnitude 0.
|
||||
*/
|
||||
static void rustsecp256k1_v0_9_0_fe_clear(rustsecp256k1_v0_9_0_fe *a);
|
||||
|
||||
/** Compare two field elements. Requires both inputs to be normalized */
|
||||
static int rustsecp256k1_v0_8_1_fe_cmp_var(const rustsecp256k1_v0_8_1_fe *a, const rustsecp256k1_v0_8_1_fe *b);
|
||||
/** Determine whether a represents field element 0.
|
||||
*
|
||||
* On input, a must be a valid normalized field element.
|
||||
* Returns whether a = 0 (mod p).
|
||||
*
|
||||
* This behaves identical to rustsecp256k1_v0_9_0_normalizes_to_zero{,_var}, but requires
|
||||
* normalized input (and is much faster).
|
||||
*/
|
||||
static int rustsecp256k1_v0_9_0_fe_is_zero(const rustsecp256k1_v0_9_0_fe *a);
|
||||
|
||||
/** Set a field element equal to 32-byte big endian value. If successful, the resulting field element is normalized. */
|
||||
static int rustsecp256k1_v0_8_1_fe_set_b32(rustsecp256k1_v0_8_1_fe *r, const unsigned char *a);
|
||||
/** Determine whether a (mod p) is odd.
|
||||
*
|
||||
* On input, a must be a valid normalized field element.
|
||||
* Returns (int(a) mod p) & 1.
|
||||
*/
|
||||
static int rustsecp256k1_v0_9_0_fe_is_odd(const rustsecp256k1_v0_9_0_fe *a);
|
||||
|
||||
/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
|
||||
static void rustsecp256k1_v0_8_1_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_8_1_fe *a);
|
||||
/** Determine whether two field elements are equal.
|
||||
*
|
||||
* On input, a and b must be valid field elements with magnitudes not exceeding
|
||||
* 1 and 31, respectively.
|
||||
* Returns a = b (mod p).
|
||||
*/
|
||||
static int rustsecp256k1_v0_9_0_fe_equal(const rustsecp256k1_v0_9_0_fe *a, const rustsecp256k1_v0_9_0_fe *b);
|
||||
|
||||
/** Set a field element equal to the additive inverse of another. Takes a maximum magnitude of the input
|
||||
* as an argument. The magnitude of the output is one higher. */
|
||||
static void rustsecp256k1_v0_8_1_fe_negate(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *a, int m);
|
||||
/** Compare the values represented by 2 field elements, without constant-time guarantee.
|
||||
*
|
||||
* On input, a and b must be valid normalized field elements.
|
||||
* Returns 1 if a > b, -1 if a < b, and 0 if a = b (comparisons are done as integers
|
||||
* in range 0..p-1).
|
||||
*/
|
||||
static int rustsecp256k1_v0_9_0_fe_cmp_var(const rustsecp256k1_v0_9_0_fe *a, const rustsecp256k1_v0_9_0_fe *b);
|
||||
|
||||
/** Multiplies the passed field element with a small integer constant. Multiplies the magnitude by that
|
||||
* small integer. */
|
||||
static void rustsecp256k1_v0_8_1_fe_mul_int(rustsecp256k1_v0_8_1_fe *r, int a);
|
||||
/** Set a field element equal to a provided 32-byte big endian value, reducing it.
|
||||
*
|
||||
* On input, r does not need to be initialized. a must be a pointer to an initialized 32-byte array.
|
||||
* On output, r = a (mod p). It will have magnitude 1, and not be normalized.
|
||||
*/
|
||||
static void rustsecp256k1_v0_9_0_fe_set_b32_mod(rustsecp256k1_v0_9_0_fe *r, const unsigned char *a);
|
||||
|
||||
/** Adds a field element to another. The result has the sum of the inputs' magnitudes as magnitude. */
|
||||
static void rustsecp256k1_v0_8_1_fe_add(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *a);
|
||||
/** Set a field element equal to a provided 32-byte big endian value, checking for overflow.
|
||||
*
|
||||
* On input, r does not need to be initialized. a must be a pointer to an initialized 32-byte array.
|
||||
* On output, r = a if (a < p), it will be normalized with magnitude 1, and 1 is returned.
|
||||
* If a >= p, 0 is returned, and r will be made invalid (and must not be used without overwriting).
|
||||
*/
|
||||
static int rustsecp256k1_v0_9_0_fe_set_b32_limit(rustsecp256k1_v0_9_0_fe *r, const unsigned char *a);
|
||||
|
||||
/** Sets a field element to be the product of two others. Requires the inputs' magnitudes to be at most 8.
|
||||
* The output magnitude is 1 (but not guaranteed to be normalized). */
|
||||
static void rustsecp256k1_v0_8_1_fe_mul(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *a, const rustsecp256k1_v0_8_1_fe * SECP256K1_RESTRICT b);
|
||||
/** Convert a field element to 32-byte big endian byte array.
|
||||
* On input, a must be a valid normalized field element, and r a pointer to a 32-byte array.
|
||||
* On output, r = a (mod p).
|
||||
*/
|
||||
static void rustsecp256k1_v0_9_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_9_0_fe *a);
|
||||
|
||||
/** Sets a field element to be the square of another. Requires the input's magnitude to be at most 8.
|
||||
* The output magnitude is 1 (but not guaranteed to be normalized). */
|
||||
static void rustsecp256k1_v0_8_1_fe_sqr(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *a);
|
||||
/** Negate a field element.
|
||||
*
|
||||
* On input, r does not need to be initialized. a must be a valid field element with
|
||||
* magnitude not exceeding m. m must be an integer constant expression in [0,31].
|
||||
* Performs {r = -a}.
|
||||
* On output, r will not be normalized, and will have magnitude m+1.
|
||||
*/
|
||||
#define rustsecp256k1_v0_9_0_fe_negate(r, a, m) ASSERT_INT_CONST_AND_DO(m, rustsecp256k1_v0_9_0_fe_negate_unchecked(r, a, m))
|
||||
|
||||
/** If a has a square root, it is computed in r and 1 is returned. If a does not
|
||||
* have a square root, the root of its negation is computed and 0 is returned.
|
||||
* The input's magnitude can be at most 8. The output magnitude is 1 (but not
|
||||
* guaranteed to be normalized). The result in r will always be a square
|
||||
* itself. */
|
||||
static int rustsecp256k1_v0_8_1_fe_sqrt(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *a);
|
||||
/** Like rustsecp256k1_v0_9_0_fe_negate_unchecked but m is not checked to be an integer constant expression.
|
||||
*
|
||||
* Should not be called directly outside of tests.
|
||||
*/
|
||||
static void rustsecp256k1_v0_9_0_fe_negate_unchecked(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a, int m);
|
||||
|
||||
/** Sets a field element to be the (modular) inverse of another. Requires the input's magnitude to be
|
||||
* at most 8. The output magnitude is 1 (but not guaranteed to be normalized). */
|
||||
static void rustsecp256k1_v0_8_1_fe_inv(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *a);
|
||||
/** Add a small integer to a field element.
|
||||
*
|
||||
* Performs {r += a}. The magnitude of r increases by 1, and normalized is cleared.
|
||||
* a must be in range [0,0x7FFF].
|
||||
*/
|
||||
static void rustsecp256k1_v0_9_0_fe_add_int(rustsecp256k1_v0_9_0_fe *r, int a);
|
||||
|
||||
/** Potentially faster version of rustsecp256k1_v0_8_1_fe_inv, without constant-time guarantee. */
|
||||
static void rustsecp256k1_v0_8_1_fe_inv_var(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *a);
|
||||
/** Multiply a field element with a small integer.
|
||||
*
|
||||
* On input, r must be a valid field element. a must be an integer constant expression in [0,32].
|
||||
* The magnitude of r times a must not exceed 32.
|
||||
* Performs {r *= a}.
|
||||
* On output, r's magnitude is multiplied by a, and r will not be normalized.
|
||||
*/
|
||||
#define rustsecp256k1_v0_9_0_fe_mul_int(r, a) ASSERT_INT_CONST_AND_DO(a, rustsecp256k1_v0_9_0_fe_mul_int_unchecked(r, a))
|
||||
|
||||
/** Convert a field element to the storage type. */
|
||||
static void rustsecp256k1_v0_8_1_fe_to_storage(rustsecp256k1_v0_8_1_fe_storage *r, const rustsecp256k1_v0_8_1_fe *a);
|
||||
/** Like rustsecp256k1_v0_9_0_fe_mul_int but a is not checked to be an integer constant expression.
|
||||
*
|
||||
* Should not be called directly outside of tests.
|
||||
*/
|
||||
static void rustsecp256k1_v0_9_0_fe_mul_int_unchecked(rustsecp256k1_v0_9_0_fe *r, int a);
|
||||
|
||||
/** Convert a field element back from the storage type. */
|
||||
static void rustsecp256k1_v0_8_1_fe_from_storage(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe_storage *a);
|
||||
/** Increment a field element by another.
|
||||
*
|
||||
* On input, r and a must be valid field elements, not necessarily normalized.
|
||||
* The sum of their magnitudes must not exceed 32.
|
||||
* Performs {r += a}.
|
||||
* On output, r will not be normalized, and will have magnitude incremented by a's.
|
||||
*/
|
||||
static void rustsecp256k1_v0_9_0_fe_add(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a);
|
||||
|
||||
/** Multiply two field elements.
|
||||
*
|
||||
* On input, a and b must be valid field elements; r does not need to be initialized.
|
||||
* r and a may point to the same object, but neither can be equal to b. The magnitudes
|
||||
* of a and b must not exceed 8.
|
||||
* Performs {r = a * b}
|
||||
* On output, r will have magnitude 1, but won't be normalized.
|
||||
*/
|
||||
static void rustsecp256k1_v0_9_0_fe_mul(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a, const rustsecp256k1_v0_9_0_fe * SECP256K1_RESTRICT b);
|
||||
|
||||
/** Square a field element.
|
||||
*
|
||||
* On input, a must be a valid field element; r does not need to be initialized. The magnitude
|
||||
* of a must not exceed 8.
|
||||
* Performs {r = a**2}
|
||||
* On output, r will have magnitude 1, but won't be normalized.
|
||||
*/
|
||||
static void rustsecp256k1_v0_9_0_fe_sqr(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a);
|
||||
|
||||
/** Compute a square root of a field element.
|
||||
*
|
||||
* On input, a must be a valid field element with magnitude<=8; r need not be initialized.
|
||||
* If sqrt(a) exists, performs {r = sqrt(a)} and returns 1.
|
||||
* Otherwise, sqrt(-a) exists. The function performs {r = sqrt(-a)} and returns 0.
|
||||
* The resulting value represented by r will be a square itself.
|
||||
* Variables r and a must not point to the same object.
|
||||
* On output, r will have magnitude 1 but will not be normalized.
|
||||
*/
|
||||
static int rustsecp256k1_v0_9_0_fe_sqrt(rustsecp256k1_v0_9_0_fe * SECP256K1_RESTRICT r, const rustsecp256k1_v0_9_0_fe * SECP256K1_RESTRICT a);
|
||||
|
||||
/** Compute the modular inverse of a field element.
|
||||
*
|
||||
* On input, a must be a valid field element; r need not be initialized.
|
||||
* Performs {r = a**(p-2)} (which maps 0 to 0, and every other element to its
|
||||
* inverse).
|
||||
* On output, r will have magnitude (a.magnitude != 0) and be normalized.
|
||||
*/
|
||||
static void rustsecp256k1_v0_9_0_fe_inv(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a);
|
||||
|
||||
/** Compute the modular inverse of a field element, without constant-time guarantee.
|
||||
*
|
||||
* Behaves identically to rustsecp256k1_v0_9_0_fe_inv, but is not constant-time in a.
|
||||
*/
|
||||
static void rustsecp256k1_v0_9_0_fe_inv_var(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a);
|
||||
|
||||
/** Convert a field element to rustsecp256k1_v0_9_0_fe_storage.
|
||||
*
|
||||
* On input, a must be a valid normalized field element.
|
||||
* Performs {r = a}.
|
||||
*/
|
||||
static void rustsecp256k1_v0_9_0_fe_to_storage(rustsecp256k1_v0_9_0_fe_storage *r, const rustsecp256k1_v0_9_0_fe *a);
|
||||
|
||||
/** Convert a field element back from rustsecp256k1_v0_9_0_fe_storage.
|
||||
*
|
||||
* On input, r need not be initialized.
|
||||
* Performs {r = a}.
|
||||
* On output, r will be normalized and will have magnitude 1.
|
||||
*/
|
||||
static void rustsecp256k1_v0_9_0_fe_from_storage(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe_storage *a);
|
||||
|
||||
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/
|
||||
static void rustsecp256k1_v0_8_1_fe_storage_cmov(rustsecp256k1_v0_8_1_fe_storage *r, const rustsecp256k1_v0_8_1_fe_storage *a, int flag);
|
||||
static void rustsecp256k1_v0_9_0_fe_storage_cmov(rustsecp256k1_v0_9_0_fe_storage *r, const rustsecp256k1_v0_9_0_fe_storage *a, int flag);
|
||||
|
||||
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/
|
||||
static void rustsecp256k1_v0_8_1_fe_cmov(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *a, int flag);
|
||||
/** Conditionally move a field element in constant time.
|
||||
*
|
||||
* On input, both r and a must be valid field elements. Flag must be 0 or 1.
|
||||
* Performs {r = flag ? a : r}.
|
||||
*
|
||||
* On output, r's magnitude will be the maximum of both input magnitudes.
|
||||
* It will be normalized if and only if both inputs were normalized.
|
||||
*/
|
||||
static void rustsecp256k1_v0_9_0_fe_cmov(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a, int flag);
|
||||
|
||||
/** Halves the value of a field element modulo the field prime. Constant-time.
|
||||
* For an input magnitude 'm', the output magnitude is set to 'floor(m/2) + 1'.
|
||||
* The output is not guaranteed to be normalized, regardless of the input. */
|
||||
static void rustsecp256k1_v0_8_1_fe_half(rustsecp256k1_v0_8_1_fe *r);
|
||||
/** Halve the value of a field element modulo the field prime in constant-time.
|
||||
*
|
||||
* On input, r must be a valid field element.
|
||||
* On output, r will be normalized and have magnitude floor(m/2) + 1 where m is
|
||||
* the magnitude of r on input.
|
||||
*/
|
||||
static void rustsecp256k1_v0_9_0_fe_half(rustsecp256k1_v0_9_0_fe *r);
|
||||
|
||||
/** Sets each limb of 'r' to its upper bound at magnitude 'm'. The output will also have its
|
||||
* magnitude set to 'm' and is normalized if (and only if) 'm' is zero. */
|
||||
static void rustsecp256k1_v0_8_1_fe_get_bounds(rustsecp256k1_v0_8_1_fe *r, int m);
|
||||
/** Sets r to a field element with magnitude m, normalized if (and only if) m==0.
|
||||
* The value is chosen so that it is likely to trigger edge cases related to
|
||||
* internal overflows. */
|
||||
static void rustsecp256k1_v0_9_0_fe_get_bounds(rustsecp256k1_v0_9_0_fe *r, int m);
|
||||
|
||||
/** Determine whether a is a square (modulo p).
|
||||
*
|
||||
* On input, a must be a valid field element.
|
||||
*/
|
||||
static int rustsecp256k1_v0_9_0_fe_is_square_var(const rustsecp256k1_v0_9_0_fe *a);
|
||||
|
||||
/** Check invariants on a field element (no-op unless VERIFY is enabled). */
|
||||
static void rustsecp256k1_v0_9_0_fe_verify(const rustsecp256k1_v0_9_0_fe *a);
|
||||
|
||||
/** Check that magnitude of a is at most m (no-op unless VERIFY is enabled). */
|
||||
static void rustsecp256k1_v0_9_0_fe_verify_magnitude(const rustsecp256k1_v0_9_0_fe *a, int m);
|
||||
|
||||
#endif /* SECP256K1_FIELD_H */
|
||||
|
|
|
@ -9,16 +9,29 @@
|
|||
|
||||
#include <stdint.h>
|
||||
|
||||
/** This field implementation represents the value as 10 uint32_t limbs in base
|
||||
* 2^26. */
|
||||
typedef struct {
|
||||
/* X = sum(i=0..9, n[i]*2^(i*26)) mod p
|
||||
* where p = 2^256 - 0x1000003D1
|
||||
*/
|
||||
/* A field element f represents the sum(i=0..9, f.n[i] << (i*26)) mod p,
|
||||
* where p is the field modulus, 2^256 - 2^32 - 977.
|
||||
*
|
||||
* The individual limbs f.n[i] can exceed 2^26; the field's magnitude roughly
|
||||
* corresponds to how much excess is allowed. The value
|
||||
* sum(i=0..9, f.n[i] << (i*26)) may exceed p, unless the field element is
|
||||
* normalized. */
|
||||
uint32_t n[10];
|
||||
#ifdef VERIFY
|
||||
int magnitude;
|
||||
int normalized;
|
||||
#endif
|
||||
} rustsecp256k1_v0_8_1_fe;
|
||||
/*
|
||||
* Magnitude m requires:
|
||||
* n[i] <= 2 * m * (2^26 - 1) for i=0..8
|
||||
* n[9] <= 2 * m * (2^22 - 1)
|
||||
*
|
||||
* Normalized requires:
|
||||
* n[i] <= (2^26 - 1) for i=0..8
|
||||
* sum(i=0..9, n[i] << (i*26)) < p
|
||||
* (together these imply n[9] <= 2^22 - 1)
|
||||
*/
|
||||
SECP256K1_FE_VERIFY_FIELDS
|
||||
} rustsecp256k1_v0_9_0_fe;
|
||||
|
||||
/* Unpacks a constant into a overlapping multi-limbed FE element. */
|
||||
#define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \
|
||||
|
@ -34,15 +47,9 @@ typedef struct {
|
|||
(((uint32_t)d7) >> 10) \
|
||||
}
|
||||
|
||||
#ifdef VERIFY
|
||||
#define SECP256K1_FE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {SECP256K1_FE_CONST_INNER((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0)), 1, 1}
|
||||
#else
|
||||
#define SECP256K1_FE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {SECP256K1_FE_CONST_INNER((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0))}
|
||||
#endif
|
||||
|
||||
typedef struct {
|
||||
uint32_t n[8];
|
||||
} rustsecp256k1_v0_8_1_fe_storage;
|
||||
} rustsecp256k1_v0_9_0_fe_storage;
|
||||
|
||||
#define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}
|
||||
#define SECP256K1_FE_STORAGE_CONST_GET(d) d.n[7], d.n[6], d.n[5], d.n[4],d.n[3], d.n[2], d.n[1], d.n[0]
|
||||
|
|
|
@ -7,51 +7,37 @@
|
|||
#ifndef SECP256K1_FIELD_REPR_IMPL_H
|
||||
#define SECP256K1_FIELD_REPR_IMPL_H
|
||||
|
||||
#include "checkmem.h"
|
||||
#include "util.h"
|
||||
#include "field.h"
|
||||
#include "modinv32_impl.h"
|
||||
|
||||
/** See the comment at the top of field_5x52_impl.h for more details.
|
||||
*
|
||||
* Here, we represent field elements as 10 uint32_t's in base 2^26, least significant first,
|
||||
* where limbs can contain >26 bits.
|
||||
* A magnitude M means:
|
||||
* - 2*M*(2^22-1) is the max (inclusive) of the most significant limb
|
||||
* - 2*M*(2^26-1) is the max (inclusive) of the remaining limbs
|
||||
*/
|
||||
|
||||
#ifdef VERIFY
|
||||
static void rustsecp256k1_v0_8_1_fe_verify(const rustsecp256k1_v0_8_1_fe *a) {
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_verify(const rustsecp256k1_v0_9_0_fe *a) {
|
||||
const uint32_t *d = a->n;
|
||||
int m = a->normalized ? 1 : 2 * a->magnitude, r = 1;
|
||||
r &= (d[0] <= 0x3FFFFFFUL * m);
|
||||
r &= (d[1] <= 0x3FFFFFFUL * m);
|
||||
r &= (d[2] <= 0x3FFFFFFUL * m);
|
||||
r &= (d[3] <= 0x3FFFFFFUL * m);
|
||||
r &= (d[4] <= 0x3FFFFFFUL * m);
|
||||
r &= (d[5] <= 0x3FFFFFFUL * m);
|
||||
r &= (d[6] <= 0x3FFFFFFUL * m);
|
||||
r &= (d[7] <= 0x3FFFFFFUL * m);
|
||||
r &= (d[8] <= 0x3FFFFFFUL * m);
|
||||
r &= (d[9] <= 0x03FFFFFUL * m);
|
||||
r &= (a->magnitude >= 0);
|
||||
r &= (a->magnitude <= 32);
|
||||
int m = a->normalized ? 1 : 2 * a->magnitude;
|
||||
VERIFY_CHECK(d[0] <= 0x3FFFFFFUL * m);
|
||||
VERIFY_CHECK(d[1] <= 0x3FFFFFFUL * m);
|
||||
VERIFY_CHECK(d[2] <= 0x3FFFFFFUL * m);
|
||||
VERIFY_CHECK(d[3] <= 0x3FFFFFFUL * m);
|
||||
VERIFY_CHECK(d[4] <= 0x3FFFFFFUL * m);
|
||||
VERIFY_CHECK(d[5] <= 0x3FFFFFFUL * m);
|
||||
VERIFY_CHECK(d[6] <= 0x3FFFFFFUL * m);
|
||||
VERIFY_CHECK(d[7] <= 0x3FFFFFFUL * m);
|
||||
VERIFY_CHECK(d[8] <= 0x3FFFFFFUL * m);
|
||||
VERIFY_CHECK(d[9] <= 0x03FFFFFUL * m);
|
||||
if (a->normalized) {
|
||||
r &= (a->magnitude <= 1);
|
||||
if (r && (d[9] == 0x03FFFFFUL)) {
|
||||
if (d[9] == 0x03FFFFFUL) {
|
||||
uint32_t mid = d[8] & d[7] & d[6] & d[5] & d[4] & d[3] & d[2];
|
||||
if (mid == 0x3FFFFFFUL) {
|
||||
r &= ((d[1] + 0x40UL + ((d[0] + 0x3D1UL) >> 26)) <= 0x3FFFFFFUL);
|
||||
VERIFY_CHECK((d[1] + 0x40UL + ((d[0] + 0x3D1UL) >> 26)) <= 0x3FFFFFFUL);
|
||||
}
|
||||
}
|
||||
}
|
||||
VERIFY_CHECK(r == 1);
|
||||
}
|
||||
#endif
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_get_bounds(rustsecp256k1_v0_8_1_fe *r, int m) {
|
||||
VERIFY_CHECK(m >= 0);
|
||||
VERIFY_CHECK(m <= 2048);
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_get_bounds(rustsecp256k1_v0_9_0_fe *r, int m) {
|
||||
r->n[0] = 0x3FFFFFFUL * 2 * m;
|
||||
r->n[1] = 0x3FFFFFFUL * 2 * m;
|
||||
r->n[2] = 0x3FFFFFFUL * 2 * m;
|
||||
|
@ -62,14 +48,9 @@ static void rustsecp256k1_v0_8_1_fe_get_bounds(rustsecp256k1_v0_8_1_fe *r, int m
|
|||
r->n[7] = 0x3FFFFFFUL * 2 * m;
|
||||
r->n[8] = 0x3FFFFFFUL * 2 * m;
|
||||
r->n[9] = 0x03FFFFFUL * 2 * m;
|
||||
#ifdef VERIFY
|
||||
r->magnitude = m;
|
||||
r->normalized = (m == 0);
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_normalize(rustsecp256k1_v0_8_1_fe *r) {
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_normalize(rustsecp256k1_v0_9_0_fe *r) {
|
||||
uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
|
||||
t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
|
||||
|
||||
|
@ -116,15 +97,9 @@ static void rustsecp256k1_v0_8_1_fe_normalize(rustsecp256k1_v0_8_1_fe *r) {
|
|||
|
||||
r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
|
||||
r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9;
|
||||
|
||||
#ifdef VERIFY
|
||||
r->magnitude = 1;
|
||||
r->normalized = 1;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_normalize_weak(rustsecp256k1_v0_8_1_fe *r) {
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_normalize_weak(rustsecp256k1_v0_9_0_fe *r) {
|
||||
uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
|
||||
t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
|
||||
|
||||
|
@ -148,14 +123,9 @@ static void rustsecp256k1_v0_8_1_fe_normalize_weak(rustsecp256k1_v0_8_1_fe *r) {
|
|||
|
||||
r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
|
||||
r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9;
|
||||
|
||||
#ifdef VERIFY
|
||||
r->magnitude = 1;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_normalize_var(rustsecp256k1_v0_8_1_fe *r) {
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_normalize_var(rustsecp256k1_v0_9_0_fe *r) {
|
||||
uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
|
||||
t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
|
||||
|
||||
|
@ -203,15 +173,9 @@ static void rustsecp256k1_v0_8_1_fe_normalize_var(rustsecp256k1_v0_8_1_fe *r) {
|
|||
|
||||
r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
|
||||
r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9;
|
||||
|
||||
#ifdef VERIFY
|
||||
r->magnitude = 1;
|
||||
r->normalized = 1;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_fe_normalizes_to_zero(const rustsecp256k1_v0_8_1_fe *r) {
|
||||
static int rustsecp256k1_v0_9_0_fe_impl_normalizes_to_zero(const rustsecp256k1_v0_9_0_fe *r) {
|
||||
uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
|
||||
t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
|
||||
|
||||
|
@ -240,7 +204,7 @@ static int rustsecp256k1_v0_8_1_fe_normalizes_to_zero(const rustsecp256k1_v0_8_1
|
|||
return (z0 == 0) | (z1 == 0x3FFFFFFUL);
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_fe_normalizes_to_zero_var(const rustsecp256k1_v0_8_1_fe *r) {
|
||||
static int rustsecp256k1_v0_9_0_fe_impl_normalizes_to_zero_var(const rustsecp256k1_v0_9_0_fe *r) {
|
||||
uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9;
|
||||
uint32_t z0, z1;
|
||||
uint32_t x;
|
||||
|
@ -292,53 +256,29 @@ static int rustsecp256k1_v0_8_1_fe_normalizes_to_zero_var(const rustsecp256k1_v0
|
|||
return (z0 == 0) | (z1 == 0x3FFFFFFUL);
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_set_int(rustsecp256k1_v0_8_1_fe *r, int a) {
|
||||
VERIFY_CHECK(0 <= a && a <= 0x7FFF);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_impl_set_int(rustsecp256k1_v0_9_0_fe *r, int a) {
|
||||
r->n[0] = a;
|
||||
r->n[1] = r->n[2] = r->n[3] = r->n[4] = r->n[5] = r->n[6] = r->n[7] = r->n[8] = r->n[9] = 0;
|
||||
#ifdef VERIFY
|
||||
r->magnitude = (a != 0);
|
||||
r->normalized = 1;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static int rustsecp256k1_v0_8_1_fe_is_zero(const rustsecp256k1_v0_8_1_fe *a) {
|
||||
SECP256K1_INLINE static int rustsecp256k1_v0_9_0_fe_impl_is_zero(const rustsecp256k1_v0_9_0_fe *a) {
|
||||
const uint32_t *t = a->n;
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(a->normalized);
|
||||
rustsecp256k1_v0_8_1_fe_verify(a);
|
||||
#endif
|
||||
return (t[0] | t[1] | t[2] | t[3] | t[4] | t[5] | t[6] | t[7] | t[8] | t[9]) == 0;
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static int rustsecp256k1_v0_8_1_fe_is_odd(const rustsecp256k1_v0_8_1_fe *a) {
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(a->normalized);
|
||||
rustsecp256k1_v0_8_1_fe_verify(a);
|
||||
#endif
|
||||
SECP256K1_INLINE static int rustsecp256k1_v0_9_0_fe_impl_is_odd(const rustsecp256k1_v0_9_0_fe *a) {
|
||||
return a->n[0] & 1;
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_clear(rustsecp256k1_v0_8_1_fe *a) {
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_impl_clear(rustsecp256k1_v0_9_0_fe *a) {
|
||||
int i;
|
||||
#ifdef VERIFY
|
||||
a->magnitude = 0;
|
||||
a->normalized = 1;
|
||||
#endif
|
||||
for (i=0; i<10; i++) {
|
||||
a->n[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_fe_cmp_var(const rustsecp256k1_v0_8_1_fe *a, const rustsecp256k1_v0_8_1_fe *b) {
|
||||
static int rustsecp256k1_v0_9_0_fe_impl_cmp_var(const rustsecp256k1_v0_9_0_fe *a, const rustsecp256k1_v0_9_0_fe *b) {
|
||||
int i;
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(a->normalized);
|
||||
VERIFY_CHECK(b->normalized);
|
||||
rustsecp256k1_v0_8_1_fe_verify(a);
|
||||
rustsecp256k1_v0_8_1_fe_verify(b);
|
||||
#endif
|
||||
for (i = 9; i >= 0; i--) {
|
||||
if (a->n[i] > b->n[i]) {
|
||||
return 1;
|
||||
|
@ -350,8 +290,7 @@ static int rustsecp256k1_v0_8_1_fe_cmp_var(const rustsecp256k1_v0_8_1_fe *a, con
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_fe_set_b32(rustsecp256k1_v0_8_1_fe *r, const unsigned char *a) {
|
||||
int ret;
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_set_b32_mod(rustsecp256k1_v0_9_0_fe *r, const unsigned char *a) {
|
||||
r->n[0] = (uint32_t)a[31] | ((uint32_t)a[30] << 8) | ((uint32_t)a[29] << 16) | ((uint32_t)(a[28] & 0x3) << 24);
|
||||
r->n[1] = (uint32_t)((a[28] >> 2) & 0x3f) | ((uint32_t)a[27] << 6) | ((uint32_t)a[26] << 14) | ((uint32_t)(a[25] & 0xf) << 22);
|
||||
r->n[2] = (uint32_t)((a[25] >> 4) & 0xf) | ((uint32_t)a[24] << 4) | ((uint32_t)a[23] << 12) | ((uint32_t)(a[22] & 0x3f) << 20);
|
||||
|
@ -362,26 +301,15 @@ static int rustsecp256k1_v0_8_1_fe_set_b32(rustsecp256k1_v0_8_1_fe *r, const uns
|
|||
r->n[7] = (uint32_t)((a[9] >> 6) & 0x3) | ((uint32_t)a[8] << 2) | ((uint32_t)a[7] << 10) | ((uint32_t)a[6] << 18);
|
||||
r->n[8] = (uint32_t)a[5] | ((uint32_t)a[4] << 8) | ((uint32_t)a[3] << 16) | ((uint32_t)(a[2] & 0x3) << 24);
|
||||
r->n[9] = (uint32_t)((a[2] >> 2) & 0x3f) | ((uint32_t)a[1] << 6) | ((uint32_t)a[0] << 14);
|
||||
|
||||
ret = !((r->n[9] == 0x3FFFFFUL) & ((r->n[8] & r->n[7] & r->n[6] & r->n[5] & r->n[4] & r->n[3] & r->n[2]) == 0x3FFFFFFUL) & ((r->n[1] + 0x40UL + ((r->n[0] + 0x3D1UL) >> 26)) > 0x3FFFFFFUL));
|
||||
#ifdef VERIFY
|
||||
r->magnitude = 1;
|
||||
if (ret) {
|
||||
r->normalized = 1;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
} else {
|
||||
r->normalized = 0;
|
||||
}
|
||||
#endif
|
||||
return ret;
|
||||
|
||||
static int rustsecp256k1_v0_9_0_fe_impl_set_b32_limit(rustsecp256k1_v0_9_0_fe *r, const unsigned char *a) {
|
||||
rustsecp256k1_v0_9_0_fe_impl_set_b32_mod(r, a);
|
||||
return !((r->n[9] == 0x3FFFFFUL) & ((r->n[8] & r->n[7] & r->n[6] & r->n[5] & r->n[4] & r->n[3] & r->n[2]) == 0x3FFFFFFUL) & ((r->n[1] + 0x40UL + ((r->n[0] + 0x3D1UL) >> 26)) > 0x3FFFFFFUL));
|
||||
}
|
||||
|
||||
/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
|
||||
static void rustsecp256k1_v0_8_1_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_8_1_fe *a) {
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(a->normalized);
|
||||
rustsecp256k1_v0_8_1_fe_verify(a);
|
||||
#endif
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_get_b32(unsigned char *r, const rustsecp256k1_v0_9_0_fe *a) {
|
||||
r[0] = (a->n[9] >> 14) & 0xff;
|
||||
r[1] = (a->n[9] >> 6) & 0xff;
|
||||
r[2] = ((a->n[9] & 0x3F) << 2) | ((a->n[8] >> 24) & 0x3);
|
||||
|
@ -416,15 +344,15 @@ static void rustsecp256k1_v0_8_1_fe_get_b32(unsigned char *r, const rustsecp256k
|
|||
r[31] = a->n[0] & 0xff;
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_negate(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *a, int m) {
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(a->magnitude <= m);
|
||||
rustsecp256k1_v0_8_1_fe_verify(a);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_impl_negate_unchecked(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a, int m) {
|
||||
/* For all legal values of m (0..31), the following properties hold: */
|
||||
VERIFY_CHECK(0x3FFFC2FUL * 2 * (m + 1) >= 0x3FFFFFFUL * 2 * m);
|
||||
VERIFY_CHECK(0x3FFFFBFUL * 2 * (m + 1) >= 0x3FFFFFFUL * 2 * m);
|
||||
VERIFY_CHECK(0x3FFFFFFUL * 2 * (m + 1) >= 0x3FFFFFFUL * 2 * m);
|
||||
VERIFY_CHECK(0x03FFFFFUL * 2 * (m + 1) >= 0x03FFFFFUL * 2 * m);
|
||||
#endif
|
||||
|
||||
/* Due to the properties above, the left hand in the subtractions below is never less than
|
||||
* the right hand. */
|
||||
r->n[0] = 0x3FFFC2FUL * 2 * (m + 1) - a->n[0];
|
||||
r->n[1] = 0x3FFFFBFUL * 2 * (m + 1) - a->n[1];
|
||||
r->n[2] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[2];
|
||||
|
@ -435,14 +363,9 @@ SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_negate(rustsecp256k1_v0_8_1
|
|||
r->n[7] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[7];
|
||||
r->n[8] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[8];
|
||||
r->n[9] = 0x03FFFFFUL * 2 * (m + 1) - a->n[9];
|
||||
#ifdef VERIFY
|
||||
r->magnitude = m + 1;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_mul_int(rustsecp256k1_v0_8_1_fe *r, int a) {
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_impl_mul_int_unchecked(rustsecp256k1_v0_9_0_fe *r, int a) {
|
||||
r->n[0] *= a;
|
||||
r->n[1] *= a;
|
||||
r->n[2] *= a;
|
||||
|
@ -453,17 +376,9 @@ SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_mul_int(rustsecp256k1_v0_8_
|
|||
r->n[7] *= a;
|
||||
r->n[8] *= a;
|
||||
r->n[9] *= a;
|
||||
#ifdef VERIFY
|
||||
r->magnitude *= a;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_add(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *a) {
|
||||
#ifdef VERIFY
|
||||
rustsecp256k1_v0_8_1_fe_verify(a);
|
||||
#endif
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_impl_add(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a) {
|
||||
r->n[0] += a->n[0];
|
||||
r->n[1] += a->n[1];
|
||||
r->n[2] += a->n[2];
|
||||
|
@ -474,18 +389,17 @@ SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_add(rustsecp256k1_v0_8_1_fe
|
|||
r->n[7] += a->n[7];
|
||||
r->n[8] += a->n[8];
|
||||
r->n[9] += a->n[9];
|
||||
#ifdef VERIFY
|
||||
r->magnitude += a->magnitude;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_impl_add_int(rustsecp256k1_v0_9_0_fe *r, int a) {
|
||||
r->n[0] += a;
|
||||
}
|
||||
|
||||
#if defined(USE_EXTERNAL_ASM)
|
||||
|
||||
/* External assembler implementation */
|
||||
void rustsecp256k1_v0_8_1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b);
|
||||
void rustsecp256k1_v0_8_1_fe_sqr_inner(uint32_t *r, const uint32_t *a);
|
||||
void rustsecp256k1_v0_9_0_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b);
|
||||
void rustsecp256k1_v0_9_0_fe_sqr_inner(uint32_t *r, const uint32_t *a);
|
||||
|
||||
#else
|
||||
|
||||
|
@ -495,7 +409,7 @@ void rustsecp256k1_v0_8_1_fe_sqr_inner(uint32_t *r, const uint32_t *a);
|
|||
#define VERIFY_BITS(x, n) do { } while(0)
|
||||
#endif
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) {
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) {
|
||||
uint64_t c, d;
|
||||
uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8;
|
||||
uint32_t t9, t1, t0, t2, t3, t4, t5, t6, t7;
|
||||
|
@ -825,7 +739,7 @@ SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_mul_inner(uint32_t *r, cons
|
|||
/* [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_sqr_inner(uint32_t *r, const uint32_t *a) {
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_sqr_inner(uint32_t *r, const uint32_t *a) {
|
||||
uint64_t c, d;
|
||||
uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8;
|
||||
uint32_t t9, t0, t1, t2, t3, t4, t5, t6, t7;
|
||||
|
@ -1100,40 +1014,19 @@ SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_sqr_inner(uint32_t *r, cons
|
|||
}
|
||||
#endif
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_mul(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *a, const rustsecp256k1_v0_8_1_fe * SECP256K1_RESTRICT b) {
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(a->magnitude <= 8);
|
||||
VERIFY_CHECK(b->magnitude <= 8);
|
||||
rustsecp256k1_v0_8_1_fe_verify(a);
|
||||
rustsecp256k1_v0_8_1_fe_verify(b);
|
||||
VERIFY_CHECK(r != b);
|
||||
VERIFY_CHECK(a != b);
|
||||
#endif
|
||||
rustsecp256k1_v0_8_1_fe_mul_inner(r->n, a->n, b->n);
|
||||
#ifdef VERIFY
|
||||
r->magnitude = 1;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_impl_mul(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a, const rustsecp256k1_v0_9_0_fe * SECP256K1_RESTRICT b) {
|
||||
rustsecp256k1_v0_9_0_fe_mul_inner(r->n, a->n, b->n);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_sqr(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *a) {
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(a->magnitude <= 8);
|
||||
rustsecp256k1_v0_8_1_fe_verify(a);
|
||||
#endif
|
||||
rustsecp256k1_v0_8_1_fe_sqr_inner(r->n, a->n);
|
||||
#ifdef VERIFY
|
||||
r->magnitude = 1;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_impl_sqr(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a) {
|
||||
rustsecp256k1_v0_9_0_fe_sqr_inner(r->n, a->n);
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_fe_cmov(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *a, int flag) {
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_impl_cmov(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a, int flag) {
|
||||
uint32_t mask0, mask1;
|
||||
VG_CHECK_VERIFY(r->n, sizeof(r->n));
|
||||
mask0 = flag + ~((uint32_t)0);
|
||||
volatile int vflag = flag;
|
||||
SECP256K1_CHECKMEM_CHECK_VERIFY(r->n, sizeof(r->n));
|
||||
mask0 = vflag + ~((uint32_t)0);
|
||||
mask1 = ~mask0;
|
||||
r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
|
||||
r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
|
||||
|
@ -1145,25 +1038,14 @@ static SECP256K1_INLINE void rustsecp256k1_v0_8_1_fe_cmov(rustsecp256k1_v0_8_1_f
|
|||
r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1);
|
||||
r->n[8] = (r->n[8] & mask0) | (a->n[8] & mask1);
|
||||
r->n[9] = (r->n[9] & mask0) | (a->n[9] & mask1);
|
||||
#ifdef VERIFY
|
||||
if (flag) {
|
||||
r->magnitude = a->magnitude;
|
||||
r->normalized = a->normalized;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_fe_half(rustsecp256k1_v0_8_1_fe *r) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_fe_impl_half(rustsecp256k1_v0_9_0_fe *r) {
|
||||
uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
|
||||
t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
|
||||
uint32_t one = (uint32_t)1;
|
||||
uint32_t mask = -(t0 & one) >> 6;
|
||||
|
||||
#ifdef VERIFY
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
VERIFY_CHECK(r->magnitude < 32);
|
||||
#endif
|
||||
|
||||
/* Bounds analysis (over the rationals).
|
||||
*
|
||||
* Let m = r->magnitude
|
||||
|
@ -1210,10 +1092,8 @@ static SECP256K1_INLINE void rustsecp256k1_v0_8_1_fe_half(rustsecp256k1_v0_8_1_f
|
|||
*
|
||||
* Current bounds: t0..t8 <= C * (m/2 + 1/2)
|
||||
* t9 <= D * (m/2 + 1/4)
|
||||
*/
|
||||
|
||||
#ifdef VERIFY
|
||||
/* Therefore the output magnitude (M) has to be set such that:
|
||||
*
|
||||
* Therefore the output magnitude (M) has to be set such that:
|
||||
* t0..t8: C * M >= C * (m/2 + 1/2)
|
||||
* t9: D * M >= D * (m/2 + 1/4)
|
||||
*
|
||||
|
@ -1223,16 +1103,13 @@ static SECP256K1_INLINE void rustsecp256k1_v0_8_1_fe_half(rustsecp256k1_v0_8_1_f
|
|||
* and since we want the smallest such integer value for M:
|
||||
* M == floor(m/2) + 1
|
||||
*/
|
||||
r->magnitude = (r->magnitude >> 1) + 1;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_fe_storage_cmov(rustsecp256k1_v0_8_1_fe_storage *r, const rustsecp256k1_v0_8_1_fe_storage *a, int flag) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_fe_storage_cmov(rustsecp256k1_v0_9_0_fe_storage *r, const rustsecp256k1_v0_9_0_fe_storage *a, int flag) {
|
||||
uint32_t mask0, mask1;
|
||||
VG_CHECK_VERIFY(r->n, sizeof(r->n));
|
||||
mask0 = flag + ~((uint32_t)0);
|
||||
volatile int vflag = flag;
|
||||
SECP256K1_CHECKMEM_CHECK_VERIFY(r->n, sizeof(r->n));
|
||||
mask0 = vflag + ~((uint32_t)0);
|
||||
mask1 = ~mask0;
|
||||
r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
|
||||
r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
|
||||
|
@ -1244,10 +1121,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_8_1_fe_storage_cmov(rustsecp256k1_
|
|||
r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_to_storage(rustsecp256k1_v0_8_1_fe_storage *r, const rustsecp256k1_v0_8_1_fe *a) {
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(a->normalized);
|
||||
#endif
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_to_storage(rustsecp256k1_v0_9_0_fe_storage *r, const rustsecp256k1_v0_9_0_fe *a) {
|
||||
r->n[0] = a->n[0] | a->n[1] << 26;
|
||||
r->n[1] = a->n[1] >> 6 | a->n[2] << 20;
|
||||
r->n[2] = a->n[2] >> 12 | a->n[3] << 14;
|
||||
|
@ -1258,7 +1132,7 @@ static void rustsecp256k1_v0_8_1_fe_to_storage(rustsecp256k1_v0_8_1_fe_storage *
|
|||
r->n[7] = a->n[8] >> 16 | a->n[9] << 10;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_fe_from_storage(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe_storage *a) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_fe_impl_from_storage(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe_storage *a) {
|
||||
r->n[0] = a->n[0] & 0x3FFFFFFUL;
|
||||
r->n[1] = a->n[0] >> 26 | ((a->n[1] << 6) & 0x3FFFFFFUL);
|
||||
r->n[2] = a->n[1] >> 20 | ((a->n[2] << 12) & 0x3FFFFFFUL);
|
||||
|
@ -1269,19 +1143,14 @@ static SECP256K1_INLINE void rustsecp256k1_v0_8_1_fe_from_storage(rustsecp256k1_
|
|||
r->n[7] = a->n[5] >> 22 | ((a->n[6] << 10) & 0x3FFFFFFUL);
|
||||
r->n[8] = a->n[6] >> 16 | ((a->n[7] << 16) & 0x3FFFFFFUL);
|
||||
r->n[9] = a->n[7] >> 10;
|
||||
#ifdef VERIFY
|
||||
r->magnitude = 1;
|
||||
r->normalized = 1;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_from_signed30(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_modinv32_signed30 *a) {
|
||||
static void rustsecp256k1_v0_9_0_fe_from_signed30(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_modinv32_signed30 *a) {
|
||||
const uint32_t M26 = UINT32_MAX >> 6;
|
||||
const uint32_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4],
|
||||
a5 = a->v[5], a6 = a->v[6], a7 = a->v[7], a8 = a->v[8];
|
||||
|
||||
/* The output from rustsecp256k1_v0_8_1_modinv32{_var} should be normalized to range [0,modulus), and
|
||||
/* The output from rustsecp256k1_v0_9_0_modinv32{_var} should be normalized to range [0,modulus), and
|
||||
* have limbs in [0,2^30). The modulus is < 2^256, so the top limb must be below 2^(256-30*8).
|
||||
*/
|
||||
VERIFY_CHECK(a0 >> 30 == 0);
|
||||
|
@ -1304,23 +1173,13 @@ static void rustsecp256k1_v0_8_1_fe_from_signed30(rustsecp256k1_v0_8_1_fe *r, co
|
|||
r->n[7] = (a6 >> 2 ) & M26;
|
||||
r->n[8] = (a6 >> 28 | a7 << 2) & M26;
|
||||
r->n[9] = (a7 >> 24 | a8 << 6);
|
||||
|
||||
#ifdef VERIFY
|
||||
r->magnitude = 1;
|
||||
r->normalized = 1;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_to_signed30(rustsecp256k1_v0_8_1_modinv32_signed30 *r, const rustsecp256k1_v0_8_1_fe *a) {
|
||||
static void rustsecp256k1_v0_9_0_fe_to_signed30(rustsecp256k1_v0_9_0_modinv32_signed30 *r, const rustsecp256k1_v0_9_0_fe *a) {
|
||||
const uint32_t M30 = UINT32_MAX >> 2;
|
||||
const uint64_t a0 = a->n[0], a1 = a->n[1], a2 = a->n[2], a3 = a->n[3], a4 = a->n[4],
|
||||
a5 = a->n[5], a6 = a->n[6], a7 = a->n[7], a8 = a->n[8], a9 = a->n[9];
|
||||
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(a->normalized);
|
||||
#endif
|
||||
|
||||
r->v[0] = (a0 | a1 << 26) & M30;
|
||||
r->v[1] = (a1 >> 4 | a2 << 22) & M30;
|
||||
r->v[2] = (a2 >> 8 | a3 << 18) & M30;
|
||||
|
@ -1333,35 +1192,52 @@ static void rustsecp256k1_v0_8_1_fe_to_signed30(rustsecp256k1_v0_8_1_modinv32_si
|
|||
r->v[8] = a9 >> 6;
|
||||
}
|
||||
|
||||
static const rustsecp256k1_v0_8_1_modinv32_modinfo rustsecp256k1_v0_8_1_const_modinfo_fe = {
|
||||
static const rustsecp256k1_v0_9_0_modinv32_modinfo rustsecp256k1_v0_9_0_const_modinfo_fe = {
|
||||
{{-0x3D1, -4, 0, 0, 0, 0, 0, 0, 65536}},
|
||||
0x2DDACACFL
|
||||
};
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_inv(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *x) {
|
||||
rustsecp256k1_v0_8_1_fe tmp;
|
||||
rustsecp256k1_v0_8_1_modinv32_signed30 s;
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_inv(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *x) {
|
||||
rustsecp256k1_v0_9_0_fe tmp = *x;
|
||||
rustsecp256k1_v0_9_0_modinv32_signed30 s;
|
||||
|
||||
tmp = *x;
|
||||
rustsecp256k1_v0_8_1_fe_normalize(&tmp);
|
||||
rustsecp256k1_v0_8_1_fe_to_signed30(&s, &tmp);
|
||||
rustsecp256k1_v0_8_1_modinv32(&s, &rustsecp256k1_v0_8_1_const_modinfo_fe);
|
||||
rustsecp256k1_v0_8_1_fe_from_signed30(r, &s);
|
||||
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_fe_normalizes_to_zero(r) == rustsecp256k1_v0_8_1_fe_normalizes_to_zero(&tmp));
|
||||
rustsecp256k1_v0_9_0_fe_normalize(&tmp);
|
||||
rustsecp256k1_v0_9_0_fe_to_signed30(&s, &tmp);
|
||||
rustsecp256k1_v0_9_0_modinv32(&s, &rustsecp256k1_v0_9_0_const_modinfo_fe);
|
||||
rustsecp256k1_v0_9_0_fe_from_signed30(r, &s);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_inv_var(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *x) {
|
||||
rustsecp256k1_v0_8_1_fe tmp;
|
||||
rustsecp256k1_v0_8_1_modinv32_signed30 s;
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_inv_var(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *x) {
|
||||
rustsecp256k1_v0_9_0_fe tmp = *x;
|
||||
rustsecp256k1_v0_9_0_modinv32_signed30 s;
|
||||
|
||||
rustsecp256k1_v0_9_0_fe_normalize_var(&tmp);
|
||||
rustsecp256k1_v0_9_0_fe_to_signed30(&s, &tmp);
|
||||
rustsecp256k1_v0_9_0_modinv32_var(&s, &rustsecp256k1_v0_9_0_const_modinfo_fe);
|
||||
rustsecp256k1_v0_9_0_fe_from_signed30(r, &s);
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_9_0_fe_impl_is_square_var(const rustsecp256k1_v0_9_0_fe *x) {
|
||||
rustsecp256k1_v0_9_0_fe tmp;
|
||||
rustsecp256k1_v0_9_0_modinv32_signed30 s;
|
||||
int jac, ret;
|
||||
|
||||
tmp = *x;
|
||||
rustsecp256k1_v0_8_1_fe_normalize_var(&tmp);
|
||||
rustsecp256k1_v0_8_1_fe_to_signed30(&s, &tmp);
|
||||
rustsecp256k1_v0_8_1_modinv32_var(&s, &rustsecp256k1_v0_8_1_const_modinfo_fe);
|
||||
rustsecp256k1_v0_8_1_fe_from_signed30(r, &s);
|
||||
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_fe_normalizes_to_zero(r) == rustsecp256k1_v0_8_1_fe_normalizes_to_zero(&tmp));
|
||||
rustsecp256k1_v0_9_0_fe_normalize_var(&tmp);
|
||||
/* rustsecp256k1_v0_9_0_jacobi32_maybe_var cannot deal with input 0. */
|
||||
if (rustsecp256k1_v0_9_0_fe_is_zero(&tmp)) return 1;
|
||||
rustsecp256k1_v0_9_0_fe_to_signed30(&s, &tmp);
|
||||
jac = rustsecp256k1_v0_9_0_jacobi32_maybe_var(&s, &rustsecp256k1_v0_9_0_const_modinfo_fe);
|
||||
if (jac == 0) {
|
||||
/* rustsecp256k1_v0_9_0_jacobi32_maybe_var failed to compute the Jacobi symbol. Fall back
|
||||
* to computing a square root. This should be extremely rare with random
|
||||
* input (except in VERIFY mode, where a lower iteration count is used). */
|
||||
rustsecp256k1_v0_9_0_fe dummy;
|
||||
ret = rustsecp256k1_v0_9_0_fe_sqrt(&dummy, &tmp);
|
||||
} else {
|
||||
ret = jac >= 0;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif /* SECP256K1_FIELD_REPR_IMPL_H */
|
||||
|
|
|
@ -9,16 +9,29 @@
|
|||
|
||||
#include <stdint.h>
|
||||
|
||||
/** This field implementation represents the value as 5 uint64_t limbs in base
|
||||
* 2^52. */
|
||||
typedef struct {
|
||||
/* X = sum(i=0..4, n[i]*2^(i*52)) mod p
|
||||
* where p = 2^256 - 0x1000003D1
|
||||
*/
|
||||
/* A field element f represents the sum(i=0..4, f.n[i] << (i*52)) mod p,
|
||||
* where p is the field modulus, 2^256 - 2^32 - 977.
|
||||
*
|
||||
* The individual limbs f.n[i] can exceed 2^52; the field's magnitude roughly
|
||||
* corresponds to how much excess is allowed. The value
|
||||
* sum(i=0..4, f.n[i] << (i*52)) may exceed p, unless the field element is
|
||||
* normalized. */
|
||||
uint64_t n[5];
|
||||
#ifdef VERIFY
|
||||
int magnitude;
|
||||
int normalized;
|
||||
#endif
|
||||
} rustsecp256k1_v0_8_1_fe;
|
||||
/*
|
||||
* Magnitude m requires:
|
||||
* n[i] <= 2 * m * (2^52 - 1) for i=0..3
|
||||
* n[4] <= 2 * m * (2^48 - 1)
|
||||
*
|
||||
* Normalized requires:
|
||||
* n[i] <= (2^52 - 1) for i=0..3
|
||||
* sum(i=0..4, n[i] << (i*52)) < p
|
||||
* (together these imply n[4] <= 2^48 - 1)
|
||||
*/
|
||||
SECP256K1_FE_VERIFY_FIELDS
|
||||
} rustsecp256k1_v0_9_0_fe;
|
||||
|
||||
/* Unpacks a constant into a overlapping multi-limbed FE element. */
|
||||
#define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \
|
||||
|
@ -29,15 +42,9 @@ typedef struct {
|
|||
((uint64_t)(d6) >> 16) | (((uint64_t)(d7)) << 16) \
|
||||
}
|
||||
|
||||
#ifdef VERIFY
|
||||
#define SECP256K1_FE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {SECP256K1_FE_CONST_INNER((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0)), 1, 1}
|
||||
#else
|
||||
#define SECP256K1_FE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {SECP256K1_FE_CONST_INNER((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0))}
|
||||
#endif
|
||||
|
||||
typedef struct {
|
||||
uint64_t n[4];
|
||||
} rustsecp256k1_v0_8_1_fe_storage;
|
||||
} rustsecp256k1_v0_9_0_fe_storage;
|
||||
|
||||
#define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ \
|
||||
(d0) | (((uint64_t)(d1)) << 32), \
|
||||
|
|
|
@ -14,7 +14,9 @@
|
|||
#ifndef SECP256K1_FIELD_INNER5X52_IMPL_H
|
||||
#define SECP256K1_FIELD_INNER5X52_IMPL_H
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
|
||||
#include "util.h"
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
|
||||
/**
|
||||
* Registers: rdx:rax = multiplication accumulator
|
||||
* r9:r8 = c
|
||||
|
@ -278,13 +280,13 @@ __asm__ __volatile__(
|
|||
"addq %%rsi,%%r8\n"
|
||||
/* r[4] = c */
|
||||
"movq %%r8,32(%%rdi)\n"
|
||||
: "+S"(a), "=m"(tmp1), "=m"(tmp2), "=m"(tmp3)
|
||||
: "+S"(a), "=&m"(tmp1), "=&m"(tmp2), "=&m"(tmp3)
|
||||
: "b"(b), "D"(r)
|
||||
: "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "cc", "memory"
|
||||
);
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_sqr_inner(uint64_t *r, const uint64_t *a) {
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_sqr_inner(uint64_t *r, const uint64_t *a) {
|
||||
/**
|
||||
* Registers: rdx:rax = multiplication accumulator
|
||||
* r9:r8 = c
|
||||
|
@ -493,7 +495,7 @@ __asm__ __volatile__(
|
|||
"addq %%rsi,%%r8\n"
|
||||
/* r[4] = c */
|
||||
"movq %%r8,32(%%rdi)\n"
|
||||
: "+S"(a), "=m"(tmp1), "=m"(tmp2), "=m"(tmp3)
|
||||
: "+S"(a), "=&m"(tmp1), "=&m"(tmp2), "=&m"(tmp3)
|
||||
: "D"(r)
|
||||
: "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "cc", "memory"
|
||||
);
|
||||
|
|
|
@ -7,10 +7,7 @@
|
|||
#ifndef SECP256K1_FIELD_REPR_IMPL_H
|
||||
#define SECP256K1_FIELD_REPR_IMPL_H
|
||||
|
||||
#if defined HAVE_CONFIG_H
|
||||
#include "libsecp256k1-config.h"
|
||||
#endif
|
||||
|
||||
#include "checkmem.h"
|
||||
#include "util.h"
|
||||
#include "field.h"
|
||||
#include "modinv64_impl.h"
|
||||
|
@ -21,59 +18,33 @@
|
|||
#include "field_5x52_int128_impl.h"
|
||||
#endif
|
||||
|
||||
/** Implements arithmetic modulo FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE FFFFFC2F,
|
||||
* represented as 5 uint64_t's in base 2^52, least significant first. Note that the limbs are allowed to
|
||||
* contain >52 bits each.
|
||||
*
|
||||
* Each field element has a 'magnitude' associated with it. Internally, a magnitude M means:
|
||||
* - 2*M*(2^48-1) is the max (inclusive) of the most significant limb
|
||||
* - 2*M*(2^52-1) is the max (inclusive) of the remaining limbs
|
||||
*
|
||||
* Operations have different rules for propagating magnitude to their outputs. If an operation takes a
|
||||
* magnitude M as a parameter, that means the magnitude of input field elements can be at most M (inclusive).
|
||||
*
|
||||
* Each field element also has a 'normalized' flag. A field element is normalized if its magnitude is either
|
||||
* 0 or 1, and its value is already reduced modulo the order of the field.
|
||||
*/
|
||||
|
||||
#ifdef VERIFY
|
||||
static void rustsecp256k1_v0_8_1_fe_verify(const rustsecp256k1_v0_8_1_fe *a) {
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_verify(const rustsecp256k1_v0_9_0_fe *a) {
|
||||
const uint64_t *d = a->n;
|
||||
int m = a->normalized ? 1 : 2 * a->magnitude, r = 1;
|
||||
int m = a->normalized ? 1 : 2 * a->magnitude;
|
||||
/* secp256k1 'p' value defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
|
||||
r &= (d[0] <= 0xFFFFFFFFFFFFFULL * m);
|
||||
r &= (d[1] <= 0xFFFFFFFFFFFFFULL * m);
|
||||
r &= (d[2] <= 0xFFFFFFFFFFFFFULL * m);
|
||||
r &= (d[3] <= 0xFFFFFFFFFFFFFULL * m);
|
||||
r &= (d[4] <= 0x0FFFFFFFFFFFFULL * m);
|
||||
r &= (a->magnitude >= 0);
|
||||
r &= (a->magnitude <= 2048);
|
||||
VERIFY_CHECK(d[0] <= 0xFFFFFFFFFFFFFULL * m);
|
||||
VERIFY_CHECK(d[1] <= 0xFFFFFFFFFFFFFULL * m);
|
||||
VERIFY_CHECK(d[2] <= 0xFFFFFFFFFFFFFULL * m);
|
||||
VERIFY_CHECK(d[3] <= 0xFFFFFFFFFFFFFULL * m);
|
||||
VERIFY_CHECK(d[4] <= 0x0FFFFFFFFFFFFULL * m);
|
||||
if (a->normalized) {
|
||||
r &= (a->magnitude <= 1);
|
||||
if (r && (d[4] == 0x0FFFFFFFFFFFFULL) && ((d[3] & d[2] & d[1]) == 0xFFFFFFFFFFFFFULL)) {
|
||||
r &= (d[0] < 0xFFFFEFFFFFC2FULL);
|
||||
if ((d[4] == 0x0FFFFFFFFFFFFULL) && ((d[3] & d[2] & d[1]) == 0xFFFFFFFFFFFFFULL)) {
|
||||
VERIFY_CHECK(d[0] < 0xFFFFEFFFFFC2FULL);
|
||||
}
|
||||
}
|
||||
VERIFY_CHECK(r == 1);
|
||||
}
|
||||
#endif
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_get_bounds(rustsecp256k1_v0_8_1_fe *r, int m) {
|
||||
VERIFY_CHECK(m >= 0);
|
||||
VERIFY_CHECK(m <= 2048);
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_get_bounds(rustsecp256k1_v0_9_0_fe *r, int m) {
|
||||
r->n[0] = 0xFFFFFFFFFFFFFULL * 2 * m;
|
||||
r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * m;
|
||||
r->n[2] = 0xFFFFFFFFFFFFFULL * 2 * m;
|
||||
r->n[3] = 0xFFFFFFFFFFFFFULL * 2 * m;
|
||||
r->n[4] = 0x0FFFFFFFFFFFFULL * 2 * m;
|
||||
#ifdef VERIFY
|
||||
r->magnitude = m;
|
||||
r->normalized = (m == 0);
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_normalize(rustsecp256k1_v0_8_1_fe *r) {
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_normalize(rustsecp256k1_v0_9_0_fe *r) {
|
||||
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
|
||||
|
||||
/* Reduce t4 at the start so there will be at most a single carry from the first pass */
|
||||
|
@ -108,15 +79,9 @@ static void rustsecp256k1_v0_8_1_fe_normalize(rustsecp256k1_v0_8_1_fe *r) {
|
|||
t4 &= 0x0FFFFFFFFFFFFULL;
|
||||
|
||||
r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
|
||||
|
||||
#ifdef VERIFY
|
||||
r->magnitude = 1;
|
||||
r->normalized = 1;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_normalize_weak(rustsecp256k1_v0_8_1_fe *r) {
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_normalize_weak(rustsecp256k1_v0_9_0_fe *r) {
|
||||
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
|
||||
|
||||
/* Reduce t4 at the start so there will be at most a single carry from the first pass */
|
||||
|
@ -133,14 +98,9 @@ static void rustsecp256k1_v0_8_1_fe_normalize_weak(rustsecp256k1_v0_8_1_fe *r) {
|
|||
VERIFY_CHECK(t4 >> 49 == 0);
|
||||
|
||||
r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
|
||||
|
||||
#ifdef VERIFY
|
||||
r->magnitude = 1;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_normalize_var(rustsecp256k1_v0_8_1_fe *r) {
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_normalize_var(rustsecp256k1_v0_9_0_fe *r) {
|
||||
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
|
||||
|
||||
/* Reduce t4 at the start so there will be at most a single carry from the first pass */
|
||||
|
@ -176,15 +136,9 @@ static void rustsecp256k1_v0_8_1_fe_normalize_var(rustsecp256k1_v0_8_1_fe *r) {
|
|||
}
|
||||
|
||||
r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
|
||||
|
||||
#ifdef VERIFY
|
||||
r->magnitude = 1;
|
||||
r->normalized = 1;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_fe_normalizes_to_zero(const rustsecp256k1_v0_8_1_fe *r) {
|
||||
static int rustsecp256k1_v0_9_0_fe_impl_normalizes_to_zero(const rustsecp256k1_v0_9_0_fe *r) {
|
||||
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
|
||||
|
||||
/* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
|
||||
|
@ -207,7 +161,7 @@ static int rustsecp256k1_v0_8_1_fe_normalizes_to_zero(const rustsecp256k1_v0_8_1
|
|||
return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_fe_normalizes_to_zero_var(const rustsecp256k1_v0_8_1_fe *r) {
|
||||
static int rustsecp256k1_v0_9_0_fe_impl_normalizes_to_zero_var(const rustsecp256k1_v0_9_0_fe *r) {
|
||||
uint64_t t0, t1, t2, t3, t4;
|
||||
uint64_t z0, z1;
|
||||
uint64_t x;
|
||||
|
@ -248,53 +202,29 @@ static int rustsecp256k1_v0_8_1_fe_normalizes_to_zero_var(const rustsecp256k1_v0
|
|||
return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_set_int(rustsecp256k1_v0_8_1_fe *r, int a) {
|
||||
VERIFY_CHECK(0 <= a && a <= 0x7FFF);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_impl_set_int(rustsecp256k1_v0_9_0_fe *r, int a) {
|
||||
r->n[0] = a;
|
||||
r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0;
|
||||
#ifdef VERIFY
|
||||
r->magnitude = (a != 0);
|
||||
r->normalized = 1;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static int rustsecp256k1_v0_8_1_fe_is_zero(const rustsecp256k1_v0_8_1_fe *a) {
|
||||
SECP256K1_INLINE static int rustsecp256k1_v0_9_0_fe_impl_is_zero(const rustsecp256k1_v0_9_0_fe *a) {
|
||||
const uint64_t *t = a->n;
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(a->normalized);
|
||||
rustsecp256k1_v0_8_1_fe_verify(a);
|
||||
#endif
|
||||
return (t[0] | t[1] | t[2] | t[3] | t[4]) == 0;
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static int rustsecp256k1_v0_8_1_fe_is_odd(const rustsecp256k1_v0_8_1_fe *a) {
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(a->normalized);
|
||||
rustsecp256k1_v0_8_1_fe_verify(a);
|
||||
#endif
|
||||
SECP256K1_INLINE static int rustsecp256k1_v0_9_0_fe_impl_is_odd(const rustsecp256k1_v0_9_0_fe *a) {
|
||||
return a->n[0] & 1;
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_clear(rustsecp256k1_v0_8_1_fe *a) {
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_impl_clear(rustsecp256k1_v0_9_0_fe *a) {
|
||||
int i;
|
||||
#ifdef VERIFY
|
||||
a->magnitude = 0;
|
||||
a->normalized = 1;
|
||||
#endif
|
||||
for (i=0; i<5; i++) {
|
||||
a->n[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_fe_cmp_var(const rustsecp256k1_v0_8_1_fe *a, const rustsecp256k1_v0_8_1_fe *b) {
|
||||
static int rustsecp256k1_v0_9_0_fe_impl_cmp_var(const rustsecp256k1_v0_9_0_fe *a, const rustsecp256k1_v0_9_0_fe *b) {
|
||||
int i;
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(a->normalized);
|
||||
VERIFY_CHECK(b->normalized);
|
||||
rustsecp256k1_v0_8_1_fe_verify(a);
|
||||
rustsecp256k1_v0_8_1_fe_verify(b);
|
||||
#endif
|
||||
for (i = 4; i >= 0; i--) {
|
||||
if (a->n[i] > b->n[i]) {
|
||||
return 1;
|
||||
|
@ -306,8 +236,7 @@ static int rustsecp256k1_v0_8_1_fe_cmp_var(const rustsecp256k1_v0_8_1_fe *a, con
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_fe_set_b32(rustsecp256k1_v0_8_1_fe *r, const unsigned char *a) {
|
||||
int ret;
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_set_b32_mod(rustsecp256k1_v0_9_0_fe *r, const unsigned char *a) {
|
||||
r->n[0] = (uint64_t)a[31]
|
||||
| ((uint64_t)a[30] << 8)
|
||||
| ((uint64_t)a[29] << 16)
|
||||
|
@ -342,25 +271,15 @@ static int rustsecp256k1_v0_8_1_fe_set_b32(rustsecp256k1_v0_8_1_fe *r, const uns
|
|||
| ((uint64_t)a[2] << 24)
|
||||
| ((uint64_t)a[1] << 32)
|
||||
| ((uint64_t)a[0] << 40);
|
||||
ret = !((r->n[4] == 0x0FFFFFFFFFFFFULL) & ((r->n[3] & r->n[2] & r->n[1]) == 0xFFFFFFFFFFFFFULL) & (r->n[0] >= 0xFFFFEFFFFFC2FULL));
|
||||
#ifdef VERIFY
|
||||
r->magnitude = 1;
|
||||
if (ret) {
|
||||
r->normalized = 1;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
} else {
|
||||
r->normalized = 0;
|
||||
}
|
||||
#endif
|
||||
return ret;
|
||||
|
||||
static int rustsecp256k1_v0_9_0_fe_impl_set_b32_limit(rustsecp256k1_v0_9_0_fe *r, const unsigned char *a) {
|
||||
rustsecp256k1_v0_9_0_fe_impl_set_b32_mod(r, a);
|
||||
return !((r->n[4] == 0x0FFFFFFFFFFFFULL) & ((r->n[3] & r->n[2] & r->n[1]) == 0xFFFFFFFFFFFFFULL) & (r->n[0] >= 0xFFFFEFFFFFC2FULL));
|
||||
}
|
||||
|
||||
/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
|
||||
static void rustsecp256k1_v0_8_1_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_8_1_fe *a) {
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(a->normalized);
|
||||
rustsecp256k1_v0_8_1_fe_verify(a);
|
||||
#endif
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_get_b32(unsigned char *r, const rustsecp256k1_v0_9_0_fe *a) {
|
||||
r[0] = (a->n[4] >> 40) & 0xFF;
|
||||
r[1] = (a->n[4] >> 32) & 0xFF;
|
||||
r[2] = (a->n[4] >> 24) & 0xFF;
|
||||
|
@ -395,113 +314,67 @@ static void rustsecp256k1_v0_8_1_fe_get_b32(unsigned char *r, const rustsecp256k
|
|||
r[31] = a->n[0] & 0xFF;
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_negate(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *a, int m) {
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(a->magnitude <= m);
|
||||
rustsecp256k1_v0_8_1_fe_verify(a);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_impl_negate_unchecked(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a, int m) {
|
||||
/* For all legal values of m (0..31), the following properties hold: */
|
||||
VERIFY_CHECK(0xFFFFEFFFFFC2FULL * 2 * (m + 1) >= 0xFFFFFFFFFFFFFULL * 2 * m);
|
||||
VERIFY_CHECK(0xFFFFFFFFFFFFFULL * 2 * (m + 1) >= 0xFFFFFFFFFFFFFULL * 2 * m);
|
||||
VERIFY_CHECK(0x0FFFFFFFFFFFFULL * 2 * (m + 1) >= 0x0FFFFFFFFFFFFULL * 2 * m);
|
||||
#endif
|
||||
|
||||
/* Due to the properties above, the left hand in the subtractions below is never less than
|
||||
* the right hand. */
|
||||
r->n[0] = 0xFFFFEFFFFFC2FULL * 2 * (m + 1) - a->n[0];
|
||||
r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[1];
|
||||
r->n[2] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[2];
|
||||
r->n[3] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[3];
|
||||
r->n[4] = 0x0FFFFFFFFFFFFULL * 2 * (m + 1) - a->n[4];
|
||||
#ifdef VERIFY
|
||||
r->magnitude = m + 1;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_mul_int(rustsecp256k1_v0_8_1_fe *r, int a) {
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_impl_mul_int_unchecked(rustsecp256k1_v0_9_0_fe *r, int a) {
|
||||
r->n[0] *= a;
|
||||
r->n[1] *= a;
|
||||
r->n[2] *= a;
|
||||
r->n[3] *= a;
|
||||
r->n[4] *= a;
|
||||
#ifdef VERIFY
|
||||
r->magnitude *= a;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_add(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *a) {
|
||||
#ifdef VERIFY
|
||||
rustsecp256k1_v0_8_1_fe_verify(a);
|
||||
#endif
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_impl_add_int(rustsecp256k1_v0_9_0_fe *r, int a) {
|
||||
r->n[0] += a;
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_impl_add(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a) {
|
||||
r->n[0] += a->n[0];
|
||||
r->n[1] += a->n[1];
|
||||
r->n[2] += a->n[2];
|
||||
r->n[3] += a->n[3];
|
||||
r->n[4] += a->n[4];
|
||||
#ifdef VERIFY
|
||||
r->magnitude += a->magnitude;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_mul(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *a, const rustsecp256k1_v0_8_1_fe * SECP256K1_RESTRICT b) {
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(a->magnitude <= 8);
|
||||
VERIFY_CHECK(b->magnitude <= 8);
|
||||
rustsecp256k1_v0_8_1_fe_verify(a);
|
||||
rustsecp256k1_v0_8_1_fe_verify(b);
|
||||
VERIFY_CHECK(r != b);
|
||||
VERIFY_CHECK(a != b);
|
||||
#endif
|
||||
rustsecp256k1_v0_8_1_fe_mul_inner(r->n, a->n, b->n);
|
||||
#ifdef VERIFY
|
||||
r->magnitude = 1;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_impl_mul(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a, const rustsecp256k1_v0_9_0_fe * SECP256K1_RESTRICT b) {
|
||||
rustsecp256k1_v0_9_0_fe_mul_inner(r->n, a->n, b->n);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_sqr(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *a) {
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(a->magnitude <= 8);
|
||||
rustsecp256k1_v0_8_1_fe_verify(a);
|
||||
#endif
|
||||
rustsecp256k1_v0_8_1_fe_sqr_inner(r->n, a->n);
|
||||
#ifdef VERIFY
|
||||
r->magnitude = 1;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_impl_sqr(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a) {
|
||||
rustsecp256k1_v0_9_0_fe_sqr_inner(r->n, a->n);
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_fe_cmov(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *a, int flag) {
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_impl_cmov(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a, int flag) {
|
||||
uint64_t mask0, mask1;
|
||||
VG_CHECK_VERIFY(r->n, sizeof(r->n));
|
||||
mask0 = flag + ~((uint64_t)0);
|
||||
volatile int vflag = flag;
|
||||
SECP256K1_CHECKMEM_CHECK_VERIFY(r->n, sizeof(r->n));
|
||||
mask0 = vflag + ~((uint64_t)0);
|
||||
mask1 = ~mask0;
|
||||
r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
|
||||
r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
|
||||
r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
|
||||
r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
|
||||
r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
|
||||
#ifdef VERIFY
|
||||
if (flag) {
|
||||
r->magnitude = a->magnitude;
|
||||
r->normalized = a->normalized;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_fe_half(rustsecp256k1_v0_8_1_fe *r) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_fe_impl_half(rustsecp256k1_v0_9_0_fe *r) {
|
||||
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
|
||||
uint64_t one = (uint64_t)1;
|
||||
uint64_t mask = -(t0 & one) >> 12;
|
||||
|
||||
#ifdef VERIFY
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
VERIFY_CHECK(r->magnitude < 32);
|
||||
#endif
|
||||
|
||||
/* Bounds analysis (over the rationals).
|
||||
*
|
||||
* Let m = r->magnitude
|
||||
|
@ -538,10 +411,8 @@ static SECP256K1_INLINE void rustsecp256k1_v0_8_1_fe_half(rustsecp256k1_v0_8_1_f
|
|||
*
|
||||
* Current bounds: t0..t3 <= C * (m/2 + 1/2)
|
||||
* t4 <= D * (m/2 + 1/4)
|
||||
*/
|
||||
|
||||
#ifdef VERIFY
|
||||
/* Therefore the output magnitude (M) has to be set such that:
|
||||
*
|
||||
* Therefore the output magnitude (M) has to be set such that:
|
||||
* t0..t3: C * M >= C * (m/2 + 1/2)
|
||||
* t4: D * M >= D * (m/2 + 1/4)
|
||||
*
|
||||
|
@ -551,16 +422,13 @@ static SECP256K1_INLINE void rustsecp256k1_v0_8_1_fe_half(rustsecp256k1_v0_8_1_f
|
|||
* and since we want the smallest such integer value for M:
|
||||
* M == floor(m/2) + 1
|
||||
*/
|
||||
r->magnitude = (r->magnitude >> 1) + 1;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_fe_storage_cmov(rustsecp256k1_v0_8_1_fe_storage *r, const rustsecp256k1_v0_8_1_fe_storage *a, int flag) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_fe_storage_cmov(rustsecp256k1_v0_9_0_fe_storage *r, const rustsecp256k1_v0_9_0_fe_storage *a, int flag) {
|
||||
uint64_t mask0, mask1;
|
||||
VG_CHECK_VERIFY(r->n, sizeof(r->n));
|
||||
mask0 = flag + ~((uint64_t)0);
|
||||
volatile int vflag = flag;
|
||||
SECP256K1_CHECKMEM_CHECK_VERIFY(r->n, sizeof(r->n));
|
||||
mask0 = vflag + ~((uint64_t)0);
|
||||
mask1 = ~mask0;
|
||||
r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
|
||||
r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
|
||||
|
@ -568,34 +436,26 @@ static SECP256K1_INLINE void rustsecp256k1_v0_8_1_fe_storage_cmov(rustsecp256k1_
|
|||
r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_to_storage(rustsecp256k1_v0_8_1_fe_storage *r, const rustsecp256k1_v0_8_1_fe *a) {
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(a->normalized);
|
||||
#endif
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_to_storage(rustsecp256k1_v0_9_0_fe_storage *r, const rustsecp256k1_v0_9_0_fe *a) {
|
||||
r->n[0] = a->n[0] | a->n[1] << 52;
|
||||
r->n[1] = a->n[1] >> 12 | a->n[2] << 40;
|
||||
r->n[2] = a->n[2] >> 24 | a->n[3] << 28;
|
||||
r->n[3] = a->n[3] >> 36 | a->n[4] << 16;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_fe_from_storage(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe_storage *a) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_fe_impl_from_storage(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe_storage *a) {
|
||||
r->n[0] = a->n[0] & 0xFFFFFFFFFFFFFULL;
|
||||
r->n[1] = a->n[0] >> 52 | ((a->n[1] << 12) & 0xFFFFFFFFFFFFFULL);
|
||||
r->n[2] = a->n[1] >> 40 | ((a->n[2] << 24) & 0xFFFFFFFFFFFFFULL);
|
||||
r->n[3] = a->n[2] >> 28 | ((a->n[3] << 36) & 0xFFFFFFFFFFFFFULL);
|
||||
r->n[4] = a->n[3] >> 16;
|
||||
#ifdef VERIFY
|
||||
r->magnitude = 1;
|
||||
r->normalized = 1;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_from_signed62(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_modinv64_signed62 *a) {
|
||||
static void rustsecp256k1_v0_9_0_fe_from_signed62(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_modinv64_signed62 *a) {
|
||||
const uint64_t M52 = UINT64_MAX >> 12;
|
||||
const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4];
|
||||
|
||||
/* The output from rustsecp256k1_v0_8_1_modinv64{_var} should be normalized to range [0,modulus), and
|
||||
/* The output from rustsecp256k1_v0_9_0_modinv64{_var} should be normalized to range [0,modulus), and
|
||||
* have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4).
|
||||
*/
|
||||
VERIFY_CHECK(a0 >> 62 == 0);
|
||||
|
@ -609,22 +469,12 @@ static void rustsecp256k1_v0_8_1_fe_from_signed62(rustsecp256k1_v0_8_1_fe *r, co
|
|||
r->n[2] = (a1 >> 42 | a2 << 20) & M52;
|
||||
r->n[3] = (a2 >> 32 | a3 << 30) & M52;
|
||||
r->n[4] = (a3 >> 22 | a4 << 40);
|
||||
|
||||
#ifdef VERIFY
|
||||
r->magnitude = 1;
|
||||
r->normalized = 1;
|
||||
rustsecp256k1_v0_8_1_fe_verify(r);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_to_signed62(rustsecp256k1_v0_8_1_modinv64_signed62 *r, const rustsecp256k1_v0_8_1_fe *a) {
|
||||
static void rustsecp256k1_v0_9_0_fe_to_signed62(rustsecp256k1_v0_9_0_modinv64_signed62 *r, const rustsecp256k1_v0_9_0_fe *a) {
|
||||
const uint64_t M62 = UINT64_MAX >> 2;
|
||||
const uint64_t a0 = a->n[0], a1 = a->n[1], a2 = a->n[2], a3 = a->n[3], a4 = a->n[4];
|
||||
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(a->normalized);
|
||||
#endif
|
||||
|
||||
r->v[0] = (a0 | a1 << 52) & M62;
|
||||
r->v[1] = (a1 >> 10 | a2 << 42) & M62;
|
||||
r->v[2] = (a2 >> 20 | a3 << 32) & M62;
|
||||
|
@ -632,39 +482,52 @@ static void rustsecp256k1_v0_8_1_fe_to_signed62(rustsecp256k1_v0_8_1_modinv64_si
|
|||
r->v[4] = a4 >> 40;
|
||||
}
|
||||
|
||||
static const rustsecp256k1_v0_8_1_modinv64_modinfo rustsecp256k1_v0_8_1_const_modinfo_fe = {
|
||||
static const rustsecp256k1_v0_9_0_modinv64_modinfo rustsecp256k1_v0_9_0_const_modinfo_fe = {
|
||||
{{-0x1000003D1LL, 0, 0, 0, 256}},
|
||||
0x27C7F6E22DDACACFLL
|
||||
};
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_inv(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *x) {
|
||||
rustsecp256k1_v0_8_1_fe tmp;
|
||||
rustsecp256k1_v0_8_1_modinv64_signed62 s;
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_inv(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *x) {
|
||||
rustsecp256k1_v0_9_0_fe tmp = *x;
|
||||
rustsecp256k1_v0_9_0_modinv64_signed62 s;
|
||||
|
||||
tmp = *x;
|
||||
rustsecp256k1_v0_8_1_fe_normalize(&tmp);
|
||||
rustsecp256k1_v0_8_1_fe_to_signed62(&s, &tmp);
|
||||
rustsecp256k1_v0_8_1_modinv64(&s, &rustsecp256k1_v0_8_1_const_modinfo_fe);
|
||||
rustsecp256k1_v0_8_1_fe_from_signed62(r, &s);
|
||||
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_fe_normalizes_to_zero(r) == rustsecp256k1_v0_8_1_fe_normalizes_to_zero(&tmp));
|
||||
#endif
|
||||
rustsecp256k1_v0_9_0_fe_normalize(&tmp);
|
||||
rustsecp256k1_v0_9_0_fe_to_signed62(&s, &tmp);
|
||||
rustsecp256k1_v0_9_0_modinv64(&s, &rustsecp256k1_v0_9_0_const_modinfo_fe);
|
||||
rustsecp256k1_v0_9_0_fe_from_signed62(r, &s);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_fe_inv_var(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *x) {
|
||||
rustsecp256k1_v0_8_1_fe tmp;
|
||||
rustsecp256k1_v0_8_1_modinv64_signed62 s;
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_inv_var(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *x) {
|
||||
rustsecp256k1_v0_9_0_fe tmp = *x;
|
||||
rustsecp256k1_v0_9_0_modinv64_signed62 s;
|
||||
|
||||
rustsecp256k1_v0_9_0_fe_normalize_var(&tmp);
|
||||
rustsecp256k1_v0_9_0_fe_to_signed62(&s, &tmp);
|
||||
rustsecp256k1_v0_9_0_modinv64_var(&s, &rustsecp256k1_v0_9_0_const_modinfo_fe);
|
||||
rustsecp256k1_v0_9_0_fe_from_signed62(r, &s);
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_9_0_fe_impl_is_square_var(const rustsecp256k1_v0_9_0_fe *x) {
|
||||
rustsecp256k1_v0_9_0_fe tmp;
|
||||
rustsecp256k1_v0_9_0_modinv64_signed62 s;
|
||||
int jac, ret;
|
||||
|
||||
tmp = *x;
|
||||
rustsecp256k1_v0_8_1_fe_normalize_var(&tmp);
|
||||
rustsecp256k1_v0_8_1_fe_to_signed62(&s, &tmp);
|
||||
rustsecp256k1_v0_8_1_modinv64_var(&s, &rustsecp256k1_v0_8_1_const_modinfo_fe);
|
||||
rustsecp256k1_v0_8_1_fe_from_signed62(r, &s);
|
||||
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_fe_normalizes_to_zero(r) == rustsecp256k1_v0_8_1_fe_normalizes_to_zero(&tmp));
|
||||
#endif
|
||||
rustsecp256k1_v0_9_0_fe_normalize_var(&tmp);
|
||||
/* rustsecp256k1_v0_9_0_jacobi64_maybe_var cannot deal with input 0. */
|
||||
if (rustsecp256k1_v0_9_0_fe_is_zero(&tmp)) return 1;
|
||||
rustsecp256k1_v0_9_0_fe_to_signed62(&s, &tmp);
|
||||
jac = rustsecp256k1_v0_9_0_jacobi64_maybe_var(&s, &rustsecp256k1_v0_9_0_const_modinfo_fe);
|
||||
if (jac == 0) {
|
||||
/* rustsecp256k1_v0_9_0_jacobi64_maybe_var failed to compute the Jacobi symbol. Fall back
|
||||
* to computing a square root. This should be extremely rare with random
|
||||
* input (except in VERIFY mode, where a lower iteration count is used). */
|
||||
rustsecp256k1_v0_9_0_fe dummy;
|
||||
ret = rustsecp256k1_v0_9_0_fe_sqrt(&dummy, &tmp);
|
||||
} else {
|
||||
ret = jac >= 0;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif /* SECP256K1_FIELD_REPR_IMPL_H */
|
||||
|
|
|
@ -10,17 +10,18 @@
|
|||
#include <stdint.h>
|
||||
|
||||
#include "int128.h"
|
||||
#include "util.h"
|
||||
|
||||
#ifdef VERIFY
|
||||
#define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
|
||||
#define VERIFY_BITS_128(x, n) VERIFY_CHECK(rustsecp256k1_v0_8_1_u128_check_bits((x), (n)))
|
||||
#define VERIFY_BITS_128(x, n) VERIFY_CHECK(rustsecp256k1_v0_9_0_u128_check_bits((x), (n)))
|
||||
#else
|
||||
#define VERIFY_BITS(x, n) do { } while(0)
|
||||
#define VERIFY_BITS_128(x, n) do { } while(0)
|
||||
#endif
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
|
||||
rustsecp256k1_v0_8_1_uint128 c, d;
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
|
||||
rustsecp256k1_v0_9_0_uint128 c, d;
|
||||
uint64_t t3, t4, tx, u0;
|
||||
uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4];
|
||||
const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL;
|
||||
|
@ -44,35 +45,35 @@ SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_mul_inner(uint64_t *r, cons
|
|||
* Note that [x 0 0 0 0 0] = [x*R].
|
||||
*/
|
||||
|
||||
rustsecp256k1_v0_8_1_u128_mul(&d, a0, b[3]);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a1, b[2]);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a2, b[1]);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a3, b[0]);
|
||||
rustsecp256k1_v0_9_0_u128_mul(&d, a0, b[3]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a1, b[2]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a2, b[1]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a3, b[0]);
|
||||
VERIFY_BITS_128(&d, 114);
|
||||
/* [d 0 0 0] = [p3 0 0 0] */
|
||||
rustsecp256k1_v0_8_1_u128_mul(&c, a4, b[4]);
|
||||
rustsecp256k1_v0_9_0_u128_mul(&c, a4, b[4]);
|
||||
VERIFY_BITS_128(&c, 112);
|
||||
/* [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, R, rustsecp256k1_v0_8_1_u128_to_u64(&c)); rustsecp256k1_v0_8_1_u128_rshift(&c, 64);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, R, rustsecp256k1_v0_9_0_u128_to_u64(&c)); rustsecp256k1_v0_9_0_u128_rshift(&c, 64);
|
||||
VERIFY_BITS_128(&d, 115);
|
||||
VERIFY_BITS_128(&c, 48);
|
||||
/* [(c<<12) 0 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
|
||||
t3 = rustsecp256k1_v0_8_1_u128_to_u64(&d) & M; rustsecp256k1_v0_8_1_u128_rshift(&d, 52);
|
||||
t3 = rustsecp256k1_v0_9_0_u128_to_u64(&d) & M; rustsecp256k1_v0_9_0_u128_rshift(&d, 52);
|
||||
VERIFY_BITS(t3, 52);
|
||||
VERIFY_BITS_128(&d, 63);
|
||||
/* [(c<<12) 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
|
||||
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a0, b[4]);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a1, b[3]);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a2, b[2]);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a3, b[1]);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a4, b[0]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a0, b[4]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a1, b[3]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a2, b[2]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a3, b[1]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a4, b[0]);
|
||||
VERIFY_BITS_128(&d, 115);
|
||||
/* [(c<<12) 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, R << 12, rustsecp256k1_v0_8_1_u128_to_u64(&c));
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, R << 12, rustsecp256k1_v0_9_0_u128_to_u64(&c));
|
||||
VERIFY_BITS_128(&d, 116);
|
||||
/* [d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
|
||||
t4 = rustsecp256k1_v0_8_1_u128_to_u64(&d) & M; rustsecp256k1_v0_8_1_u128_rshift(&d, 52);
|
||||
t4 = rustsecp256k1_v0_9_0_u128_to_u64(&d) & M; rustsecp256k1_v0_9_0_u128_rshift(&d, 52);
|
||||
VERIFY_BITS(t4, 52);
|
||||
VERIFY_BITS_128(&d, 64);
|
||||
/* [d t4 t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
|
||||
|
@ -81,16 +82,16 @@ SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_mul_inner(uint64_t *r, cons
|
|||
VERIFY_BITS(t4, 48);
|
||||
/* [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
|
||||
|
||||
rustsecp256k1_v0_8_1_u128_mul(&c, a0, b[0]);
|
||||
rustsecp256k1_v0_9_0_u128_mul(&c, a0, b[0]);
|
||||
VERIFY_BITS_128(&c, 112);
|
||||
/* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] */
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a1, b[4]);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a2, b[3]);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a3, b[2]);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a4, b[1]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a1, b[4]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a2, b[3]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a3, b[2]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a4, b[1]);
|
||||
VERIFY_BITS_128(&d, 115);
|
||||
/* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
|
||||
u0 = rustsecp256k1_v0_8_1_u128_to_u64(&d) & M; rustsecp256k1_v0_8_1_u128_rshift(&d, 52);
|
||||
u0 = rustsecp256k1_v0_9_0_u128_to_u64(&d) & M; rustsecp256k1_v0_9_0_u128_rshift(&d, 52);
|
||||
VERIFY_BITS(u0, 52);
|
||||
VERIFY_BITS_128(&d, 63);
|
||||
/* [d u0 t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
|
||||
|
@ -98,65 +99,65 @@ SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_mul_inner(uint64_t *r, cons
|
|||
u0 = (u0 << 4) | tx;
|
||||
VERIFY_BITS(u0, 56);
|
||||
/* [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&c, u0, R >> 4);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&c, u0, R >> 4);
|
||||
VERIFY_BITS_128(&c, 115);
|
||||
/* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
|
||||
r[0] = rustsecp256k1_v0_8_1_u128_to_u64(&c) & M; rustsecp256k1_v0_8_1_u128_rshift(&c, 52);
|
||||
r[0] = rustsecp256k1_v0_9_0_u128_to_u64(&c) & M; rustsecp256k1_v0_9_0_u128_rshift(&c, 52);
|
||||
VERIFY_BITS(r[0], 52);
|
||||
VERIFY_BITS_128(&c, 61);
|
||||
/* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 0 p0] */
|
||||
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&c, a0, b[1]);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&c, a1, b[0]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&c, a0, b[1]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&c, a1, b[0]);
|
||||
VERIFY_BITS_128(&c, 114);
|
||||
/* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 p1 p0] */
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a2, b[4]);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a3, b[3]);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a4, b[2]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a2, b[4]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a3, b[3]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a4, b[2]);
|
||||
VERIFY_BITS_128(&d, 114);
|
||||
/* [d 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&c, rustsecp256k1_v0_8_1_u128_to_u64(&d) & M, R); rustsecp256k1_v0_8_1_u128_rshift(&d, 52);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&c, rustsecp256k1_v0_9_0_u128_to_u64(&d) & M, R); rustsecp256k1_v0_9_0_u128_rshift(&d, 52);
|
||||
VERIFY_BITS_128(&c, 115);
|
||||
VERIFY_BITS_128(&d, 62);
|
||||
/* [d 0 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
|
||||
r[1] = rustsecp256k1_v0_8_1_u128_to_u64(&c) & M; rustsecp256k1_v0_8_1_u128_rshift(&c, 52);
|
||||
r[1] = rustsecp256k1_v0_9_0_u128_to_u64(&c) & M; rustsecp256k1_v0_9_0_u128_rshift(&c, 52);
|
||||
VERIFY_BITS(r[1], 52);
|
||||
VERIFY_BITS_128(&c, 63);
|
||||
/* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
|
||||
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&c, a0, b[2]);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&c, a1, b[1]);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&c, a2, b[0]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&c, a0, b[2]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&c, a1, b[1]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&c, a2, b[0]);
|
||||
VERIFY_BITS_128(&c, 114);
|
||||
/* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a3, b[4]);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a4, b[3]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a3, b[4]);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a4, b[3]);
|
||||
VERIFY_BITS_128(&d, 114);
|
||||
/* [d 0 0 t4 t3 c t1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&c, R, rustsecp256k1_v0_8_1_u128_to_u64(&d)); rustsecp256k1_v0_8_1_u128_rshift(&d, 64);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&c, R, rustsecp256k1_v0_9_0_u128_to_u64(&d)); rustsecp256k1_v0_9_0_u128_rshift(&d, 64);
|
||||
VERIFY_BITS_128(&c, 115);
|
||||
VERIFY_BITS_128(&d, 50);
|
||||
/* [(d<<12) 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
|
||||
|
||||
r[2] = rustsecp256k1_v0_8_1_u128_to_u64(&c) & M; rustsecp256k1_v0_8_1_u128_rshift(&c, 52);
|
||||
r[2] = rustsecp256k1_v0_9_0_u128_to_u64(&c) & M; rustsecp256k1_v0_9_0_u128_rshift(&c, 52);
|
||||
VERIFY_BITS(r[2], 52);
|
||||
VERIFY_BITS_128(&c, 63);
|
||||
/* [(d<<12) 0 0 0 t4 t3+c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&c, R << 12, rustsecp256k1_v0_8_1_u128_to_u64(&d));
|
||||
rustsecp256k1_v0_8_1_u128_accum_u64(&c, t3);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&c, R << 12, rustsecp256k1_v0_9_0_u128_to_u64(&d));
|
||||
rustsecp256k1_v0_9_0_u128_accum_u64(&c, t3);
|
||||
VERIFY_BITS_128(&c, 100);
|
||||
/* [t4 c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
|
||||
r[3] = rustsecp256k1_v0_8_1_u128_to_u64(&c) & M; rustsecp256k1_v0_8_1_u128_rshift(&c, 52);
|
||||
r[3] = rustsecp256k1_v0_9_0_u128_to_u64(&c) & M; rustsecp256k1_v0_9_0_u128_rshift(&c, 52);
|
||||
VERIFY_BITS(r[3], 52);
|
||||
VERIFY_BITS_128(&c, 48);
|
||||
/* [t4+c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
|
||||
r[4] = rustsecp256k1_v0_8_1_u128_to_u64(&c) + t4;
|
||||
r[4] = rustsecp256k1_v0_9_0_u128_to_u64(&c) + t4;
|
||||
VERIFY_BITS(r[4], 49);
|
||||
/* [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_sqr_inner(uint64_t *r, const uint64_t *a) {
|
||||
rustsecp256k1_v0_8_1_uint128 c, d;
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_sqr_inner(uint64_t *r, const uint64_t *a) {
|
||||
rustsecp256k1_v0_9_0_uint128 c, d;
|
||||
uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4];
|
||||
int64_t t3, t4, tx, u0;
|
||||
const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL;
|
||||
|
@ -172,32 +173,32 @@ SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_sqr_inner(uint64_t *r, cons
|
|||
* Note that [x 0 0 0 0 0] = [x*R].
|
||||
*/
|
||||
|
||||
rustsecp256k1_v0_8_1_u128_mul(&d, a0*2, a3);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a1*2, a2);
|
||||
rustsecp256k1_v0_9_0_u128_mul(&d, a0*2, a3);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a1*2, a2);
|
||||
VERIFY_BITS_128(&d, 114);
|
||||
/* [d 0 0 0] = [p3 0 0 0] */
|
||||
rustsecp256k1_v0_8_1_u128_mul(&c, a4, a4);
|
||||
rustsecp256k1_v0_9_0_u128_mul(&c, a4, a4);
|
||||
VERIFY_BITS_128(&c, 112);
|
||||
/* [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, R, rustsecp256k1_v0_8_1_u128_to_u64(&c)); rustsecp256k1_v0_8_1_u128_rshift(&c, 64);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, R, rustsecp256k1_v0_9_0_u128_to_u64(&c)); rustsecp256k1_v0_9_0_u128_rshift(&c, 64);
|
||||
VERIFY_BITS_128(&d, 115);
|
||||
VERIFY_BITS_128(&c, 48);
|
||||
/* [(c<<12) 0 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
|
||||
t3 = rustsecp256k1_v0_8_1_u128_to_u64(&d) & M; rustsecp256k1_v0_8_1_u128_rshift(&d, 52);
|
||||
t3 = rustsecp256k1_v0_9_0_u128_to_u64(&d) & M; rustsecp256k1_v0_9_0_u128_rshift(&d, 52);
|
||||
VERIFY_BITS(t3, 52);
|
||||
VERIFY_BITS_128(&d, 63);
|
||||
/* [(c<<12) 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
|
||||
|
||||
a4 *= 2;
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a0, a4);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a1*2, a3);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a2, a2);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a0, a4);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a1*2, a3);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a2, a2);
|
||||
VERIFY_BITS_128(&d, 115);
|
||||
/* [(c<<12) 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, R << 12, rustsecp256k1_v0_8_1_u128_to_u64(&c));
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, R << 12, rustsecp256k1_v0_9_0_u128_to_u64(&c));
|
||||
VERIFY_BITS_128(&d, 116);
|
||||
/* [d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
|
||||
t4 = rustsecp256k1_v0_8_1_u128_to_u64(&d) & M; rustsecp256k1_v0_8_1_u128_rshift(&d, 52);
|
||||
t4 = rustsecp256k1_v0_9_0_u128_to_u64(&d) & M; rustsecp256k1_v0_9_0_u128_rshift(&d, 52);
|
||||
VERIFY_BITS(t4, 52);
|
||||
VERIFY_BITS_128(&d, 64);
|
||||
/* [d t4 t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
|
||||
|
@ -206,14 +207,14 @@ SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_sqr_inner(uint64_t *r, cons
|
|||
VERIFY_BITS(t4, 48);
|
||||
/* [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
|
||||
|
||||
rustsecp256k1_v0_8_1_u128_mul(&c, a0, a0);
|
||||
rustsecp256k1_v0_9_0_u128_mul(&c, a0, a0);
|
||||
VERIFY_BITS_128(&c, 112);
|
||||
/* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] */
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a1, a4);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a2*2, a3);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a1, a4);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a2*2, a3);
|
||||
VERIFY_BITS_128(&d, 114);
|
||||
/* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
|
||||
u0 = rustsecp256k1_v0_8_1_u128_to_u64(&d) & M; rustsecp256k1_v0_8_1_u128_rshift(&d, 52);
|
||||
u0 = rustsecp256k1_v0_9_0_u128_to_u64(&d) & M; rustsecp256k1_v0_9_0_u128_rshift(&d, 52);
|
||||
VERIFY_BITS(u0, 52);
|
||||
VERIFY_BITS_128(&d, 62);
|
||||
/* [d u0 t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
|
||||
|
@ -221,56 +222,56 @@ SECP256K1_INLINE static void rustsecp256k1_v0_8_1_fe_sqr_inner(uint64_t *r, cons
|
|||
u0 = (u0 << 4) | tx;
|
||||
VERIFY_BITS(u0, 56);
|
||||
/* [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&c, u0, R >> 4);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&c, u0, R >> 4);
|
||||
VERIFY_BITS_128(&c, 113);
|
||||
/* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
|
||||
r[0] = rustsecp256k1_v0_8_1_u128_to_u64(&c) & M; rustsecp256k1_v0_8_1_u128_rshift(&c, 52);
|
||||
r[0] = rustsecp256k1_v0_9_0_u128_to_u64(&c) & M; rustsecp256k1_v0_9_0_u128_rshift(&c, 52);
|
||||
VERIFY_BITS(r[0], 52);
|
||||
VERIFY_BITS_128(&c, 61);
|
||||
/* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 0 p0] */
|
||||
|
||||
a0 *= 2;
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&c, a0, a1);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&c, a0, a1);
|
||||
VERIFY_BITS_128(&c, 114);
|
||||
/* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 p1 p0] */
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a2, a4);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a3, a3);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a2, a4);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a3, a3);
|
||||
VERIFY_BITS_128(&d, 114);
|
||||
/* [d 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&c, rustsecp256k1_v0_8_1_u128_to_u64(&d) & M, R); rustsecp256k1_v0_8_1_u128_rshift(&d, 52);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&c, rustsecp256k1_v0_9_0_u128_to_u64(&d) & M, R); rustsecp256k1_v0_9_0_u128_rshift(&d, 52);
|
||||
VERIFY_BITS_128(&c, 115);
|
||||
VERIFY_BITS_128(&d, 62);
|
||||
/* [d 0 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
|
||||
r[1] = rustsecp256k1_v0_8_1_u128_to_u64(&c) & M; rustsecp256k1_v0_8_1_u128_rshift(&c, 52);
|
||||
r[1] = rustsecp256k1_v0_9_0_u128_to_u64(&c) & M; rustsecp256k1_v0_9_0_u128_rshift(&c, 52);
|
||||
VERIFY_BITS(r[1], 52);
|
||||
VERIFY_BITS_128(&c, 63);
|
||||
/* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
|
||||
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&c, a0, a2);
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&c, a1, a1);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&c, a0, a2);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&c, a1, a1);
|
||||
VERIFY_BITS_128(&c, 114);
|
||||
/* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&d, a3, a4);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&d, a3, a4);
|
||||
VERIFY_BITS_128(&d, 114);
|
||||
/* [d 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&c, R, rustsecp256k1_v0_8_1_u128_to_u64(&d)); rustsecp256k1_v0_8_1_u128_rshift(&d, 64);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&c, R, rustsecp256k1_v0_9_0_u128_to_u64(&d)); rustsecp256k1_v0_9_0_u128_rshift(&d, 64);
|
||||
VERIFY_BITS_128(&c, 115);
|
||||
VERIFY_BITS_128(&d, 50);
|
||||
/* [(d<<12) 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
|
||||
r[2] = rustsecp256k1_v0_8_1_u128_to_u64(&c) & M; rustsecp256k1_v0_8_1_u128_rshift(&c, 52);
|
||||
r[2] = rustsecp256k1_v0_9_0_u128_to_u64(&c) & M; rustsecp256k1_v0_9_0_u128_rshift(&c, 52);
|
||||
VERIFY_BITS(r[2], 52);
|
||||
VERIFY_BITS_128(&c, 63);
|
||||
/* [(d<<12) 0 0 0 t4 t3+c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
|
||||
|
||||
rustsecp256k1_v0_8_1_u128_accum_mul(&c, R << 12, rustsecp256k1_v0_8_1_u128_to_u64(&d));
|
||||
rustsecp256k1_v0_8_1_u128_accum_u64(&c, t3);
|
||||
rustsecp256k1_v0_9_0_u128_accum_mul(&c, R << 12, rustsecp256k1_v0_9_0_u128_to_u64(&d));
|
||||
rustsecp256k1_v0_9_0_u128_accum_u64(&c, t3);
|
||||
VERIFY_BITS_128(&c, 100);
|
||||
/* [t4 c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
|
||||
r[3] = rustsecp256k1_v0_8_1_u128_to_u64(&c) & M; rustsecp256k1_v0_8_1_u128_rshift(&c, 52);
|
||||
r[3] = rustsecp256k1_v0_9_0_u128_to_u64(&c) & M; rustsecp256k1_v0_9_0_u128_rshift(&c, 52);
|
||||
VERIFY_BITS(r[3], 52);
|
||||
VERIFY_BITS_128(&c, 48);
|
||||
/* [t4+c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
|
||||
r[4] = rustsecp256k1_v0_8_1_u128_to_u64(&c) + t4;
|
||||
r[4] = rustsecp256k1_v0_9_0_u128_to_u64(&c) + t4;
|
||||
VERIFY_BITS(r[4], 49);
|
||||
/* [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
|
||||
}
|
||||
|
|
|
@ -7,10 +7,7 @@
|
|||
#ifndef SECP256K1_FIELD_IMPL_H
|
||||
#define SECP256K1_FIELD_IMPL_H
|
||||
|
||||
#if defined HAVE_CONFIG_H
|
||||
#include "libsecp256k1-config.h"
|
||||
#endif
|
||||
|
||||
#include "field.h"
|
||||
#include "util.h"
|
||||
|
||||
#if defined(SECP256K1_WIDEMUL_INT128)
|
||||
|
@ -21,21 +18,20 @@
|
|||
#error "Please select wide multiplication implementation"
|
||||
#endif
|
||||
|
||||
SECP256K1_INLINE static int rustsecp256k1_v0_8_1_fe_equal(const rustsecp256k1_v0_8_1_fe *a, const rustsecp256k1_v0_8_1_fe *b) {
|
||||
rustsecp256k1_v0_8_1_fe na;
|
||||
rustsecp256k1_v0_8_1_fe_negate(&na, a, 1);
|
||||
rustsecp256k1_v0_8_1_fe_add(&na, b);
|
||||
return rustsecp256k1_v0_8_1_fe_normalizes_to_zero(&na);
|
||||
SECP256K1_INLINE static int rustsecp256k1_v0_9_0_fe_equal(const rustsecp256k1_v0_9_0_fe *a, const rustsecp256k1_v0_9_0_fe *b) {
|
||||
rustsecp256k1_v0_9_0_fe na;
|
||||
#ifdef VERIFY
|
||||
rustsecp256k1_v0_9_0_fe_verify(a);
|
||||
rustsecp256k1_v0_9_0_fe_verify(b);
|
||||
rustsecp256k1_v0_9_0_fe_verify_magnitude(a, 1);
|
||||
rustsecp256k1_v0_9_0_fe_verify_magnitude(b, 31);
|
||||
#endif
|
||||
rustsecp256k1_v0_9_0_fe_negate(&na, a, 1);
|
||||
rustsecp256k1_v0_9_0_fe_add(&na, b);
|
||||
return rustsecp256k1_v0_9_0_fe_normalizes_to_zero(&na);
|
||||
}
|
||||
|
||||
SECP256K1_INLINE static int rustsecp256k1_v0_8_1_fe_equal_var(const rustsecp256k1_v0_8_1_fe *a, const rustsecp256k1_v0_8_1_fe *b) {
|
||||
rustsecp256k1_v0_8_1_fe na;
|
||||
rustsecp256k1_v0_8_1_fe_negate(&na, a, 1);
|
||||
rustsecp256k1_v0_8_1_fe_add(&na, b);
|
||||
return rustsecp256k1_v0_8_1_fe_normalizes_to_zero_var(&na);
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_8_1_fe_sqrt(rustsecp256k1_v0_8_1_fe *r, const rustsecp256k1_v0_8_1_fe *a) {
|
||||
static int rustsecp256k1_v0_9_0_fe_sqrt(rustsecp256k1_v0_9_0_fe * SECP256K1_RESTRICT r, const rustsecp256k1_v0_9_0_fe * SECP256K1_RESTRICT a) {
|
||||
/** Given that p is congruent to 3 mod 4, we can compute the square root of
|
||||
* a mod p as the (p+1)/4'th power of a.
|
||||
*
|
||||
|
@ -45,94 +41,384 @@ static int rustsecp256k1_v0_8_1_fe_sqrt(rustsecp256k1_v0_8_1_fe *r, const rustse
|
|||
* Also because (p+1)/4 is an even number, the computed square root is
|
||||
* itself always a square (a ** ((p+1)/4) is the square of a ** ((p+1)/8)).
|
||||
*/
|
||||
rustsecp256k1_v0_8_1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
|
||||
int j;
|
||||
rustsecp256k1_v0_9_0_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
|
||||
int j, ret;
|
||||
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(r != a);
|
||||
rustsecp256k1_v0_9_0_fe_verify(a);
|
||||
rustsecp256k1_v0_9_0_fe_verify_magnitude(a, 8);
|
||||
#endif
|
||||
|
||||
/** The binary representation of (p + 1)/4 has 3 blocks of 1s, with lengths in
|
||||
* { 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
|
||||
* 1, [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
|
||||
*/
|
||||
|
||||
rustsecp256k1_v0_8_1_fe_sqr(&x2, a);
|
||||
rustsecp256k1_v0_8_1_fe_mul(&x2, &x2, a);
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&x2, a);
|
||||
rustsecp256k1_v0_9_0_fe_mul(&x2, &x2, a);
|
||||
|
||||
rustsecp256k1_v0_8_1_fe_sqr(&x3, &x2);
|
||||
rustsecp256k1_v0_8_1_fe_mul(&x3, &x3, a);
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&x3, &x2);
|
||||
rustsecp256k1_v0_9_0_fe_mul(&x3, &x3, a);
|
||||
|
||||
x6 = x3;
|
||||
for (j=0; j<3; j++) {
|
||||
rustsecp256k1_v0_8_1_fe_sqr(&x6, &x6);
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&x6, &x6);
|
||||
}
|
||||
rustsecp256k1_v0_8_1_fe_mul(&x6, &x6, &x3);
|
||||
rustsecp256k1_v0_9_0_fe_mul(&x6, &x6, &x3);
|
||||
|
||||
x9 = x6;
|
||||
for (j=0; j<3; j++) {
|
||||
rustsecp256k1_v0_8_1_fe_sqr(&x9, &x9);
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&x9, &x9);
|
||||
}
|
||||
rustsecp256k1_v0_8_1_fe_mul(&x9, &x9, &x3);
|
||||
rustsecp256k1_v0_9_0_fe_mul(&x9, &x9, &x3);
|
||||
|
||||
x11 = x9;
|
||||
for (j=0; j<2; j++) {
|
||||
rustsecp256k1_v0_8_1_fe_sqr(&x11, &x11);
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&x11, &x11);
|
||||
}
|
||||
rustsecp256k1_v0_8_1_fe_mul(&x11, &x11, &x2);
|
||||
rustsecp256k1_v0_9_0_fe_mul(&x11, &x11, &x2);
|
||||
|
||||
x22 = x11;
|
||||
for (j=0; j<11; j++) {
|
||||
rustsecp256k1_v0_8_1_fe_sqr(&x22, &x22);
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&x22, &x22);
|
||||
}
|
||||
rustsecp256k1_v0_8_1_fe_mul(&x22, &x22, &x11);
|
||||
rustsecp256k1_v0_9_0_fe_mul(&x22, &x22, &x11);
|
||||
|
||||
x44 = x22;
|
||||
for (j=0; j<22; j++) {
|
||||
rustsecp256k1_v0_8_1_fe_sqr(&x44, &x44);
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&x44, &x44);
|
||||
}
|
||||
rustsecp256k1_v0_8_1_fe_mul(&x44, &x44, &x22);
|
||||
rustsecp256k1_v0_9_0_fe_mul(&x44, &x44, &x22);
|
||||
|
||||
x88 = x44;
|
||||
for (j=0; j<44; j++) {
|
||||
rustsecp256k1_v0_8_1_fe_sqr(&x88, &x88);
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&x88, &x88);
|
||||
}
|
||||
rustsecp256k1_v0_8_1_fe_mul(&x88, &x88, &x44);
|
||||
rustsecp256k1_v0_9_0_fe_mul(&x88, &x88, &x44);
|
||||
|
||||
x176 = x88;
|
||||
for (j=0; j<88; j++) {
|
||||
rustsecp256k1_v0_8_1_fe_sqr(&x176, &x176);
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&x176, &x176);
|
||||
}
|
||||
rustsecp256k1_v0_8_1_fe_mul(&x176, &x176, &x88);
|
||||
rustsecp256k1_v0_9_0_fe_mul(&x176, &x176, &x88);
|
||||
|
||||
x220 = x176;
|
||||
for (j=0; j<44; j++) {
|
||||
rustsecp256k1_v0_8_1_fe_sqr(&x220, &x220);
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&x220, &x220);
|
||||
}
|
||||
rustsecp256k1_v0_8_1_fe_mul(&x220, &x220, &x44);
|
||||
rustsecp256k1_v0_9_0_fe_mul(&x220, &x220, &x44);
|
||||
|
||||
x223 = x220;
|
||||
for (j=0; j<3; j++) {
|
||||
rustsecp256k1_v0_8_1_fe_sqr(&x223, &x223);
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&x223, &x223);
|
||||
}
|
||||
rustsecp256k1_v0_8_1_fe_mul(&x223, &x223, &x3);
|
||||
rustsecp256k1_v0_9_0_fe_mul(&x223, &x223, &x3);
|
||||
|
||||
/* The final result is then assembled using a sliding window over the blocks. */
|
||||
|
||||
t1 = x223;
|
||||
for (j=0; j<23; j++) {
|
||||
rustsecp256k1_v0_8_1_fe_sqr(&t1, &t1);
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&t1, &t1);
|
||||
}
|
||||
rustsecp256k1_v0_8_1_fe_mul(&t1, &t1, &x22);
|
||||
rustsecp256k1_v0_9_0_fe_mul(&t1, &t1, &x22);
|
||||
for (j=0; j<6; j++) {
|
||||
rustsecp256k1_v0_8_1_fe_sqr(&t1, &t1);
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&t1, &t1);
|
||||
}
|
||||
rustsecp256k1_v0_8_1_fe_mul(&t1, &t1, &x2);
|
||||
rustsecp256k1_v0_8_1_fe_sqr(&t1, &t1);
|
||||
rustsecp256k1_v0_8_1_fe_sqr(r, &t1);
|
||||
rustsecp256k1_v0_9_0_fe_mul(&t1, &t1, &x2);
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&t1, &t1);
|
||||
rustsecp256k1_v0_9_0_fe_sqr(r, &t1);
|
||||
|
||||
/* Check that a square root was actually calculated */
|
||||
|
||||
rustsecp256k1_v0_8_1_fe_sqr(&t1, r);
|
||||
return rustsecp256k1_v0_8_1_fe_equal(&t1, a);
|
||||
rustsecp256k1_v0_9_0_fe_sqr(&t1, r);
|
||||
ret = rustsecp256k1_v0_9_0_fe_equal(&t1, a);
|
||||
|
||||
#ifdef VERIFY
|
||||
if (!ret) {
|
||||
rustsecp256k1_v0_9_0_fe_negate(&t1, &t1, 1);
|
||||
rustsecp256k1_v0_9_0_fe_normalize_var(&t1);
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_fe_equal(&t1, a));
|
||||
}
|
||||
#endif
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifndef VERIFY
|
||||
static void rustsecp256k1_v0_9_0_fe_verify(const rustsecp256k1_v0_9_0_fe *a) { (void)a; }
|
||||
static void rustsecp256k1_v0_9_0_fe_verify_magnitude(const rustsecp256k1_v0_9_0_fe *a, int m) { (void)a; (void)m; }
|
||||
#else
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_verify(const rustsecp256k1_v0_9_0_fe *a);
|
||||
static void rustsecp256k1_v0_9_0_fe_verify(const rustsecp256k1_v0_9_0_fe *a) {
|
||||
/* Magnitude between 0 and 32. */
|
||||
rustsecp256k1_v0_9_0_fe_verify_magnitude(a, 32);
|
||||
/* Normalized is 0 or 1. */
|
||||
VERIFY_CHECK((a->normalized == 0) || (a->normalized == 1));
|
||||
/* If normalized, magnitude must be 0 or 1. */
|
||||
if (a->normalized) rustsecp256k1_v0_9_0_fe_verify_magnitude(a, 1);
|
||||
/* Invoke implementation-specific checks. */
|
||||
rustsecp256k1_v0_9_0_fe_impl_verify(a);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_verify_magnitude(const rustsecp256k1_v0_9_0_fe *a, int m) {
|
||||
VERIFY_CHECK(m >= 0);
|
||||
VERIFY_CHECK(m <= 32);
|
||||
VERIFY_CHECK(a->magnitude <= m);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_normalize(rustsecp256k1_v0_9_0_fe *r);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_normalize(rustsecp256k1_v0_9_0_fe *r) {
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
rustsecp256k1_v0_9_0_fe_impl_normalize(r);
|
||||
r->magnitude = 1;
|
||||
r->normalized = 1;
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_normalize_weak(rustsecp256k1_v0_9_0_fe *r);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_normalize_weak(rustsecp256k1_v0_9_0_fe *r) {
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
rustsecp256k1_v0_9_0_fe_impl_normalize_weak(r);
|
||||
r->magnitude = 1;
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_normalize_var(rustsecp256k1_v0_9_0_fe *r);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_normalize_var(rustsecp256k1_v0_9_0_fe *r) {
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
rustsecp256k1_v0_9_0_fe_impl_normalize_var(r);
|
||||
r->magnitude = 1;
|
||||
r->normalized = 1;
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_9_0_fe_impl_normalizes_to_zero(const rustsecp256k1_v0_9_0_fe *r);
|
||||
SECP256K1_INLINE static int rustsecp256k1_v0_9_0_fe_normalizes_to_zero(const rustsecp256k1_v0_9_0_fe *r) {
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
return rustsecp256k1_v0_9_0_fe_impl_normalizes_to_zero(r);
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_9_0_fe_impl_normalizes_to_zero_var(const rustsecp256k1_v0_9_0_fe *r);
|
||||
SECP256K1_INLINE static int rustsecp256k1_v0_9_0_fe_normalizes_to_zero_var(const rustsecp256k1_v0_9_0_fe *r) {
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
return rustsecp256k1_v0_9_0_fe_impl_normalizes_to_zero_var(r);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_set_int(rustsecp256k1_v0_9_0_fe *r, int a);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_set_int(rustsecp256k1_v0_9_0_fe *r, int a) {
|
||||
VERIFY_CHECK(0 <= a && a <= 0x7FFF);
|
||||
rustsecp256k1_v0_9_0_fe_impl_set_int(r, a);
|
||||
r->magnitude = (a != 0);
|
||||
r->normalized = 1;
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_add_int(rustsecp256k1_v0_9_0_fe *r, int a);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_add_int(rustsecp256k1_v0_9_0_fe *r, int a) {
|
||||
VERIFY_CHECK(0 <= a && a <= 0x7FFF);
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
rustsecp256k1_v0_9_0_fe_impl_add_int(r, a);
|
||||
r->magnitude += 1;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_clear(rustsecp256k1_v0_9_0_fe *a);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_clear(rustsecp256k1_v0_9_0_fe *a) {
|
||||
a->magnitude = 0;
|
||||
a->normalized = 1;
|
||||
rustsecp256k1_v0_9_0_fe_impl_clear(a);
|
||||
rustsecp256k1_v0_9_0_fe_verify(a);
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_9_0_fe_impl_is_zero(const rustsecp256k1_v0_9_0_fe *a);
|
||||
SECP256K1_INLINE static int rustsecp256k1_v0_9_0_fe_is_zero(const rustsecp256k1_v0_9_0_fe *a) {
|
||||
rustsecp256k1_v0_9_0_fe_verify(a);
|
||||
VERIFY_CHECK(a->normalized);
|
||||
return rustsecp256k1_v0_9_0_fe_impl_is_zero(a);
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_9_0_fe_impl_is_odd(const rustsecp256k1_v0_9_0_fe *a);
|
||||
SECP256K1_INLINE static int rustsecp256k1_v0_9_0_fe_is_odd(const rustsecp256k1_v0_9_0_fe *a) {
|
||||
rustsecp256k1_v0_9_0_fe_verify(a);
|
||||
VERIFY_CHECK(a->normalized);
|
||||
return rustsecp256k1_v0_9_0_fe_impl_is_odd(a);
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_9_0_fe_impl_cmp_var(const rustsecp256k1_v0_9_0_fe *a, const rustsecp256k1_v0_9_0_fe *b);
|
||||
SECP256K1_INLINE static int rustsecp256k1_v0_9_0_fe_cmp_var(const rustsecp256k1_v0_9_0_fe *a, const rustsecp256k1_v0_9_0_fe *b) {
|
||||
rustsecp256k1_v0_9_0_fe_verify(a);
|
||||
rustsecp256k1_v0_9_0_fe_verify(b);
|
||||
VERIFY_CHECK(a->normalized);
|
||||
VERIFY_CHECK(b->normalized);
|
||||
return rustsecp256k1_v0_9_0_fe_impl_cmp_var(a, b);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_set_b32_mod(rustsecp256k1_v0_9_0_fe *r, const unsigned char *a);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_set_b32_mod(rustsecp256k1_v0_9_0_fe *r, const unsigned char *a) {
|
||||
rustsecp256k1_v0_9_0_fe_impl_set_b32_mod(r, a);
|
||||
r->magnitude = 1;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_9_0_fe_impl_set_b32_limit(rustsecp256k1_v0_9_0_fe *r, const unsigned char *a);
|
||||
SECP256K1_INLINE static int rustsecp256k1_v0_9_0_fe_set_b32_limit(rustsecp256k1_v0_9_0_fe *r, const unsigned char *a) {
|
||||
if (rustsecp256k1_v0_9_0_fe_impl_set_b32_limit(r, a)) {
|
||||
r->magnitude = 1;
|
||||
r->normalized = 1;
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
return 1;
|
||||
} else {
|
||||
/* Mark the output field element as invalid. */
|
||||
r->magnitude = -1;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_get_b32(unsigned char *r, const rustsecp256k1_v0_9_0_fe *a);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_9_0_fe *a) {
|
||||
rustsecp256k1_v0_9_0_fe_verify(a);
|
||||
VERIFY_CHECK(a->normalized);
|
||||
rustsecp256k1_v0_9_0_fe_impl_get_b32(r, a);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_negate_unchecked(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a, int m);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_negate_unchecked(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a, int m) {
|
||||
rustsecp256k1_v0_9_0_fe_verify(a);
|
||||
VERIFY_CHECK(m >= 0 && m <= 31);
|
||||
rustsecp256k1_v0_9_0_fe_verify_magnitude(a, m);
|
||||
rustsecp256k1_v0_9_0_fe_impl_negate_unchecked(r, a, m);
|
||||
r->magnitude = m + 1;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_mul_int_unchecked(rustsecp256k1_v0_9_0_fe *r, int a);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_mul_int_unchecked(rustsecp256k1_v0_9_0_fe *r, int a) {
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
VERIFY_CHECK(a >= 0 && a <= 32);
|
||||
VERIFY_CHECK(a*r->magnitude <= 32);
|
||||
rustsecp256k1_v0_9_0_fe_impl_mul_int_unchecked(r, a);
|
||||
r->magnitude *= a;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_add(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_add(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a) {
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
rustsecp256k1_v0_9_0_fe_verify(a);
|
||||
VERIFY_CHECK(r->magnitude + a->magnitude <= 32);
|
||||
rustsecp256k1_v0_9_0_fe_impl_add(r, a);
|
||||
r->magnitude += a->magnitude;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_mul(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a, const rustsecp256k1_v0_9_0_fe * SECP256K1_RESTRICT b);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_mul(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a, const rustsecp256k1_v0_9_0_fe * SECP256K1_RESTRICT b) {
|
||||
rustsecp256k1_v0_9_0_fe_verify(a);
|
||||
rustsecp256k1_v0_9_0_fe_verify(b);
|
||||
rustsecp256k1_v0_9_0_fe_verify_magnitude(a, 8);
|
||||
rustsecp256k1_v0_9_0_fe_verify_magnitude(b, 8);
|
||||
VERIFY_CHECK(r != b);
|
||||
VERIFY_CHECK(a != b);
|
||||
rustsecp256k1_v0_9_0_fe_impl_mul(r, a, b);
|
||||
r->magnitude = 1;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_sqr(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_sqr(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a) {
|
||||
rustsecp256k1_v0_9_0_fe_verify(a);
|
||||
rustsecp256k1_v0_9_0_fe_verify_magnitude(a, 8);
|
||||
rustsecp256k1_v0_9_0_fe_impl_sqr(r, a);
|
||||
r->magnitude = 1;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_cmov(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a, int flag);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_cmov(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *a, int flag) {
|
||||
VERIFY_CHECK(flag == 0 || flag == 1);
|
||||
rustsecp256k1_v0_9_0_fe_verify(a);
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
rustsecp256k1_v0_9_0_fe_impl_cmov(r, a, flag);
|
||||
if (a->magnitude > r->magnitude) r->magnitude = a->magnitude;
|
||||
if (!a->normalized) r->normalized = 0;
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_to_storage(rustsecp256k1_v0_9_0_fe_storage *r, const rustsecp256k1_v0_9_0_fe *a);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_to_storage(rustsecp256k1_v0_9_0_fe_storage *r, const rustsecp256k1_v0_9_0_fe *a) {
|
||||
rustsecp256k1_v0_9_0_fe_verify(a);
|
||||
VERIFY_CHECK(a->normalized);
|
||||
rustsecp256k1_v0_9_0_fe_impl_to_storage(r, a);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_from_storage(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe_storage *a);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_from_storage(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe_storage *a) {
|
||||
rustsecp256k1_v0_9_0_fe_impl_from_storage(r, a);
|
||||
r->magnitude = 1;
|
||||
r->normalized = 1;
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_inv(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *x);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_inv(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *x) {
|
||||
int input_is_zero = rustsecp256k1_v0_9_0_fe_normalizes_to_zero(x);
|
||||
rustsecp256k1_v0_9_0_fe_verify(x);
|
||||
rustsecp256k1_v0_9_0_fe_impl_inv(r, x);
|
||||
r->magnitude = x->magnitude > 0;
|
||||
r->normalized = 1;
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_fe_normalizes_to_zero(r) == input_is_zero);
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_inv_var(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *x);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_inv_var(rustsecp256k1_v0_9_0_fe *r, const rustsecp256k1_v0_9_0_fe *x) {
|
||||
int input_is_zero = rustsecp256k1_v0_9_0_fe_normalizes_to_zero(x);
|
||||
rustsecp256k1_v0_9_0_fe_verify(x);
|
||||
rustsecp256k1_v0_9_0_fe_impl_inv_var(r, x);
|
||||
r->magnitude = x->magnitude > 0;
|
||||
r->normalized = 1;
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_fe_normalizes_to_zero(r) == input_is_zero);
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
}
|
||||
|
||||
static int rustsecp256k1_v0_9_0_fe_impl_is_square_var(const rustsecp256k1_v0_9_0_fe *x);
|
||||
SECP256K1_INLINE static int rustsecp256k1_v0_9_0_fe_is_square_var(const rustsecp256k1_v0_9_0_fe *x) {
|
||||
int ret;
|
||||
rustsecp256k1_v0_9_0_fe tmp = *x, sqrt;
|
||||
rustsecp256k1_v0_9_0_fe_verify(x);
|
||||
ret = rustsecp256k1_v0_9_0_fe_impl_is_square_var(x);
|
||||
rustsecp256k1_v0_9_0_fe_normalize_weak(&tmp);
|
||||
VERIFY_CHECK(ret == rustsecp256k1_v0_9_0_fe_sqrt(&sqrt, &tmp));
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_get_bounds(rustsecp256k1_v0_9_0_fe* r, int m);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_get_bounds(rustsecp256k1_v0_9_0_fe* r, int m) {
|
||||
VERIFY_CHECK(m >= 0);
|
||||
VERIFY_CHECK(m <= 32);
|
||||
rustsecp256k1_v0_9_0_fe_impl_get_bounds(r, m);
|
||||
r->magnitude = m;
|
||||
r->normalized = (m == 0);
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_9_0_fe_impl_half(rustsecp256k1_v0_9_0_fe *r);
|
||||
SECP256K1_INLINE static void rustsecp256k1_v0_9_0_fe_half(rustsecp256k1_v0_9_0_fe *r) {
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
rustsecp256k1_v0_9_0_fe_verify_magnitude(r, 31);
|
||||
rustsecp256k1_v0_9_0_fe_impl_half(r);
|
||||
r->magnitude = (r->magnitude >> 1) + 1;
|
||||
r->normalized = 0;
|
||||
rustsecp256k1_v0_9_0_fe_verify(r);
|
||||
}
|
||||
|
||||
#endif /* defined(VERIFY) */
|
||||
|
||||
#endif /* SECP256K1_FIELD_IMPL_H */
|
||||
|
|
|
@ -14,10 +14,10 @@
|
|||
* Note: For exhaustive test mode, secp256k1 is replaced by a small subgroup of a different curve.
|
||||
*/
|
||||
typedef struct {
|
||||
rustsecp256k1_v0_8_1_fe x;
|
||||
rustsecp256k1_v0_8_1_fe y;
|
||||
rustsecp256k1_v0_9_0_fe x;
|
||||
rustsecp256k1_v0_9_0_fe y;
|
||||
int infinity; /* whether this represents the point at infinity */
|
||||
} rustsecp256k1_v0_8_1_ge;
|
||||
} rustsecp256k1_v0_9_0_ge;
|
||||
|
||||
#define SECP256K1_GE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), 0}
|
||||
#define SECP256K1_GE_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1}
|
||||
|
@ -26,48 +26,62 @@ typedef struct {
|
|||
* Note: For exhastive test mode, secp256k1 is replaced by a small subgroup of a different curve.
|
||||
*/
|
||||
typedef struct {
|
||||
rustsecp256k1_v0_8_1_fe x; /* actual X: x/z^2 */
|
||||
rustsecp256k1_v0_8_1_fe y; /* actual Y: y/z^3 */
|
||||
rustsecp256k1_v0_8_1_fe z;
|
||||
rustsecp256k1_v0_9_0_fe x; /* actual X: x/z^2 */
|
||||
rustsecp256k1_v0_9_0_fe y; /* actual Y: y/z^3 */
|
||||
rustsecp256k1_v0_9_0_fe z;
|
||||
int infinity; /* whether this represents the point at infinity */
|
||||
} rustsecp256k1_v0_8_1_gej;
|
||||
} rustsecp256k1_v0_9_0_gej;
|
||||
|
||||
#define SECP256K1_GEJ_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1), 0}
|
||||
#define SECP256K1_GEJ_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1}
|
||||
|
||||
typedef struct {
|
||||
rustsecp256k1_v0_8_1_fe_storage x;
|
||||
rustsecp256k1_v0_8_1_fe_storage y;
|
||||
} rustsecp256k1_v0_8_1_ge_storage;
|
||||
rustsecp256k1_v0_9_0_fe_storage x;
|
||||
rustsecp256k1_v0_9_0_fe_storage y;
|
||||
} rustsecp256k1_v0_9_0_ge_storage;
|
||||
|
||||
#define SECP256K1_GE_STORAGE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_STORAGE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_STORAGE_CONST((i),(j),(k),(l),(m),(n),(o),(p))}
|
||||
|
||||
#define SECP256K1_GE_STORAGE_CONST_GET(t) SECP256K1_FE_STORAGE_CONST_GET(t.x), SECP256K1_FE_STORAGE_CONST_GET(t.y)
|
||||
|
||||
/** Maximum allowed magnitudes for group element coordinates
|
||||
* in affine (x, y) and jacobian (x, y, z) representation. */
|
||||
#define SECP256K1_GE_X_MAGNITUDE_MAX 4
|
||||
#define SECP256K1_GE_Y_MAGNITUDE_MAX 3
|
||||
#define SECP256K1_GEJ_X_MAGNITUDE_MAX 4
|
||||
#define SECP256K1_GEJ_Y_MAGNITUDE_MAX 4
|
||||
#define SECP256K1_GEJ_Z_MAGNITUDE_MAX 1
|
||||
|
||||
/** Set a group element equal to the point with given X and Y coordinates */
|
||||
static void rustsecp256k1_v0_8_1_ge_set_xy(rustsecp256k1_v0_8_1_ge *r, const rustsecp256k1_v0_8_1_fe *x, const rustsecp256k1_v0_8_1_fe *y);
|
||||
static void rustsecp256k1_v0_9_0_ge_set_xy(rustsecp256k1_v0_9_0_ge *r, const rustsecp256k1_v0_9_0_fe *x, const rustsecp256k1_v0_9_0_fe *y);
|
||||
|
||||
/** Set a group element (affine) equal to the point with the given X coordinate, and given oddness
|
||||
* for Y. Return value indicates whether the result is valid. */
|
||||
static int rustsecp256k1_v0_8_1_ge_set_xo_var(rustsecp256k1_v0_8_1_ge *r, const rustsecp256k1_v0_8_1_fe *x, int odd);
|
||||
static int rustsecp256k1_v0_9_0_ge_set_xo_var(rustsecp256k1_v0_9_0_ge *r, const rustsecp256k1_v0_9_0_fe *x, int odd);
|
||||
|
||||
/** Determine whether x is a valid X coordinate on the curve. */
|
||||
static int rustsecp256k1_v0_9_0_ge_x_on_curve_var(const rustsecp256k1_v0_9_0_fe *x);
|
||||
|
||||
/** Determine whether fraction xn/xd is a valid X coordinate on the curve (xd != 0). */
|
||||
static int rustsecp256k1_v0_9_0_ge_x_frac_on_curve_var(const rustsecp256k1_v0_9_0_fe *xn, const rustsecp256k1_v0_9_0_fe *xd);
|
||||
|
||||
/** Check whether a group element is the point at infinity. */
|
||||
static int rustsecp256k1_v0_8_1_ge_is_infinity(const rustsecp256k1_v0_8_1_ge *a);
|
||||
static int rustsecp256k1_v0_9_0_ge_is_infinity(const rustsecp256k1_v0_9_0_ge *a);
|
||||
|
||||
/** Check whether a group element is valid (i.e., on the curve). */
|
||||
static int rustsecp256k1_v0_8_1_ge_is_valid_var(const rustsecp256k1_v0_8_1_ge *a);
|
||||
static int rustsecp256k1_v0_9_0_ge_is_valid_var(const rustsecp256k1_v0_9_0_ge *a);
|
||||
|
||||
/** Set r equal to the inverse of a (i.e., mirrored around the X axis) */
|
||||
static void rustsecp256k1_v0_8_1_ge_neg(rustsecp256k1_v0_8_1_ge *r, const rustsecp256k1_v0_8_1_ge *a);
|
||||
static void rustsecp256k1_v0_9_0_ge_neg(rustsecp256k1_v0_9_0_ge *r, const rustsecp256k1_v0_9_0_ge *a);
|
||||
|
||||
/** Set a group element equal to another which is given in jacobian coordinates. Constant time. */
|
||||
static void rustsecp256k1_v0_8_1_ge_set_gej(rustsecp256k1_v0_8_1_ge *r, rustsecp256k1_v0_8_1_gej *a);
|
||||
static void rustsecp256k1_v0_9_0_ge_set_gej(rustsecp256k1_v0_9_0_ge *r, rustsecp256k1_v0_9_0_gej *a);
|
||||
|
||||
/** Set a group element equal to another which is given in jacobian coordinates. */
|
||||
static void rustsecp256k1_v0_8_1_ge_set_gej_var(rustsecp256k1_v0_8_1_ge *r, rustsecp256k1_v0_8_1_gej *a);
|
||||
static void rustsecp256k1_v0_9_0_ge_set_gej_var(rustsecp256k1_v0_9_0_ge *r, rustsecp256k1_v0_9_0_gej *a);
|
||||
|
||||
/** Set a batch of group elements equal to the inputs given in jacobian coordinates */
|
||||
static void rustsecp256k1_v0_8_1_ge_set_all_gej_var(rustsecp256k1_v0_8_1_ge *r, const rustsecp256k1_v0_8_1_gej *a, size_t len);
|
||||
static void rustsecp256k1_v0_9_0_ge_set_all_gej_var(rustsecp256k1_v0_9_0_ge *r, const rustsecp256k1_v0_9_0_gej *a, size_t len);
|
||||
|
||||
/** Bring a batch of inputs to the same global z "denominator", based on ratios between
|
||||
* (omitted) z coordinates of adjacent elements.
|
||||
|
@ -86,72 +100,73 @@ static void rustsecp256k1_v0_8_1_ge_set_all_gej_var(rustsecp256k1_v0_8_1_ge *r,
|
|||
*
|
||||
* The coordinates of the final element a[len-1] are not changed.
|
||||
*/
|
||||
static void rustsecp256k1_v0_8_1_ge_table_set_globalz(size_t len, rustsecp256k1_v0_8_1_ge *a, const rustsecp256k1_v0_8_1_fe *zr);
|
||||
static void rustsecp256k1_v0_9_0_ge_table_set_globalz(size_t len, rustsecp256k1_v0_9_0_ge *a, const rustsecp256k1_v0_9_0_fe *zr);
|
||||
|
||||
/** Set a group element (affine) equal to the point at infinity. */
|
||||
static void rustsecp256k1_v0_8_1_ge_set_infinity(rustsecp256k1_v0_8_1_ge *r);
|
||||
static void rustsecp256k1_v0_9_0_ge_set_infinity(rustsecp256k1_v0_9_0_ge *r);
|
||||
|
||||
/** Set a group element (jacobian) equal to the point at infinity. */
|
||||
static void rustsecp256k1_v0_8_1_gej_set_infinity(rustsecp256k1_v0_8_1_gej *r);
|
||||
static void rustsecp256k1_v0_9_0_gej_set_infinity(rustsecp256k1_v0_9_0_gej *r);
|
||||
|
||||
/** Set a group element (jacobian) equal to another which is given in affine coordinates. */
|
||||
static void rustsecp256k1_v0_8_1_gej_set_ge(rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_ge *a);
|
||||
static void rustsecp256k1_v0_9_0_gej_set_ge(rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_ge *a);
|
||||
|
||||
/** Check two group elements (jacobian) for equality in variable time. */
|
||||
static int rustsecp256k1_v0_8_1_gej_eq_var(const rustsecp256k1_v0_8_1_gej *a, const rustsecp256k1_v0_8_1_gej *b);
|
||||
static int rustsecp256k1_v0_9_0_gej_eq_var(const rustsecp256k1_v0_9_0_gej *a, const rustsecp256k1_v0_9_0_gej *b);
|
||||
|
||||
/** Compare the X coordinate of a group element (jacobian). */
|
||||
static int rustsecp256k1_v0_8_1_gej_eq_x_var(const rustsecp256k1_v0_8_1_fe *x, const rustsecp256k1_v0_8_1_gej *a);
|
||||
/** Compare the X coordinate of a group element (jacobian).
|
||||
* The magnitude of the group element's X coordinate must not exceed 31. */
|
||||
static int rustsecp256k1_v0_9_0_gej_eq_x_var(const rustsecp256k1_v0_9_0_fe *x, const rustsecp256k1_v0_9_0_gej *a);
|
||||
|
||||
/** Set r equal to the inverse of a (i.e., mirrored around the X axis) */
|
||||
static void rustsecp256k1_v0_8_1_gej_neg(rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_gej *a);
|
||||
static void rustsecp256k1_v0_9_0_gej_neg(rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_gej *a);
|
||||
|
||||
/** Check whether a group element is the point at infinity. */
|
||||
static int rustsecp256k1_v0_8_1_gej_is_infinity(const rustsecp256k1_v0_8_1_gej *a);
|
||||
static int rustsecp256k1_v0_9_0_gej_is_infinity(const rustsecp256k1_v0_9_0_gej *a);
|
||||
|
||||
/** Set r equal to the double of a. Constant time. */
|
||||
static void rustsecp256k1_v0_8_1_gej_double(rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_gej *a);
|
||||
static void rustsecp256k1_v0_9_0_gej_double(rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_gej *a);
|
||||
|
||||
/** Set r equal to the double of a. If rzr is not-NULL this sets *rzr such that r->z == a->z * *rzr (where infinity means an implicit z = 0). */
|
||||
static void rustsecp256k1_v0_8_1_gej_double_var(rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_gej *a, rustsecp256k1_v0_8_1_fe *rzr);
|
||||
static void rustsecp256k1_v0_9_0_gej_double_var(rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_gej *a, rustsecp256k1_v0_9_0_fe *rzr);
|
||||
|
||||
/** Set r equal to the sum of a and b. If rzr is non-NULL this sets *rzr such that r->z == a->z * *rzr (a cannot be infinity in that case). */
|
||||
static void rustsecp256k1_v0_8_1_gej_add_var(rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_gej *a, const rustsecp256k1_v0_8_1_gej *b, rustsecp256k1_v0_8_1_fe *rzr);
|
||||
static void rustsecp256k1_v0_9_0_gej_add_var(rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_gej *a, const rustsecp256k1_v0_9_0_gej *b, rustsecp256k1_v0_9_0_fe *rzr);
|
||||
|
||||
/** Set r equal to the sum of a and b (with b given in affine coordinates, and not infinity). */
|
||||
static void rustsecp256k1_v0_8_1_gej_add_ge(rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_gej *a, const rustsecp256k1_v0_8_1_ge *b);
|
||||
static void rustsecp256k1_v0_9_0_gej_add_ge(rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_gej *a, const rustsecp256k1_v0_9_0_ge *b);
|
||||
|
||||
/** Set r equal to the sum of a and b (with b given in affine coordinates). This is more efficient
|
||||
than rustsecp256k1_v0_8_1_gej_add_var. It is identical to rustsecp256k1_v0_8_1_gej_add_ge but without constant-time
|
||||
than rustsecp256k1_v0_9_0_gej_add_var. It is identical to rustsecp256k1_v0_9_0_gej_add_ge but without constant-time
|
||||
guarantee, and b is allowed to be infinity. If rzr is non-NULL this sets *rzr such that r->z == a->z * *rzr (a cannot be infinity in that case). */
|
||||
static void rustsecp256k1_v0_8_1_gej_add_ge_var(rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_gej *a, const rustsecp256k1_v0_8_1_ge *b, rustsecp256k1_v0_8_1_fe *rzr);
|
||||
static void rustsecp256k1_v0_9_0_gej_add_ge_var(rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_gej *a, const rustsecp256k1_v0_9_0_ge *b, rustsecp256k1_v0_9_0_fe *rzr);
|
||||
|
||||
/** Set r equal to the sum of a and b (with the inverse of b's Z coordinate passed as bzinv). */
|
||||
static void rustsecp256k1_v0_8_1_gej_add_zinv_var(rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_gej *a, const rustsecp256k1_v0_8_1_ge *b, const rustsecp256k1_v0_8_1_fe *bzinv);
|
||||
static void rustsecp256k1_v0_9_0_gej_add_zinv_var(rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_gej *a, const rustsecp256k1_v0_9_0_ge *b, const rustsecp256k1_v0_9_0_fe *bzinv);
|
||||
|
||||
/** Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast. */
|
||||
static void rustsecp256k1_v0_8_1_ge_mul_lambda(rustsecp256k1_v0_8_1_ge *r, const rustsecp256k1_v0_8_1_ge *a);
|
||||
static void rustsecp256k1_v0_9_0_ge_mul_lambda(rustsecp256k1_v0_9_0_ge *r, const rustsecp256k1_v0_9_0_ge *a);
|
||||
|
||||
/** Clear a rustsecp256k1_v0_8_1_gej to prevent leaking sensitive information. */
|
||||
static void rustsecp256k1_v0_8_1_gej_clear(rustsecp256k1_v0_8_1_gej *r);
|
||||
/** Clear a rustsecp256k1_v0_9_0_gej to prevent leaking sensitive information. */
|
||||
static void rustsecp256k1_v0_9_0_gej_clear(rustsecp256k1_v0_9_0_gej *r);
|
||||
|
||||
/** Clear a rustsecp256k1_v0_8_1_ge to prevent leaking sensitive information. */
|
||||
static void rustsecp256k1_v0_8_1_ge_clear(rustsecp256k1_v0_8_1_ge *r);
|
||||
/** Clear a rustsecp256k1_v0_9_0_ge to prevent leaking sensitive information. */
|
||||
static void rustsecp256k1_v0_9_0_ge_clear(rustsecp256k1_v0_9_0_ge *r);
|
||||
|
||||
/** Convert a group element to the storage type. */
|
||||
static void rustsecp256k1_v0_8_1_ge_to_storage(rustsecp256k1_v0_8_1_ge_storage *r, const rustsecp256k1_v0_8_1_ge *a);
|
||||
static void rustsecp256k1_v0_9_0_ge_to_storage(rustsecp256k1_v0_9_0_ge_storage *r, const rustsecp256k1_v0_9_0_ge *a);
|
||||
|
||||
/** Convert a group element back from the storage type. */
|
||||
static void rustsecp256k1_v0_8_1_ge_from_storage(rustsecp256k1_v0_8_1_ge *r, const rustsecp256k1_v0_8_1_ge_storage *a);
|
||||
static void rustsecp256k1_v0_9_0_ge_from_storage(rustsecp256k1_v0_9_0_ge *r, const rustsecp256k1_v0_9_0_ge_storage *a);
|
||||
|
||||
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/
|
||||
static void rustsecp256k1_v0_8_1_gej_cmov(rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_gej *a, int flag);
|
||||
static void rustsecp256k1_v0_9_0_gej_cmov(rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_gej *a, int flag);
|
||||
|
||||
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/
|
||||
static void rustsecp256k1_v0_8_1_ge_storage_cmov(rustsecp256k1_v0_8_1_ge_storage *r, const rustsecp256k1_v0_8_1_ge_storage *a, int flag);
|
||||
static void rustsecp256k1_v0_9_0_ge_storage_cmov(rustsecp256k1_v0_9_0_ge_storage *r, const rustsecp256k1_v0_9_0_ge_storage *a, int flag);
|
||||
|
||||
/** Rescale a jacobian point by b which must be non-zero. Constant-time. */
|
||||
static void rustsecp256k1_v0_8_1_gej_rescale(rustsecp256k1_v0_8_1_gej *r, const rustsecp256k1_v0_8_1_fe *b);
|
||||
static void rustsecp256k1_v0_9_0_gej_rescale(rustsecp256k1_v0_9_0_gej *r, const rustsecp256k1_v0_9_0_fe *b);
|
||||
|
||||
/** Determine if a point (which is assumed to be on the curve) is in the correct (sub)group of the curve.
|
||||
*
|
||||
|
@ -162,6 +177,12 @@ static void rustsecp256k1_v0_8_1_gej_rescale(rustsecp256k1_v0_8_1_gej *r, const
|
|||
* (very) small subgroup, and that subgroup is what is used for all cryptographic operations. In that mode, this
|
||||
* function checks whether a point that is on the curve is in fact also in that subgroup.
|
||||
*/
|
||||
static int rustsecp256k1_v0_8_1_ge_is_in_correct_subgroup(const rustsecp256k1_v0_8_1_ge* ge);
|
||||
static int rustsecp256k1_v0_9_0_ge_is_in_correct_subgroup(const rustsecp256k1_v0_9_0_ge* ge);
|
||||
|
||||
/** Check invariants on an affine group element (no-op unless VERIFY is enabled). */
|
||||
static void rustsecp256k1_v0_9_0_ge_verify(const rustsecp256k1_v0_9_0_ge *a);
|
||||
|
||||
/** Check invariants on a Jacobian group element (no-op unless VERIFY is enabled). */
|
||||
static void rustsecp256k1_v0_9_0_gej_verify(const rustsecp256k1_v0_9_0_gej *a);
|
||||
|
||||
#endif /* SECP256K1_GROUP_H */
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -14,28 +14,28 @@ typedef struct {
|
|||
uint32_t s[8];
|
||||
unsigned char buf[64];
|
||||
uint64_t bytes;
|
||||
} rustsecp256k1_v0_8_1_sha256;
|
||||
} rustsecp256k1_v0_9_0_sha256;
|
||||
|
||||
static void rustsecp256k1_v0_8_1_sha256_initialize(rustsecp256k1_v0_8_1_sha256 *hash);
|
||||
static void rustsecp256k1_v0_8_1_sha256_write(rustsecp256k1_v0_8_1_sha256 *hash, const unsigned char *data, size_t size);
|
||||
static void rustsecp256k1_v0_8_1_sha256_finalize(rustsecp256k1_v0_8_1_sha256 *hash, unsigned char *out32);
|
||||
static void rustsecp256k1_v0_9_0_sha256_initialize(rustsecp256k1_v0_9_0_sha256 *hash);
|
||||
static void rustsecp256k1_v0_9_0_sha256_write(rustsecp256k1_v0_9_0_sha256 *hash, const unsigned char *data, size_t size);
|
||||
static void rustsecp256k1_v0_9_0_sha256_finalize(rustsecp256k1_v0_9_0_sha256 *hash, unsigned char *out32);
|
||||
|
||||
typedef struct {
|
||||
rustsecp256k1_v0_8_1_sha256 inner, outer;
|
||||
} rustsecp256k1_v0_8_1_hmac_sha256;
|
||||
rustsecp256k1_v0_9_0_sha256 inner, outer;
|
||||
} rustsecp256k1_v0_9_0_hmac_sha256;
|
||||
|
||||
static void rustsecp256k1_v0_8_1_hmac_sha256_initialize(rustsecp256k1_v0_8_1_hmac_sha256 *hash, const unsigned char *key, size_t size);
|
||||
static void rustsecp256k1_v0_8_1_hmac_sha256_write(rustsecp256k1_v0_8_1_hmac_sha256 *hash, const unsigned char *data, size_t size);
|
||||
static void rustsecp256k1_v0_8_1_hmac_sha256_finalize(rustsecp256k1_v0_8_1_hmac_sha256 *hash, unsigned char *out32);
|
||||
static void rustsecp256k1_v0_9_0_hmac_sha256_initialize(rustsecp256k1_v0_9_0_hmac_sha256 *hash, const unsigned char *key, size_t size);
|
||||
static void rustsecp256k1_v0_9_0_hmac_sha256_write(rustsecp256k1_v0_9_0_hmac_sha256 *hash, const unsigned char *data, size_t size);
|
||||
static void rustsecp256k1_v0_9_0_hmac_sha256_finalize(rustsecp256k1_v0_9_0_hmac_sha256 *hash, unsigned char *out32);
|
||||
|
||||
typedef struct {
|
||||
unsigned char v[32];
|
||||
unsigned char k[32];
|
||||
int retry;
|
||||
} rustsecp256k1_v0_8_1_rfc6979_hmac_sha256;
|
||||
} rustsecp256k1_v0_9_0_rfc6979_hmac_sha256;
|
||||
|
||||
static void rustsecp256k1_v0_8_1_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_8_1_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen);
|
||||
static void rustsecp256k1_v0_8_1_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_8_1_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen);
|
||||
static void rustsecp256k1_v0_8_1_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_8_1_rfc6979_hmac_sha256 *rng);
|
||||
static void rustsecp256k1_v0_9_0_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_9_0_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen);
|
||||
static void rustsecp256k1_v0_9_0_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_9_0_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen);
|
||||
static void rustsecp256k1_v0_9_0_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_9_0_rfc6979_hmac_sha256 *rng);
|
||||
|
||||
#endif /* SECP256K1_HASH_H */
|
||||
|
|
|
@ -28,7 +28,7 @@
|
|||
(h) = t1 + t2; \
|
||||
} while(0)
|
||||
|
||||
static void rustsecp256k1_v0_8_1_sha256_initialize(rustsecp256k1_v0_8_1_sha256 *hash) {
|
||||
static void rustsecp256k1_v0_9_0_sha256_initialize(rustsecp256k1_v0_9_0_sha256 *hash) {
|
||||
hash->s[0] = 0x6a09e667ul;
|
||||
hash->s[1] = 0xbb67ae85ul;
|
||||
hash->s[2] = 0x3c6ef372ul;
|
||||
|
@ -41,26 +41,26 @@ static void rustsecp256k1_v0_8_1_sha256_initialize(rustsecp256k1_v0_8_1_sha256 *
|
|||
}
|
||||
|
||||
/** Perform one SHA-256 transformation, processing 16 big endian 32-bit words. */
|
||||
static void rustsecp256k1_v0_8_1_sha256_transform(uint32_t* s, const unsigned char* buf) {
|
||||
static void rustsecp256k1_v0_9_0_sha256_transform(uint32_t* s, const unsigned char* buf) {
|
||||
uint32_t a = s[0], b = s[1], c = s[2], d = s[3], e = s[4], f = s[5], g = s[6], h = s[7];
|
||||
uint32_t w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15;
|
||||
|
||||
Round(a, b, c, d, e, f, g, h, 0x428a2f98, w0 = rustsecp256k1_v0_8_1_read_be32(&buf[0]));
|
||||
Round(h, a, b, c, d, e, f, g, 0x71374491, w1 = rustsecp256k1_v0_8_1_read_be32(&buf[4]));
|
||||
Round(g, h, a, b, c, d, e, f, 0xb5c0fbcf, w2 = rustsecp256k1_v0_8_1_read_be32(&buf[8]));
|
||||
Round(f, g, h, a, b, c, d, e, 0xe9b5dba5, w3 = rustsecp256k1_v0_8_1_read_be32(&buf[12]));
|
||||
Round(e, f, g, h, a, b, c, d, 0x3956c25b, w4 = rustsecp256k1_v0_8_1_read_be32(&buf[16]));
|
||||
Round(d, e, f, g, h, a, b, c, 0x59f111f1, w5 = rustsecp256k1_v0_8_1_read_be32(&buf[20]));
|
||||
Round(c, d, e, f, g, h, a, b, 0x923f82a4, w6 = rustsecp256k1_v0_8_1_read_be32(&buf[24]));
|
||||
Round(b, c, d, e, f, g, h, a, 0xab1c5ed5, w7 = rustsecp256k1_v0_8_1_read_be32(&buf[28]));
|
||||
Round(a, b, c, d, e, f, g, h, 0xd807aa98, w8 = rustsecp256k1_v0_8_1_read_be32(&buf[32]));
|
||||
Round(h, a, b, c, d, e, f, g, 0x12835b01, w9 = rustsecp256k1_v0_8_1_read_be32(&buf[36]));
|
||||
Round(g, h, a, b, c, d, e, f, 0x243185be, w10 = rustsecp256k1_v0_8_1_read_be32(&buf[40]));
|
||||
Round(f, g, h, a, b, c, d, e, 0x550c7dc3, w11 = rustsecp256k1_v0_8_1_read_be32(&buf[44]));
|
||||
Round(e, f, g, h, a, b, c, d, 0x72be5d74, w12 = rustsecp256k1_v0_8_1_read_be32(&buf[48]));
|
||||
Round(d, e, f, g, h, a, b, c, 0x80deb1fe, w13 = rustsecp256k1_v0_8_1_read_be32(&buf[52]));
|
||||
Round(c, d, e, f, g, h, a, b, 0x9bdc06a7, w14 = rustsecp256k1_v0_8_1_read_be32(&buf[56]));
|
||||
Round(b, c, d, e, f, g, h, a, 0xc19bf174, w15 = rustsecp256k1_v0_8_1_read_be32(&buf[60]));
|
||||
Round(a, b, c, d, e, f, g, h, 0x428a2f98, w0 = rustsecp256k1_v0_9_0_read_be32(&buf[0]));
|
||||
Round(h, a, b, c, d, e, f, g, 0x71374491, w1 = rustsecp256k1_v0_9_0_read_be32(&buf[4]));
|
||||
Round(g, h, a, b, c, d, e, f, 0xb5c0fbcf, w2 = rustsecp256k1_v0_9_0_read_be32(&buf[8]));
|
||||
Round(f, g, h, a, b, c, d, e, 0xe9b5dba5, w3 = rustsecp256k1_v0_9_0_read_be32(&buf[12]));
|
||||
Round(e, f, g, h, a, b, c, d, 0x3956c25b, w4 = rustsecp256k1_v0_9_0_read_be32(&buf[16]));
|
||||
Round(d, e, f, g, h, a, b, c, 0x59f111f1, w5 = rustsecp256k1_v0_9_0_read_be32(&buf[20]));
|
||||
Round(c, d, e, f, g, h, a, b, 0x923f82a4, w6 = rustsecp256k1_v0_9_0_read_be32(&buf[24]));
|
||||
Round(b, c, d, e, f, g, h, a, 0xab1c5ed5, w7 = rustsecp256k1_v0_9_0_read_be32(&buf[28]));
|
||||
Round(a, b, c, d, e, f, g, h, 0xd807aa98, w8 = rustsecp256k1_v0_9_0_read_be32(&buf[32]));
|
||||
Round(h, a, b, c, d, e, f, g, 0x12835b01, w9 = rustsecp256k1_v0_9_0_read_be32(&buf[36]));
|
||||
Round(g, h, a, b, c, d, e, f, 0x243185be, w10 = rustsecp256k1_v0_9_0_read_be32(&buf[40]));
|
||||
Round(f, g, h, a, b, c, d, e, 0x550c7dc3, w11 = rustsecp256k1_v0_9_0_read_be32(&buf[44]));
|
||||
Round(e, f, g, h, a, b, c, d, 0x72be5d74, w12 = rustsecp256k1_v0_9_0_read_be32(&buf[48]));
|
||||
Round(d, e, f, g, h, a, b, c, 0x80deb1fe, w13 = rustsecp256k1_v0_9_0_read_be32(&buf[52]));
|
||||
Round(c, d, e, f, g, h, a, b, 0x9bdc06a7, w14 = rustsecp256k1_v0_9_0_read_be32(&buf[56]));
|
||||
Round(b, c, d, e, f, g, h, a, 0xc19bf174, w15 = rustsecp256k1_v0_9_0_read_be32(&buf[60]));
|
||||
|
||||
Round(a, b, c, d, e, f, g, h, 0xe49b69c1, w0 += sigma1(w14) + w9 + sigma0(w1));
|
||||
Round(h, a, b, c, d, e, f, g, 0xefbe4786, w1 += sigma1(w15) + w10 + sigma0(w2));
|
||||
|
@ -123,7 +123,7 @@ static void rustsecp256k1_v0_8_1_sha256_transform(uint32_t* s, const unsigned ch
|
|||
s[7] += h;
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_sha256_write(rustsecp256k1_v0_8_1_sha256 *hash, const unsigned char *data, size_t len) {
|
||||
static void rustsecp256k1_v0_9_0_sha256_write(rustsecp256k1_v0_9_0_sha256 *hash, const unsigned char *data, size_t len) {
|
||||
size_t bufsize = hash->bytes & 0x3F;
|
||||
hash->bytes += len;
|
||||
VERIFY_CHECK(hash->bytes >= len);
|
||||
|
@ -133,87 +133,87 @@ static void rustsecp256k1_v0_8_1_sha256_write(rustsecp256k1_v0_8_1_sha256 *hash,
|
|||
memcpy(hash->buf + bufsize, data, chunk_len);
|
||||
data += chunk_len;
|
||||
len -= chunk_len;
|
||||
rustsecp256k1_v0_8_1_sha256_transform(hash->s, hash->buf);
|
||||
rustsecp256k1_v0_9_0_sha256_transform(hash->s, hash->buf);
|
||||
bufsize = 0;
|
||||
}
|
||||
if (len) {
|
||||
/* Fill the buffer with what remains. */
|
||||
memcpy(((unsigned char*)hash->buf) + bufsize, data, len);
|
||||
memcpy(hash->buf + bufsize, data, len);
|
||||
}
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_sha256_finalize(rustsecp256k1_v0_8_1_sha256 *hash, unsigned char *out32) {
|
||||
static void rustsecp256k1_v0_9_0_sha256_finalize(rustsecp256k1_v0_9_0_sha256 *hash, unsigned char *out32) {
|
||||
static const unsigned char pad[64] = {0x80};
|
||||
unsigned char sizedesc[8];
|
||||
int i;
|
||||
/* The maximum message size of SHA256 is 2^64-1 bits. */
|
||||
VERIFY_CHECK(hash->bytes < ((uint64_t)1 << 61));
|
||||
rustsecp256k1_v0_8_1_write_be32(&sizedesc[0], hash->bytes >> 29);
|
||||
rustsecp256k1_v0_8_1_write_be32(&sizedesc[4], hash->bytes << 3);
|
||||
rustsecp256k1_v0_8_1_sha256_write(hash, pad, 1 + ((119 - (hash->bytes % 64)) % 64));
|
||||
rustsecp256k1_v0_8_1_sha256_write(hash, sizedesc, 8);
|
||||
rustsecp256k1_v0_9_0_write_be32(&sizedesc[0], hash->bytes >> 29);
|
||||
rustsecp256k1_v0_9_0_write_be32(&sizedesc[4], hash->bytes << 3);
|
||||
rustsecp256k1_v0_9_0_sha256_write(hash, pad, 1 + ((119 - (hash->bytes % 64)) % 64));
|
||||
rustsecp256k1_v0_9_0_sha256_write(hash, sizedesc, 8);
|
||||
for (i = 0; i < 8; i++) {
|
||||
rustsecp256k1_v0_8_1_write_be32(&out32[4*i], hash->s[i]);
|
||||
rustsecp256k1_v0_9_0_write_be32(&out32[4*i], hash->s[i]);
|
||||
hash->s[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* Initializes a sha256 struct and writes the 64 byte string
|
||||
* SHA256(tag)||SHA256(tag) into it. */
|
||||
static void rustsecp256k1_v0_8_1_sha256_initialize_tagged(rustsecp256k1_v0_8_1_sha256 *hash, const unsigned char *tag, size_t taglen) {
|
||||
static void rustsecp256k1_v0_9_0_sha256_initialize_tagged(rustsecp256k1_v0_9_0_sha256 *hash, const unsigned char *tag, size_t taglen) {
|
||||
unsigned char buf[32];
|
||||
rustsecp256k1_v0_8_1_sha256_initialize(hash);
|
||||
rustsecp256k1_v0_8_1_sha256_write(hash, tag, taglen);
|
||||
rustsecp256k1_v0_8_1_sha256_finalize(hash, buf);
|
||||
rustsecp256k1_v0_9_0_sha256_initialize(hash);
|
||||
rustsecp256k1_v0_9_0_sha256_write(hash, tag, taglen);
|
||||
rustsecp256k1_v0_9_0_sha256_finalize(hash, buf);
|
||||
|
||||
rustsecp256k1_v0_8_1_sha256_initialize(hash);
|
||||
rustsecp256k1_v0_8_1_sha256_write(hash, buf, 32);
|
||||
rustsecp256k1_v0_8_1_sha256_write(hash, buf, 32);
|
||||
rustsecp256k1_v0_9_0_sha256_initialize(hash);
|
||||
rustsecp256k1_v0_9_0_sha256_write(hash, buf, 32);
|
||||
rustsecp256k1_v0_9_0_sha256_write(hash, buf, 32);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_hmac_sha256_initialize(rustsecp256k1_v0_8_1_hmac_sha256 *hash, const unsigned char *key, size_t keylen) {
|
||||
static void rustsecp256k1_v0_9_0_hmac_sha256_initialize(rustsecp256k1_v0_9_0_hmac_sha256 *hash, const unsigned char *key, size_t keylen) {
|
||||
size_t n;
|
||||
unsigned char rkey[64];
|
||||
if (keylen <= sizeof(rkey)) {
|
||||
memcpy(rkey, key, keylen);
|
||||
memset(rkey + keylen, 0, sizeof(rkey) - keylen);
|
||||
} else {
|
||||
rustsecp256k1_v0_8_1_sha256 sha256;
|
||||
rustsecp256k1_v0_8_1_sha256_initialize(&sha256);
|
||||
rustsecp256k1_v0_8_1_sha256_write(&sha256, key, keylen);
|
||||
rustsecp256k1_v0_8_1_sha256_finalize(&sha256, rkey);
|
||||
rustsecp256k1_v0_9_0_sha256 sha256;
|
||||
rustsecp256k1_v0_9_0_sha256_initialize(&sha256);
|
||||
rustsecp256k1_v0_9_0_sha256_write(&sha256, key, keylen);
|
||||
rustsecp256k1_v0_9_0_sha256_finalize(&sha256, rkey);
|
||||
memset(rkey + 32, 0, 32);
|
||||
}
|
||||
|
||||
rustsecp256k1_v0_8_1_sha256_initialize(&hash->outer);
|
||||
rustsecp256k1_v0_9_0_sha256_initialize(&hash->outer);
|
||||
for (n = 0; n < sizeof(rkey); n++) {
|
||||
rkey[n] ^= 0x5c;
|
||||
}
|
||||
rustsecp256k1_v0_8_1_sha256_write(&hash->outer, rkey, sizeof(rkey));
|
||||
rustsecp256k1_v0_9_0_sha256_write(&hash->outer, rkey, sizeof(rkey));
|
||||
|
||||
rustsecp256k1_v0_8_1_sha256_initialize(&hash->inner);
|
||||
rustsecp256k1_v0_9_0_sha256_initialize(&hash->inner);
|
||||
for (n = 0; n < sizeof(rkey); n++) {
|
||||
rkey[n] ^= 0x5c ^ 0x36;
|
||||
}
|
||||
rustsecp256k1_v0_8_1_sha256_write(&hash->inner, rkey, sizeof(rkey));
|
||||
rustsecp256k1_v0_9_0_sha256_write(&hash->inner, rkey, sizeof(rkey));
|
||||
memset(rkey, 0, sizeof(rkey));
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_hmac_sha256_write(rustsecp256k1_v0_8_1_hmac_sha256 *hash, const unsigned char *data, size_t size) {
|
||||
rustsecp256k1_v0_8_1_sha256_write(&hash->inner, data, size);
|
||||
static void rustsecp256k1_v0_9_0_hmac_sha256_write(rustsecp256k1_v0_9_0_hmac_sha256 *hash, const unsigned char *data, size_t size) {
|
||||
rustsecp256k1_v0_9_0_sha256_write(&hash->inner, data, size);
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_hmac_sha256_finalize(rustsecp256k1_v0_8_1_hmac_sha256 *hash, unsigned char *out32) {
|
||||
static void rustsecp256k1_v0_9_0_hmac_sha256_finalize(rustsecp256k1_v0_9_0_hmac_sha256 *hash, unsigned char *out32) {
|
||||
unsigned char temp[32];
|
||||
rustsecp256k1_v0_8_1_sha256_finalize(&hash->inner, temp);
|
||||
rustsecp256k1_v0_8_1_sha256_write(&hash->outer, temp, 32);
|
||||
rustsecp256k1_v0_9_0_sha256_finalize(&hash->inner, temp);
|
||||
rustsecp256k1_v0_9_0_sha256_write(&hash->outer, temp, 32);
|
||||
memset(temp, 0, 32);
|
||||
rustsecp256k1_v0_8_1_sha256_finalize(&hash->outer, out32);
|
||||
rustsecp256k1_v0_9_0_sha256_finalize(&hash->outer, out32);
|
||||
}
|
||||
|
||||
|
||||
static void rustsecp256k1_v0_8_1_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_8_1_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen) {
|
||||
rustsecp256k1_v0_8_1_hmac_sha256 hmac;
|
||||
static void rustsecp256k1_v0_9_0_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_9_0_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen) {
|
||||
rustsecp256k1_v0_9_0_hmac_sha256 hmac;
|
||||
static const unsigned char zero[1] = {0x00};
|
||||
static const unsigned char one[1] = {0x01};
|
||||
|
||||
|
@ -221,47 +221,47 @@ static void rustsecp256k1_v0_8_1_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0
|
|||
memset(rng->k, 0x00, 32); /* RFC6979 3.2.c. */
|
||||
|
||||
/* RFC6979 3.2.d. */
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_initialize(&hmac, rng->k, 32);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_write(&hmac, rng->v, 32);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_write(&hmac, zero, 1);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_write(&hmac, key, keylen);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_finalize(&hmac, rng->k);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_initialize(&hmac, rng->k, 32);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_write(&hmac, rng->v, 32);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_finalize(&hmac, rng->v);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_initialize(&hmac, rng->k, 32);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_write(&hmac, rng->v, 32);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_write(&hmac, zero, 1);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_write(&hmac, key, keylen);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_finalize(&hmac, rng->k);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_initialize(&hmac, rng->k, 32);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_write(&hmac, rng->v, 32);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_finalize(&hmac, rng->v);
|
||||
|
||||
/* RFC6979 3.2.f. */
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_initialize(&hmac, rng->k, 32);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_write(&hmac, rng->v, 32);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_write(&hmac, one, 1);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_write(&hmac, key, keylen);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_finalize(&hmac, rng->k);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_initialize(&hmac, rng->k, 32);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_write(&hmac, rng->v, 32);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_finalize(&hmac, rng->v);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_initialize(&hmac, rng->k, 32);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_write(&hmac, rng->v, 32);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_write(&hmac, one, 1);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_write(&hmac, key, keylen);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_finalize(&hmac, rng->k);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_initialize(&hmac, rng->k, 32);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_write(&hmac, rng->v, 32);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_finalize(&hmac, rng->v);
|
||||
rng->retry = 0;
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_8_1_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen) {
|
||||
static void rustsecp256k1_v0_9_0_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_9_0_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen) {
|
||||
/* RFC6979 3.2.h. */
|
||||
static const unsigned char zero[1] = {0x00};
|
||||
if (rng->retry) {
|
||||
rustsecp256k1_v0_8_1_hmac_sha256 hmac;
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_initialize(&hmac, rng->k, 32);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_write(&hmac, rng->v, 32);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_write(&hmac, zero, 1);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_finalize(&hmac, rng->k);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_initialize(&hmac, rng->k, 32);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_write(&hmac, rng->v, 32);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_finalize(&hmac, rng->v);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256 hmac;
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_initialize(&hmac, rng->k, 32);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_write(&hmac, rng->v, 32);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_write(&hmac, zero, 1);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_finalize(&hmac, rng->k);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_initialize(&hmac, rng->k, 32);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_write(&hmac, rng->v, 32);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_finalize(&hmac, rng->v);
|
||||
}
|
||||
|
||||
while (outlen > 0) {
|
||||
rustsecp256k1_v0_8_1_hmac_sha256 hmac;
|
||||
rustsecp256k1_v0_9_0_hmac_sha256 hmac;
|
||||
int now = outlen;
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_initialize(&hmac, rng->k, 32);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_write(&hmac, rng->v, 32);
|
||||
rustsecp256k1_v0_8_1_hmac_sha256_finalize(&hmac, rng->v);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_initialize(&hmac, rng->k, 32);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_write(&hmac, rng->v, 32);
|
||||
rustsecp256k1_v0_9_0_hmac_sha256_finalize(&hmac, rng->v);
|
||||
if (now > 32) {
|
||||
now = 32;
|
||||
}
|
||||
|
@ -273,7 +273,7 @@ static void rustsecp256k1_v0_8_1_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_8
|
|||
rng->retry = 1;
|
||||
}
|
||||
|
||||
static void rustsecp256k1_v0_8_1_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_8_1_rfc6979_hmac_sha256 *rng) {
|
||||
static void rustsecp256k1_v0_9_0_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_9_0_rfc6979_hmac_sha256 *rng) {
|
||||
memset(rng->k, 0, 32);
|
||||
memset(rng->v, 0, 32);
|
||||
rng->retry = 0;
|
||||
|
|
|
@ -13,72 +13,77 @@
|
|||
# endif
|
||||
|
||||
/* Construct an unsigned 128-bit value from a high and a low 64-bit value. */
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_u128_load(rustsecp256k1_v0_8_1_uint128 *r, uint64_t hi, uint64_t lo);
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_u128_load(rustsecp256k1_v0_9_0_uint128 *r, uint64_t hi, uint64_t lo);
|
||||
|
||||
/* Multiply two unsigned 64-bit values a and b and write the result to r. */
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_u128_mul(rustsecp256k1_v0_8_1_uint128 *r, uint64_t a, uint64_t b);
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_u128_mul(rustsecp256k1_v0_9_0_uint128 *r, uint64_t a, uint64_t b);
|
||||
|
||||
/* Multiply two unsigned 64-bit values a and b and add the result to r.
|
||||
* The final result is taken modulo 2^128.
|
||||
*/
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_u128_accum_mul(rustsecp256k1_v0_8_1_uint128 *r, uint64_t a, uint64_t b);
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_u128_accum_mul(rustsecp256k1_v0_9_0_uint128 *r, uint64_t a, uint64_t b);
|
||||
|
||||
/* Add an unsigned 64-bit value a to r.
|
||||
* The final result is taken modulo 2^128.
|
||||
*/
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_u128_accum_u64(rustsecp256k1_v0_8_1_uint128 *r, uint64_t a);
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_u128_accum_u64(rustsecp256k1_v0_9_0_uint128 *r, uint64_t a);
|
||||
|
||||
/* Unsigned (logical) right shift.
|
||||
* Non-constant time in n.
|
||||
*/
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_u128_rshift(rustsecp256k1_v0_8_1_uint128 *r, unsigned int n);
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_u128_rshift(rustsecp256k1_v0_9_0_uint128 *r, unsigned int n);
|
||||
|
||||
/* Return the low 64-bits of a 128-bit value as an unsigned 64-bit value. */
|
||||
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_1_u128_to_u64(const rustsecp256k1_v0_8_1_uint128 *a);
|
||||
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_0_u128_to_u64(const rustsecp256k1_v0_9_0_uint128 *a);
|
||||
|
||||
/* Return the high 64-bits of a 128-bit value as an unsigned 64-bit value. */
|
||||
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_1_u128_hi_u64(const rustsecp256k1_v0_8_1_uint128 *a);
|
||||
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_0_u128_hi_u64(const rustsecp256k1_v0_9_0_uint128 *a);
|
||||
|
||||
/* Write an unsigned 64-bit value to r. */
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_u128_from_u64(rustsecp256k1_v0_8_1_uint128 *r, uint64_t a);
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_u128_from_u64(rustsecp256k1_v0_9_0_uint128 *r, uint64_t a);
|
||||
|
||||
/* Tests if r is strictly less than to 2^n.
|
||||
* n must be strictly less than 128.
|
||||
*/
|
||||
static SECP256K1_INLINE int rustsecp256k1_v0_8_1_u128_check_bits(const rustsecp256k1_v0_8_1_uint128 *r, unsigned int n);
|
||||
static SECP256K1_INLINE int rustsecp256k1_v0_9_0_u128_check_bits(const rustsecp256k1_v0_9_0_uint128 *r, unsigned int n);
|
||||
|
||||
/* Construct an signed 128-bit value from a high and a low 64-bit value. */
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_load(rustsecp256k1_v0_8_1_int128 *r, int64_t hi, uint64_t lo);
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_i128_load(rustsecp256k1_v0_9_0_int128 *r, int64_t hi, uint64_t lo);
|
||||
|
||||
/* Multiply two signed 64-bit values a and b and write the result to r. */
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_mul(rustsecp256k1_v0_8_1_int128 *r, int64_t a, int64_t b);
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_i128_mul(rustsecp256k1_v0_9_0_int128 *r, int64_t a, int64_t b);
|
||||
|
||||
/* Multiply two signed 64-bit values a and b and add the result to r.
|
||||
* Overflow or underflow from the addition is undefined behaviour.
|
||||
*/
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_accum_mul(rustsecp256k1_v0_8_1_int128 *r, int64_t a, int64_t b);
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_i128_accum_mul(rustsecp256k1_v0_9_0_int128 *r, int64_t a, int64_t b);
|
||||
|
||||
/* Compute a*d - b*c from signed 64-bit values and write the result to r. */
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_det(rustsecp256k1_v0_8_1_int128 *r, int64_t a, int64_t b, int64_t c, int64_t d);
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_i128_det(rustsecp256k1_v0_9_0_int128 *r, int64_t a, int64_t b, int64_t c, int64_t d);
|
||||
|
||||
/* Signed (arithmetic) right shift.
|
||||
* Non-constant time in b.
|
||||
*/
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_rshift(rustsecp256k1_v0_8_1_int128 *r, unsigned int b);
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_i128_rshift(rustsecp256k1_v0_9_0_int128 *r, unsigned int b);
|
||||
|
||||
/* Return the low 64-bits of a 128-bit value interpreted as an signed 64-bit value. */
|
||||
static SECP256K1_INLINE int64_t rustsecp256k1_v0_8_1_i128_to_i64(const rustsecp256k1_v0_8_1_int128 *a);
|
||||
/* Return the input value modulo 2^64. */
|
||||
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_0_i128_to_u64(const rustsecp256k1_v0_9_0_int128 *a);
|
||||
|
||||
/* Return the value as a signed 64-bit value.
|
||||
* Requires the input to be between INT64_MIN and INT64_MAX.
|
||||
*/
|
||||
static SECP256K1_INLINE int64_t rustsecp256k1_v0_9_0_i128_to_i64(const rustsecp256k1_v0_9_0_int128 *a);
|
||||
|
||||
/* Write a signed 64-bit value to r. */
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_from_i64(rustsecp256k1_v0_8_1_int128 *r, int64_t a);
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_i128_from_i64(rustsecp256k1_v0_9_0_int128 *r, int64_t a);
|
||||
|
||||
/* Compare two 128-bit values for equality. */
|
||||
static SECP256K1_INLINE int rustsecp256k1_v0_8_1_i128_eq_var(const rustsecp256k1_v0_8_1_int128 *a, const rustsecp256k1_v0_8_1_int128 *b);
|
||||
static SECP256K1_INLINE int rustsecp256k1_v0_9_0_i128_eq_var(const rustsecp256k1_v0_9_0_int128 *a, const rustsecp256k1_v0_9_0_int128 *b);
|
||||
|
||||
/* Tests if r is equal to 2^n.
|
||||
/* Tests if r is equal to sign*2^n (sign must be 1 or -1).
|
||||
* n must be strictly less than 127.
|
||||
*/
|
||||
static SECP256K1_INLINE int rustsecp256k1_v0_8_1_i128_check_pow2(const rustsecp256k1_v0_8_1_int128 *r, unsigned int n);
|
||||
static SECP256K1_INLINE int rustsecp256k1_v0_9_0_i128_check_pow2(const rustsecp256k1_v0_9_0_int128 *r, unsigned int n, int sign);
|
||||
|
||||
#endif
|
||||
|
||||
|
|
|
@ -13,7 +13,7 @@ SECP256K1_GNUC_EXT typedef __int128 int128_t;
|
|||
/* No (U)INT128_C macros because compilers providing __int128 do not support 128-bit literals. */
|
||||
#endif
|
||||
|
||||
typedef uint128_t rustsecp256k1_v0_8_1_uint128;
|
||||
typedef int128_t rustsecp256k1_v0_8_1_int128;
|
||||
typedef uint128_t rustsecp256k1_v0_9_0_uint128;
|
||||
typedef int128_t rustsecp256k1_v0_9_0_int128;
|
||||
|
||||
#endif
|
||||
|
|
|
@ -2,86 +2,93 @@
|
|||
#define SECP256K1_INT128_NATIVE_IMPL_H
|
||||
|
||||
#include "int128.h"
|
||||
#include "util.h"
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_u128_load(rustsecp256k1_v0_8_1_uint128 *r, uint64_t hi, uint64_t lo) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_u128_load(rustsecp256k1_v0_9_0_uint128 *r, uint64_t hi, uint64_t lo) {
|
||||
*r = (((uint128_t)hi) << 64) + lo;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_u128_mul(rustsecp256k1_v0_8_1_uint128 *r, uint64_t a, uint64_t b) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_u128_mul(rustsecp256k1_v0_9_0_uint128 *r, uint64_t a, uint64_t b) {
|
||||
*r = (uint128_t)a * b;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_u128_accum_mul(rustsecp256k1_v0_8_1_uint128 *r, uint64_t a, uint64_t b) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_u128_accum_mul(rustsecp256k1_v0_9_0_uint128 *r, uint64_t a, uint64_t b) {
|
||||
*r += (uint128_t)a * b;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_u128_accum_u64(rustsecp256k1_v0_8_1_uint128 *r, uint64_t a) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_u128_accum_u64(rustsecp256k1_v0_9_0_uint128 *r, uint64_t a) {
|
||||
*r += a;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_u128_rshift(rustsecp256k1_v0_8_1_uint128 *r, unsigned int n) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_u128_rshift(rustsecp256k1_v0_9_0_uint128 *r, unsigned int n) {
|
||||
VERIFY_CHECK(n < 128);
|
||||
*r >>= n;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_1_u128_to_u64(const rustsecp256k1_v0_8_1_uint128 *a) {
|
||||
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_0_u128_to_u64(const rustsecp256k1_v0_9_0_uint128 *a) {
|
||||
return (uint64_t)(*a);
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_1_u128_hi_u64(const rustsecp256k1_v0_8_1_uint128 *a) {
|
||||
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_0_u128_hi_u64(const rustsecp256k1_v0_9_0_uint128 *a) {
|
||||
return (uint64_t)(*a >> 64);
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_u128_from_u64(rustsecp256k1_v0_8_1_uint128 *r, uint64_t a) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_u128_from_u64(rustsecp256k1_v0_9_0_uint128 *r, uint64_t a) {
|
||||
*r = a;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE int rustsecp256k1_v0_8_1_u128_check_bits(const rustsecp256k1_v0_8_1_uint128 *r, unsigned int n) {
|
||||
static SECP256K1_INLINE int rustsecp256k1_v0_9_0_u128_check_bits(const rustsecp256k1_v0_9_0_uint128 *r, unsigned int n) {
|
||||
VERIFY_CHECK(n < 128);
|
||||
return (*r >> n == 0);
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_load(rustsecp256k1_v0_8_1_int128 *r, int64_t hi, uint64_t lo) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_i128_load(rustsecp256k1_v0_9_0_int128 *r, int64_t hi, uint64_t lo) {
|
||||
*r = (((uint128_t)(uint64_t)hi) << 64) + lo;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_mul(rustsecp256k1_v0_8_1_int128 *r, int64_t a, int64_t b) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_i128_mul(rustsecp256k1_v0_9_0_int128 *r, int64_t a, int64_t b) {
|
||||
*r = (int128_t)a * b;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_accum_mul(rustsecp256k1_v0_8_1_int128 *r, int64_t a, int64_t b) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_i128_accum_mul(rustsecp256k1_v0_9_0_int128 *r, int64_t a, int64_t b) {
|
||||
int128_t ab = (int128_t)a * b;
|
||||
VERIFY_CHECK(0 <= ab ? *r <= INT128_MAX - ab : INT128_MIN - ab <= *r);
|
||||
*r += ab;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_det(rustsecp256k1_v0_8_1_int128 *r, int64_t a, int64_t b, int64_t c, int64_t d) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_i128_det(rustsecp256k1_v0_9_0_int128 *r, int64_t a, int64_t b, int64_t c, int64_t d) {
|
||||
int128_t ad = (int128_t)a * d;
|
||||
int128_t bc = (int128_t)b * c;
|
||||
VERIFY_CHECK(0 <= bc ? INT128_MIN + bc <= ad : ad <= INT128_MAX + bc);
|
||||
*r = ad - bc;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_rshift(rustsecp256k1_v0_8_1_int128 *r, unsigned int n) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_i128_rshift(rustsecp256k1_v0_9_0_int128 *r, unsigned int n) {
|
||||
VERIFY_CHECK(n < 128);
|
||||
*r >>= n;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE int64_t rustsecp256k1_v0_8_1_i128_to_i64(const rustsecp256k1_v0_8_1_int128 *a) {
|
||||
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_0_i128_to_u64(const rustsecp256k1_v0_9_0_int128 *a) {
|
||||
return (uint64_t)*a;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE int64_t rustsecp256k1_v0_9_0_i128_to_i64(const rustsecp256k1_v0_9_0_int128 *a) {
|
||||
VERIFY_CHECK(INT64_MIN <= *a && *a <= INT64_MAX);
|
||||
return *a;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_from_i64(rustsecp256k1_v0_8_1_int128 *r, int64_t a) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_i128_from_i64(rustsecp256k1_v0_9_0_int128 *r, int64_t a) {
|
||||
*r = a;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE int rustsecp256k1_v0_8_1_i128_eq_var(const rustsecp256k1_v0_8_1_int128 *a, const rustsecp256k1_v0_8_1_int128 *b) {
|
||||
static SECP256K1_INLINE int rustsecp256k1_v0_9_0_i128_eq_var(const rustsecp256k1_v0_9_0_int128 *a, const rustsecp256k1_v0_9_0_int128 *b) {
|
||||
return *a == *b;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE int rustsecp256k1_v0_8_1_i128_check_pow2(const rustsecp256k1_v0_8_1_int128 *r, unsigned int n) {
|
||||
static SECP256K1_INLINE int rustsecp256k1_v0_9_0_i128_check_pow2(const rustsecp256k1_v0_9_0_int128 *r, unsigned int n, int sign) {
|
||||
VERIFY_CHECK(n < 127);
|
||||
return (*r == (int128_t)1 << n);
|
||||
VERIFY_CHECK(sign == 1 || sign == -1);
|
||||
return (*r == (int128_t)((uint128_t)sign << n));
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -7,8 +7,8 @@
|
|||
typedef struct {
|
||||
uint64_t lo;
|
||||
uint64_t hi;
|
||||
} rustsecp256k1_v0_8_1_uint128;
|
||||
} rustsecp256k1_v0_9_0_uint128;
|
||||
|
||||
typedef rustsecp256k1_v0_8_1_uint128 rustsecp256k1_v0_8_1_int128;
|
||||
typedef rustsecp256k1_v0_9_0_uint128 rustsecp256k1_v0_9_0_int128;
|
||||
|
||||
#endif
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
#define SECP256K1_INT128_STRUCT_IMPL_H
|
||||
|
||||
#include "int128.h"
|
||||
#include "util.h"
|
||||
|
||||
#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64)) /* MSVC */
|
||||
# include <intrin.h>
|
||||
|
@ -12,23 +13,23 @@
|
|||
# if defined(SECP256K1_MSVC_MULH_TEST_OVERRIDE)
|
||||
# pragma message(__FILE__ ": SECP256K1_MSVC_MULH_TEST_OVERRIDE is defined, forcing use of __(u)mulh.")
|
||||
# endif
|
||||
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_1_umul128(uint64_t a, uint64_t b, uint64_t* hi) {
|
||||
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_0_umul128(uint64_t a, uint64_t b, uint64_t* hi) {
|
||||
*hi = __umulh(a, b);
|
||||
return a * b;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE int64_t rustsecp256k1_v0_8_1_mul128(int64_t a, int64_t b, int64_t* hi) {
|
||||
static SECP256K1_INLINE int64_t rustsecp256k1_v0_9_0_mul128(int64_t a, int64_t b, int64_t* hi) {
|
||||
*hi = __mulh(a, b);
|
||||
return (uint64_t)a * (uint64_t)b;
|
||||
}
|
||||
# else
|
||||
/* On x84_64 MSVC, use native _(u)mul128 for 64x64->128 multiplications. */
|
||||
# define rustsecp256k1_v0_8_1_umul128 _umul128
|
||||
# define rustsecp256k1_v0_8_1_mul128 _mul128
|
||||
# define rustsecp256k1_v0_9_0_umul128 _umul128
|
||||
# define rustsecp256k1_v0_9_0_mul128 _mul128
|
||||
# endif
|
||||
#else
|
||||
/* On other systems, emulate 64x64->128 multiplications using 32x32->64 multiplications. */
|
||||
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_1_umul128(uint64_t a, uint64_t b, uint64_t* hi) {
|
||||
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_0_umul128(uint64_t a, uint64_t b, uint64_t* hi) {
|
||||
uint64_t ll = (uint64_t)(uint32_t)a * (uint32_t)b;
|
||||
uint64_t lh = (uint32_t)a * (b >> 32);
|
||||
uint64_t hl = (a >> 32) * (uint32_t)b;
|
||||
|
@ -38,7 +39,7 @@ static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_1_umul128(uint64_t a, uint64
|
|||
return (mid34 << 32) + (uint32_t)ll;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE int64_t rustsecp256k1_v0_8_1_mul128(int64_t a, int64_t b, int64_t* hi) {
|
||||
static SECP256K1_INLINE int64_t rustsecp256k1_v0_9_0_mul128(int64_t a, int64_t b, int64_t* hi) {
|
||||
uint64_t ll = (uint64_t)(uint32_t)a * (uint32_t)b;
|
||||
int64_t lh = (uint32_t)a * (b >> 32);
|
||||
int64_t hl = (a >> 32) * (uint32_t)b;
|
||||
|
@ -49,23 +50,23 @@ static SECP256K1_INLINE int64_t rustsecp256k1_v0_8_1_mul128(int64_t a, int64_t b
|
|||
}
|
||||
#endif
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_u128_load(rustsecp256k1_v0_8_1_uint128 *r, uint64_t hi, uint64_t lo) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_u128_load(rustsecp256k1_v0_9_0_uint128 *r, uint64_t hi, uint64_t lo) {
|
||||
r->hi = hi;
|
||||
r->lo = lo;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_u128_mul(rustsecp256k1_v0_8_1_uint128 *r, uint64_t a, uint64_t b) {
|
||||
r->lo = rustsecp256k1_v0_8_1_umul128(a, b, &r->hi);
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_u128_mul(rustsecp256k1_v0_9_0_uint128 *r, uint64_t a, uint64_t b) {
|
||||
r->lo = rustsecp256k1_v0_9_0_umul128(a, b, &r->hi);
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_u128_accum_mul(rustsecp256k1_v0_8_1_uint128 *r, uint64_t a, uint64_t b) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_u128_accum_mul(rustsecp256k1_v0_9_0_uint128 *r, uint64_t a, uint64_t b) {
|
||||
uint64_t lo, hi;
|
||||
lo = rustsecp256k1_v0_8_1_umul128(a, b, &hi);
|
||||
lo = rustsecp256k1_v0_9_0_umul128(a, b, &hi);
|
||||
r->lo += lo;
|
||||
r->hi += hi + (r->lo < lo);
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_u128_accum_u64(rustsecp256k1_v0_8_1_uint128 *r, uint64_t a) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_u128_accum_u64(rustsecp256k1_v0_9_0_uint128 *r, uint64_t a) {
|
||||
r->lo += a;
|
||||
r->hi += r->lo < a;
|
||||
}
|
||||
|
@ -73,50 +74,55 @@ static SECP256K1_INLINE void rustsecp256k1_v0_8_1_u128_accum_u64(rustsecp256k1_v
|
|||
/* Unsigned (logical) right shift.
|
||||
* Non-constant time in n.
|
||||
*/
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_u128_rshift(rustsecp256k1_v0_8_1_uint128 *r, unsigned int n) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_u128_rshift(rustsecp256k1_v0_9_0_uint128 *r, unsigned int n) {
|
||||
VERIFY_CHECK(n < 128);
|
||||
if (n >= 64) {
|
||||
r->lo = r->hi >> (n-64);
|
||||
r->hi = 0;
|
||||
} else if (n > 0) {
|
||||
#if defined(_MSC_VER) && defined(_M_X64)
|
||||
VERIFY_CHECK(n < 64);
|
||||
r->lo = __shiftright128(r->lo, r->hi, n);
|
||||
#else
|
||||
r->lo = ((1U * r->hi) << (64-n)) | r->lo >> n;
|
||||
#endif
|
||||
r->hi >>= n;
|
||||
}
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_1_u128_to_u64(const rustsecp256k1_v0_8_1_uint128 *a) {
|
||||
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_0_u128_to_u64(const rustsecp256k1_v0_9_0_uint128 *a) {
|
||||
return a->lo;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_1_u128_hi_u64(const rustsecp256k1_v0_8_1_uint128 *a) {
|
||||
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_0_u128_hi_u64(const rustsecp256k1_v0_9_0_uint128 *a) {
|
||||
return a->hi;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_u128_from_u64(rustsecp256k1_v0_8_1_uint128 *r, uint64_t a) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_u128_from_u64(rustsecp256k1_v0_9_0_uint128 *r, uint64_t a) {
|
||||
r->hi = 0;
|
||||
r->lo = a;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE int rustsecp256k1_v0_8_1_u128_check_bits(const rustsecp256k1_v0_8_1_uint128 *r, unsigned int n) {
|
||||
static SECP256K1_INLINE int rustsecp256k1_v0_9_0_u128_check_bits(const rustsecp256k1_v0_9_0_uint128 *r, unsigned int n) {
|
||||
VERIFY_CHECK(n < 128);
|
||||
return n >= 64 ? r->hi >> (n - 64) == 0
|
||||
: r->hi == 0 && r->lo >> n == 0;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_load(rustsecp256k1_v0_8_1_int128 *r, int64_t hi, uint64_t lo) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_i128_load(rustsecp256k1_v0_9_0_int128 *r, int64_t hi, uint64_t lo) {
|
||||
r->hi = hi;
|
||||
r->lo = lo;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_mul(rustsecp256k1_v0_8_1_int128 *r, int64_t a, int64_t b) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_i128_mul(rustsecp256k1_v0_9_0_int128 *r, int64_t a, int64_t b) {
|
||||
int64_t hi;
|
||||
r->lo = (uint64_t)rustsecp256k1_v0_8_1_mul128(a, b, &hi);
|
||||
r->lo = (uint64_t)rustsecp256k1_v0_9_0_mul128(a, b, &hi);
|
||||
r->hi = (uint64_t)hi;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_accum_mul(rustsecp256k1_v0_8_1_int128 *r, int64_t a, int64_t b) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_i128_accum_mul(rustsecp256k1_v0_9_0_int128 *r, int64_t a, int64_t b) {
|
||||
int64_t hi;
|
||||
uint64_t lo = (uint64_t)rustsecp256k1_v0_8_1_mul128(a, b, &hi);
|
||||
uint64_t lo = (uint64_t)rustsecp256k1_v0_9_0_mul128(a, b, &hi);
|
||||
r->lo += lo;
|
||||
hi += r->lo < lo;
|
||||
/* Verify no overflow.
|
||||
|
@ -133,9 +139,9 @@ static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_accum_mul(rustsecp256k1_v
|
|||
r->hi += hi;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_dissip_mul(rustsecp256k1_v0_8_1_int128 *r, int64_t a, int64_t b) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_i128_dissip_mul(rustsecp256k1_v0_9_0_int128 *r, int64_t a, int64_t b) {
|
||||
int64_t hi;
|
||||
uint64_t lo = (uint64_t)rustsecp256k1_v0_8_1_mul128(a, b, &hi);
|
||||
uint64_t lo = (uint64_t)rustsecp256k1_v0_9_0_mul128(a, b, &hi);
|
||||
hi += r->lo < lo;
|
||||
/* Verify no overflow.
|
||||
* If r represents a positive value (the sign bit is not set) and the value we are subtracting is a negative value (the sign bit is set),
|
||||
|
@ -151,15 +157,15 @@ static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_dissip_mul(rustsecp256k1_
|
|||
r->lo -= lo;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_det(rustsecp256k1_v0_8_1_int128 *r, int64_t a, int64_t b, int64_t c, int64_t d) {
|
||||
rustsecp256k1_v0_8_1_i128_mul(r, a, d);
|
||||
rustsecp256k1_v0_8_1_i128_dissip_mul(r, b, c);
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_i128_det(rustsecp256k1_v0_9_0_int128 *r, int64_t a, int64_t b, int64_t c, int64_t d) {
|
||||
rustsecp256k1_v0_9_0_i128_mul(r, a, d);
|
||||
rustsecp256k1_v0_9_0_i128_dissip_mul(r, b, c);
|
||||
}
|
||||
|
||||
/* Signed (arithmetic) right shift.
|
||||
* Non-constant time in n.
|
||||
*/
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_rshift(rustsecp256k1_v0_8_1_int128 *r, unsigned int n) {
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_i128_rshift(rustsecp256k1_v0_9_0_int128 *r, unsigned int n) {
|
||||
VERIFY_CHECK(n < 128);
|
||||
if (n >= 64) {
|
||||
r->lo = (uint64_t)((int64_t)(r->hi) >> (n-64));
|
||||
|
@ -170,23 +176,30 @@ static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_rshift(rustsecp256k1_v0_8
|
|||
}
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE int64_t rustsecp256k1_v0_8_1_i128_to_i64(const rustsecp256k1_v0_8_1_int128 *a) {
|
||||
return (int64_t)a->lo;
|
||||
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_9_0_i128_to_u64(const rustsecp256k1_v0_9_0_int128 *a) {
|
||||
return a->lo;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_8_1_i128_from_i64(rustsecp256k1_v0_8_1_int128 *r, int64_t a) {
|
||||
static SECP256K1_INLINE int64_t rustsecp256k1_v0_9_0_i128_to_i64(const rustsecp256k1_v0_9_0_int128 *a) {
|
||||
/* Verify that a represents a 64 bit signed value by checking that the high bits are a sign extension of the low bits. */
|
||||
VERIFY_CHECK(a->hi == -(a->lo >> 63));
|
||||
return (int64_t)rustsecp256k1_v0_9_0_i128_to_u64(a);
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE void rustsecp256k1_v0_9_0_i128_from_i64(rustsecp256k1_v0_9_0_int128 *r, int64_t a) {
|
||||
r->hi = (uint64_t)(a >> 63);
|
||||
r->lo = (uint64_t)a;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE int rustsecp256k1_v0_8_1_i128_eq_var(const rustsecp256k1_v0_8_1_int128 *a, const rustsecp256k1_v0_8_1_int128 *b) {
|
||||
static SECP256K1_INLINE int rustsecp256k1_v0_9_0_i128_eq_var(const rustsecp256k1_v0_9_0_int128 *a, const rustsecp256k1_v0_9_0_int128 *b) {
|
||||
return a->hi == b->hi && a->lo == b->lo;
|
||||
}
|
||||
|
||||
static SECP256K1_INLINE int rustsecp256k1_v0_8_1_i128_check_pow2(const rustsecp256k1_v0_8_1_int128 *r, unsigned int n) {
|
||||
static SECP256K1_INLINE int rustsecp256k1_v0_9_0_i128_check_pow2(const rustsecp256k1_v0_9_0_int128 *r, unsigned int n, int sign) {
|
||||
VERIFY_CHECK(n < 127);
|
||||
return n >= 64 ? r->hi == (uint64_t)1 << (n - 64) && r->lo == 0
|
||||
: r->hi == 0 && r->lo == (uint64_t)1 << n;
|
||||
VERIFY_CHECK(sign == 1 || sign == -1);
|
||||
return n >= 64 ? r->hi == (uint64_t)sign << (n - 64) && r->lo == 0
|
||||
: r->hi == (uint64_t)(sign >> 1) && r->lo == (uint64_t)sign << n;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -7,10 +7,6 @@
|
|||
#ifndef SECP256K1_MODINV32_H
|
||||
#define SECP256K1_MODINV32_H
|
||||
|
||||
#if defined HAVE_CONFIG_H
|
||||
#include "libsecp256k1-config.h"
|
||||
#endif
|
||||
|
||||
#include "util.h"
|
||||
|
||||
/* A signed 30-bit limb representation of integers.
|
||||
|
@ -18,15 +14,15 @@
|
|||
* Its value is sum(v[i] * 2^(30*i), i=0..8). */
|
||||
typedef struct {
|
||||
int32_t v[9];
|
||||
} rustsecp256k1_v0_8_1_modinv32_signed30;
|
||||
} rustsecp256k1_v0_9_0_modinv32_signed30;
|
||||
|
||||
typedef struct {
|
||||
/* The modulus in signed30 notation, must be odd and in [3, 2^256]. */
|
||||
rustsecp256k1_v0_8_1_modinv32_signed30 modulus;
|
||||
rustsecp256k1_v0_9_0_modinv32_signed30 modulus;
|
||||
|
||||
/* modulus^{-1} mod 2^30 */
|
||||
uint32_t modulus_inv30;
|
||||
} rustsecp256k1_v0_8_1_modinv32_modinfo;
|
||||
} rustsecp256k1_v0_9_0_modinv32_modinfo;
|
||||
|
||||
/* Replace x with its modular inverse mod modinfo->modulus. x must be in range [0, modulus).
|
||||
* If x is zero, the result will be zero as well. If not, the inverse must exist (i.e., the gcd of
|
||||
|
@ -34,9 +30,14 @@ typedef struct {
|
|||
*
|
||||
* On output, all of x's limbs will be in [0, 2^30).
|
||||
*/
|
||||
static void rustsecp256k1_v0_8_1_modinv32_var(rustsecp256k1_v0_8_1_modinv32_signed30 *x, const rustsecp256k1_v0_8_1_modinv32_modinfo *modinfo);
|
||||
static void rustsecp256k1_v0_9_0_modinv32_var(rustsecp256k1_v0_9_0_modinv32_signed30 *x, const rustsecp256k1_v0_9_0_modinv32_modinfo *modinfo);
|
||||
|
||||
/* Same as rustsecp256k1_v0_8_1_modinv32_var, but constant time in x (not in the modulus). */
|
||||
static void rustsecp256k1_v0_8_1_modinv32(rustsecp256k1_v0_8_1_modinv32_signed30 *x, const rustsecp256k1_v0_8_1_modinv32_modinfo *modinfo);
|
||||
/* Same as rustsecp256k1_v0_9_0_modinv32_var, but constant time in x (not in the modulus). */
|
||||
static void rustsecp256k1_v0_9_0_modinv32(rustsecp256k1_v0_9_0_modinv32_signed30 *x, const rustsecp256k1_v0_9_0_modinv32_modinfo *modinfo);
|
||||
|
||||
/* Compute the Jacobi symbol for (x | modinfo->modulus). x must be coprime with modulus (and thus
|
||||
* cannot be 0, as modulus >= 3). All limbs of x must be non-negative. Returns 0 if the result
|
||||
* cannot be computed. */
|
||||
static int rustsecp256k1_v0_9_0_jacobi32_maybe_var(const rustsecp256k1_v0_9_0_modinv32_signed30 *x, const rustsecp256k1_v0_9_0_modinv32_modinfo *modinfo);
|
||||
|
||||
#endif /* SECP256K1_MODINV32_H */
|
||||
|
|
|
@ -21,10 +21,10 @@
|
|||
*/
|
||||
|
||||
#ifdef VERIFY
|
||||
static const rustsecp256k1_v0_8_1_modinv32_signed30 SECP256K1_SIGNED30_ONE = {{1}};
|
||||
static const rustsecp256k1_v0_9_0_modinv32_signed30 SECP256K1_SIGNED30_ONE = {{1}};
|
||||
|
||||
/* Compute a*factor and put it in r. All but the top limb in r will be in range [0,2^30). */
|
||||
static void rustsecp256k1_v0_8_1_modinv32_mul_30(rustsecp256k1_v0_8_1_modinv32_signed30 *r, const rustsecp256k1_v0_8_1_modinv32_signed30 *a, int alen, int32_t factor) {
|
||||
static void rustsecp256k1_v0_9_0_modinv32_mul_30(rustsecp256k1_v0_9_0_modinv32_signed30 *r, const rustsecp256k1_v0_9_0_modinv32_signed30 *a, int alen, int32_t factor) {
|
||||
const int32_t M30 = (int32_t)(UINT32_MAX >> 2);
|
||||
int64_t c = 0;
|
||||
int i;
|
||||
|
@ -38,11 +38,11 @@ static void rustsecp256k1_v0_8_1_modinv32_mul_30(rustsecp256k1_v0_8_1_modinv32_s
|
|||
}
|
||||
|
||||
/* Return -1 for a<b*factor, 0 for a==b*factor, 1 for a>b*factor. A consists of alen limbs; b has 9. */
|
||||
static int rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(const rustsecp256k1_v0_8_1_modinv32_signed30 *a, int alen, const rustsecp256k1_v0_8_1_modinv32_signed30 *b, int32_t factor) {
|
||||
static int rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(const rustsecp256k1_v0_9_0_modinv32_signed30 *a, int alen, const rustsecp256k1_v0_9_0_modinv32_signed30 *b, int32_t factor) {
|
||||
int i;
|
||||
rustsecp256k1_v0_8_1_modinv32_signed30 am, bm;
|
||||
rustsecp256k1_v0_8_1_modinv32_mul_30(&am, a, alen, 1); /* Normalize all but the top limb of a. */
|
||||
rustsecp256k1_v0_8_1_modinv32_mul_30(&bm, b, 9, factor);
|
||||
rustsecp256k1_v0_9_0_modinv32_signed30 am, bm;
|
||||
rustsecp256k1_v0_9_0_modinv32_mul_30(&am, a, alen, 1); /* Normalize all but the top limb of a. */
|
||||
rustsecp256k1_v0_9_0_modinv32_mul_30(&bm, b, 9, factor);
|
||||
for (i = 0; i < 8; ++i) {
|
||||
/* Verify that all but the top limb of a and b are normalized. */
|
||||
VERIFY_CHECK(am.v[i] >> 30 == 0);
|
||||
|
@ -60,11 +60,11 @@ static int rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(const rustsecp256k1_v0_8_1_m
|
|||
* to it to bring it to range [0,modulus). If sign < 0, the input will also be negated in the
|
||||
* process. The input must have limbs in range (-2^30,2^30). The output will have limbs in range
|
||||
* [0,2^30). */
|
||||
static void rustsecp256k1_v0_8_1_modinv32_normalize_30(rustsecp256k1_v0_8_1_modinv32_signed30 *r, int32_t sign, const rustsecp256k1_v0_8_1_modinv32_modinfo *modinfo) {
|
||||
static void rustsecp256k1_v0_9_0_modinv32_normalize_30(rustsecp256k1_v0_9_0_modinv32_signed30 *r, int32_t sign, const rustsecp256k1_v0_9_0_modinv32_modinfo *modinfo) {
|
||||
const int32_t M30 = (int32_t)(UINT32_MAX >> 2);
|
||||
int32_t r0 = r->v[0], r1 = r->v[1], r2 = r->v[2], r3 = r->v[3], r4 = r->v[4],
|
||||
r5 = r->v[5], r6 = r->v[6], r7 = r->v[7], r8 = r->v[8];
|
||||
int32_t cond_add, cond_negate;
|
||||
volatile int32_t cond_add, cond_negate;
|
||||
|
||||
#ifdef VERIFY
|
||||
/* Verify that all limbs are in range (-2^30,2^30). */
|
||||
|
@ -73,8 +73,8 @@ static void rustsecp256k1_v0_8_1_modinv32_normalize_30(rustsecp256k1_v0_8_1_modi
|
|||
VERIFY_CHECK(r->v[i] >= -M30);
|
||||
VERIFY_CHECK(r->v[i] <= M30);
|
||||
}
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, -2) > 0); /* r > -2*modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, -2) > 0); /* r > -2*modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */
|
||||
#endif
|
||||
|
||||
/* In a first step, add the modulus if the input is negative, and then negate if requested.
|
||||
|
@ -154,8 +154,8 @@ static void rustsecp256k1_v0_8_1_modinv32_normalize_30(rustsecp256k1_v0_8_1_modi
|
|||
VERIFY_CHECK(r6 >> 30 == 0);
|
||||
VERIFY_CHECK(r7 >> 30 == 0);
|
||||
VERIFY_CHECK(r8 >> 30 == 0);
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 0) >= 0); /* r >= 0 */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 0) >= 0); /* r >= 0 */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -166,7 +166,7 @@ static void rustsecp256k1_v0_8_1_modinv32_normalize_30(rustsecp256k1_v0_8_1_modi
|
|||
*/
|
||||
typedef struct {
|
||||
int32_t u, v, q, r;
|
||||
} rustsecp256k1_v0_8_1_modinv32_trans2x2;
|
||||
} rustsecp256k1_v0_9_0_modinv32_trans2x2;
|
||||
|
||||
/* Compute the transition matrix and zeta for 30 divsteps.
|
||||
*
|
||||
|
@ -178,7 +178,7 @@ typedef struct {
|
|||
*
|
||||
* Implements the divsteps_n_matrix function from the explanation.
|
||||
*/
|
||||
static int32_t rustsecp256k1_v0_8_1_modinv32_divsteps_30(int32_t zeta, uint32_t f0, uint32_t g0, rustsecp256k1_v0_8_1_modinv32_trans2x2 *t) {
|
||||
static int32_t rustsecp256k1_v0_9_0_modinv32_divsteps_30(int32_t zeta, uint32_t f0, uint32_t g0, rustsecp256k1_v0_9_0_modinv32_trans2x2 *t) {
|
||||
/* u,v,q,r are the elements of the transformation matrix being built up,
|
||||
* starting with the identity matrix. Semantically they are signed integers
|
||||
* in range [-2^30,2^30], but here represented as unsigned mod 2^32. This
|
||||
|
@ -186,7 +186,8 @@ static int32_t rustsecp256k1_v0_8_1_modinv32_divsteps_30(int32_t zeta, uint32_t
|
|||
* being inside [-2^31,2^31) means that casting to signed works correctly.
|
||||
*/
|
||||
uint32_t u = 1, v = 0, q = 0, r = 1;
|
||||
uint32_t c1, c2, f = f0, g = g0, x, y, z;
|
||||
volatile uint32_t c1, c2;
|
||||
uint32_t mask1, mask2, f = f0, g = g0, x, y, z;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 30; ++i) {
|
||||
|
@ -195,23 +196,25 @@ static int32_t rustsecp256k1_v0_8_1_modinv32_divsteps_30(int32_t zeta, uint32_t
|
|||
VERIFY_CHECK((q * f0 + r * g0) == g << i);
|
||||
/* Compute conditional masks for (zeta < 0) and for (g & 1). */
|
||||
c1 = zeta >> 31;
|
||||
c2 = -(g & 1);
|
||||
mask1 = c1;
|
||||
c2 = g & 1;
|
||||
mask2 = -c2;
|
||||
/* Compute x,y,z, conditionally negated versions of f,u,v. */
|
||||
x = (f ^ c1) - c1;
|
||||
y = (u ^ c1) - c1;
|
||||
z = (v ^ c1) - c1;
|
||||
x = (f ^ mask1) - mask1;
|
||||
y = (u ^ mask1) - mask1;
|
||||
z = (v ^ mask1) - mask1;
|
||||
/* Conditionally add x,y,z to g,q,r. */
|
||||
g += x & c2;
|
||||
q += y & c2;
|
||||
r += z & c2;
|
||||
/* In what follows, c1 is a condition mask for (zeta < 0) and (g & 1). */
|
||||
c1 &= c2;
|
||||
g += x & mask2;
|
||||
q += y & mask2;
|
||||
r += z & mask2;
|
||||
/* In what follows, mask1 is a condition mask for (zeta < 0) and (g & 1). */
|
||||
mask1 &= mask2;
|
||||
/* Conditionally change zeta into -zeta-2 or zeta-1. */
|
||||
zeta = (zeta ^ c1) - 1;
|
||||
zeta = (zeta ^ mask1) - 1;
|
||||
/* Conditionally add g,q,r to f,u,v. */
|
||||
f += g & c1;
|
||||
u += q & c1;
|
||||
v += r & c1;
|
||||
f += g & mask1;
|
||||
u += q & mask1;
|
||||
v += r & mask1;
|
||||
/* Shifts */
|
||||
g >>= 1;
|
||||
u <<= 1;
|
||||
|
@ -232,19 +235,8 @@ static int32_t rustsecp256k1_v0_8_1_modinv32_divsteps_30(int32_t zeta, uint32_t
|
|||
return zeta;
|
||||
}
|
||||
|
||||
/* Compute the transition matrix and eta for 30 divsteps (variable time).
|
||||
*
|
||||
* Input: eta: initial eta
|
||||
* f0: bottom limb of initial f
|
||||
* g0: bottom limb of initial g
|
||||
* Output: t: transition matrix
|
||||
* Return: final eta
|
||||
*
|
||||
* Implements the divsteps_n_matrix_var function from the explanation.
|
||||
*/
|
||||
static int32_t rustsecp256k1_v0_8_1_modinv32_divsteps_30_var(int32_t eta, uint32_t f0, uint32_t g0, rustsecp256k1_v0_8_1_modinv32_trans2x2 *t) {
|
||||
/* inv256[i] = -(2*i+1)^-1 (mod 256) */
|
||||
static const uint8_t inv256[128] = {
|
||||
/* rustsecp256k1_v0_9_0_modinv32_inv256[i] = -(2*i+1)^-1 (mod 256) */
|
||||
static const uint8_t rustsecp256k1_v0_9_0_modinv32_inv256[128] = {
|
||||
0xFF, 0x55, 0x33, 0x49, 0xC7, 0x5D, 0x3B, 0x11, 0x0F, 0xE5, 0xC3, 0x59,
|
||||
0xD7, 0xED, 0xCB, 0x21, 0x1F, 0x75, 0x53, 0x69, 0xE7, 0x7D, 0x5B, 0x31,
|
||||
0x2F, 0x05, 0xE3, 0x79, 0xF7, 0x0D, 0xEB, 0x41, 0x3F, 0x95, 0x73, 0x89,
|
||||
|
@ -258,7 +250,18 @@ static int32_t rustsecp256k1_v0_8_1_modinv32_divsteps_30_var(int32_t eta, uint32
|
|||
0xEF, 0xC5, 0xA3, 0x39, 0xB7, 0xCD, 0xAB, 0x01
|
||||
};
|
||||
|
||||
/* Transformation matrix; see comments in rustsecp256k1_v0_8_1_modinv32_divsteps_30. */
|
||||
/* Compute the transition matrix and eta for 30 divsteps (variable time).
|
||||
*
|
||||
* Input: eta: initial eta
|
||||
* f0: bottom limb of initial f
|
||||
* g0: bottom limb of initial g
|
||||
* Output: t: transition matrix
|
||||
* Return: final eta
|
||||
*
|
||||
* Implements the divsteps_n_matrix_var function from the explanation.
|
||||
*/
|
||||
static int32_t rustsecp256k1_v0_9_0_modinv32_divsteps_30_var(int32_t eta, uint32_t f0, uint32_t g0, rustsecp256k1_v0_9_0_modinv32_trans2x2 *t) {
|
||||
/* Transformation matrix; see comments in rustsecp256k1_v0_9_0_modinv32_divsteps_30. */
|
||||
uint32_t u = 1, v = 0, q = 0, r = 1;
|
||||
uint32_t f = f0, g = g0, m;
|
||||
uint16_t w;
|
||||
|
@ -266,7 +269,7 @@ static int32_t rustsecp256k1_v0_8_1_modinv32_divsteps_30_var(int32_t eta, uint32
|
|||
|
||||
for (;;) {
|
||||
/* Use a sentinel bit to count zeros only up to i. */
|
||||
zeros = rustsecp256k1_v0_8_1_ctz32_var(g | (UINT32_MAX << i));
|
||||
zeros = rustsecp256k1_v0_9_0_ctz32_var(g | (UINT32_MAX << i));
|
||||
/* Perform zeros divsteps at once; they all just divide g by two. */
|
||||
g >>= zeros;
|
||||
u <<= zeros;
|
||||
|
@ -297,7 +300,7 @@ static int32_t rustsecp256k1_v0_8_1_modinv32_divsteps_30_var(int32_t eta, uint32
|
|||
VERIFY_CHECK(limit > 0 && limit <= 30);
|
||||
m = (UINT32_MAX >> (32 - limit)) & 255U;
|
||||
/* Find what multiple of f must be added to g to cancel its bottom min(limit, 8) bits. */
|
||||
w = (g * inv256[(f >> 1) & 127]) & m;
|
||||
w = (g * rustsecp256k1_v0_9_0_modinv32_inv256[(f >> 1) & 127]) & m;
|
||||
/* Do so. */
|
||||
g += f * w;
|
||||
q += u * w;
|
||||
|
@ -317,6 +320,86 @@ static int32_t rustsecp256k1_v0_8_1_modinv32_divsteps_30_var(int32_t eta, uint32
|
|||
return eta;
|
||||
}
|
||||
|
||||
/* Compute the transition matrix and eta for 30 posdivsteps (variable time, eta=-delta), and keeps track
|
||||
* of the Jacobi symbol along the way. f0 and g0 must be f and g mod 2^32 rather than 2^30, because
|
||||
* Jacobi tracking requires knowing (f mod 8) rather than just (f mod 2).
|
||||
*
|
||||
* Input: eta: initial eta
|
||||
* f0: bottom limb of initial f
|
||||
* g0: bottom limb of initial g
|
||||
* Output: t: transition matrix
|
||||
* Input/Output: (*jacp & 1) is bitflipped if and only if the Jacobi symbol of (f | g) changes sign
|
||||
* by applying the returned transformation matrix to it. The other bits of *jacp may
|
||||
* change, but are meaningless.
|
||||
* Return: final eta
|
||||
*/
|
||||
static int32_t rustsecp256k1_v0_9_0_modinv32_posdivsteps_30_var(int32_t eta, uint32_t f0, uint32_t g0, rustsecp256k1_v0_9_0_modinv32_trans2x2 *t, int *jacp) {
|
||||
/* Transformation matrix. */
|
||||
uint32_t u = 1, v = 0, q = 0, r = 1;
|
||||
uint32_t f = f0, g = g0, m;
|
||||
uint16_t w;
|
||||
int i = 30, limit, zeros;
|
||||
int jac = *jacp;
|
||||
|
||||
for (;;) {
|
||||
/* Use a sentinel bit to count zeros only up to i. */
|
||||
zeros = rustsecp256k1_v0_9_0_ctz32_var(g | (UINT32_MAX << i));
|
||||
/* Perform zeros divsteps at once; they all just divide g by two. */
|
||||
g >>= zeros;
|
||||
u <<= zeros;
|
||||
v <<= zeros;
|
||||
eta -= zeros;
|
||||
i -= zeros;
|
||||
/* Update the bottom bit of jac: when dividing g by an odd power of 2,
|
||||
* if (f mod 8) is 3 or 5, the Jacobi symbol changes sign. */
|
||||
jac ^= (zeros & ((f >> 1) ^ (f >> 2)));
|
||||
/* We're done once we've done 30 posdivsteps. */
|
||||
if (i == 0) break;
|
||||
VERIFY_CHECK((f & 1) == 1);
|
||||
VERIFY_CHECK((g & 1) == 1);
|
||||
VERIFY_CHECK((u * f0 + v * g0) == f << (30 - i));
|
||||
VERIFY_CHECK((q * f0 + r * g0) == g << (30 - i));
|
||||
/* If eta is negative, negate it and replace f,g with g,f. */
|
||||
if (eta < 0) {
|
||||
uint32_t tmp;
|
||||
eta = -eta;
|
||||
/* Update bottom bit of jac: when swapping f and g, the Jacobi symbol changes sign
|
||||
* if both f and g are 3 mod 4. */
|
||||
jac ^= ((f & g) >> 1);
|
||||
tmp = f; f = g; g = tmp;
|
||||
tmp = u; u = q; q = tmp;
|
||||
tmp = v; v = r; r = tmp;
|
||||
}
|
||||
/* eta is now >= 0. In what follows we're going to cancel out the bottom bits of g. No more
|
||||
* than i can be cancelled out (as we'd be done before that point), and no more than eta+1
|
||||
* can be done as its sign will flip once that happens. */
|
||||
limit = ((int)eta + 1) > i ? i : ((int)eta + 1);
|
||||
/* m is a mask for the bottom min(limit, 8) bits (our table only supports 8 bits). */
|
||||
VERIFY_CHECK(limit > 0 && limit <= 30);
|
||||
m = (UINT32_MAX >> (32 - limit)) & 255U;
|
||||
/* Find what multiple of f must be added to g to cancel its bottom min(limit, 8) bits. */
|
||||
w = (g * rustsecp256k1_v0_9_0_modinv32_inv256[(f >> 1) & 127]) & m;
|
||||
/* Do so. */
|
||||
g += f * w;
|
||||
q += u * w;
|
||||
r += v * w;
|
||||
VERIFY_CHECK((g & m) == 0);
|
||||
}
|
||||
/* Return data in t and return value. */
|
||||
t->u = (int32_t)u;
|
||||
t->v = (int32_t)v;
|
||||
t->q = (int32_t)q;
|
||||
t->r = (int32_t)r;
|
||||
/* The determinant of t must be a power of two. This guarantees that multiplication with t
|
||||
* does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
|
||||
* will be divided out again). As each divstep's individual matrix has determinant 2 or -2,
|
||||
* the aggregate of 30 of them will have determinant 2^30 or -2^30. */
|
||||
VERIFY_CHECK((int64_t)t->u * t->r - (int64_t)t->v * t->q == ((int64_t)1) << 30 ||
|
||||
(int64_t)t->u * t->r - (int64_t)t->v * t->q == -(((int64_t)1) << 30));
|
||||
*jacp = jac;
|
||||
return eta;
|
||||
}
|
||||
|
||||
/* Compute (t/2^30) * [d, e] mod modulus, where t is a transition matrix for 30 divsteps.
|
||||
*
|
||||
* On input and output, d and e are in range (-2*modulus,modulus). All output limbs will be in range
|
||||
|
@ -324,21 +407,19 @@ static int32_t rustsecp256k1_v0_8_1_modinv32_divsteps_30_var(int32_t eta, uint32
|
|||
*
|
||||
* This implements the update_de function from the explanation.
|
||||
*/
|
||||
static void rustsecp256k1_v0_8_1_modinv32_update_de_30(rustsecp256k1_v0_8_1_modinv32_signed30 *d, rustsecp256k1_v0_8_1_modinv32_signed30 *e, const rustsecp256k1_v0_8_1_modinv32_trans2x2 *t, const rustsecp256k1_v0_8_1_modinv32_modinfo* modinfo) {
|
||||
static void rustsecp256k1_v0_9_0_modinv32_update_de_30(rustsecp256k1_v0_9_0_modinv32_signed30 *d, rustsecp256k1_v0_9_0_modinv32_signed30 *e, const rustsecp256k1_v0_9_0_modinv32_trans2x2 *t, const rustsecp256k1_v0_9_0_modinv32_modinfo* modinfo) {
|
||||
const int32_t M30 = (int32_t)(UINT32_MAX >> 2);
|
||||
const int32_t u = t->u, v = t->v, q = t->q, r = t->r;
|
||||
int32_t di, ei, md, me, sd, se;
|
||||
int64_t cd, ce;
|
||||
int i;
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */
|
||||
VERIFY_CHECK((labs(u) + labs(v)) >= 0); /* |u|+|v| doesn't overflow */
|
||||
VERIFY_CHECK((labs(q) + labs(r)) >= 0); /* |q|+|r| doesn't overflow */
|
||||
VERIFY_CHECK((labs(u) + labs(v)) <= M30 + 1); /* |u|+|v| <= 2^30 */
|
||||
VERIFY_CHECK((labs(q) + labs(r)) <= M30 + 1); /* |q|+|r| <= 2^30 */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */
|
||||
VERIFY_CHECK(labs(u) <= (M30 + 1 - labs(v))); /* |u|+|v| <= 2^30 */
|
||||
VERIFY_CHECK(labs(q) <= (M30 + 1 - labs(r))); /* |q|+|r| <= 2^30 */
|
||||
#endif
|
||||
/* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
|
||||
sd = d->v[8] >> 31;
|
||||
|
@ -375,10 +456,10 @@ static void rustsecp256k1_v0_8_1_modinv32_update_de_30(rustsecp256k1_v0_8_1_modi
|
|||
d->v[8] = (int32_t)cd;
|
||||
e->v[8] = (int32_t)ce;
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -386,7 +467,7 @@ static void rustsecp256k1_v0_8_1_modinv32_update_de_30(rustsecp256k1_v0_8_1_modi
|
|||
*
|
||||
* This implements the update_fg function from the explanation.
|
||||
*/
|
||||
static void rustsecp256k1_v0_8_1_modinv32_update_fg_30(rustsecp256k1_v0_8_1_modinv32_signed30 *f, rustsecp256k1_v0_8_1_modinv32_signed30 *g, const rustsecp256k1_v0_8_1_modinv32_trans2x2 *t) {
|
||||
static void rustsecp256k1_v0_9_0_modinv32_update_fg_30(rustsecp256k1_v0_9_0_modinv32_signed30 *f, rustsecp256k1_v0_9_0_modinv32_signed30 *g, const rustsecp256k1_v0_9_0_modinv32_trans2x2 *t) {
|
||||
const int32_t M30 = (int32_t)(UINT32_MAX >> 2);
|
||||
const int32_t u = t->u, v = t->v, q = t->q, r = t->r;
|
||||
int32_t fi, gi;
|
||||
|
@ -421,7 +502,7 @@ static void rustsecp256k1_v0_8_1_modinv32_update_fg_30(rustsecp256k1_v0_8_1_modi
|
|||
*
|
||||
* This implements the update_fg function from the explanation in modinv64_impl.h.
|
||||
*/
|
||||
static void rustsecp256k1_v0_8_1_modinv32_update_fg_30_var(int len, rustsecp256k1_v0_8_1_modinv32_signed30 *f, rustsecp256k1_v0_8_1_modinv32_signed30 *g, const rustsecp256k1_v0_8_1_modinv32_trans2x2 *t) {
|
||||
static void rustsecp256k1_v0_9_0_modinv32_update_fg_30_var(int len, rustsecp256k1_v0_9_0_modinv32_signed30 *f, rustsecp256k1_v0_9_0_modinv32_signed30 *g, const rustsecp256k1_v0_9_0_modinv32_trans2x2 *t) {
|
||||
const int32_t M30 = (int32_t)(UINT32_MAX >> 2);
|
||||
const int32_t u = t->u, v = t->v, q = t->q, r = t->r;
|
||||
int32_t fi, gi;
|
||||
|
@ -452,35 +533,35 @@ static void rustsecp256k1_v0_8_1_modinv32_update_fg_30_var(int len, rustsecp256k
|
|||
}
|
||||
|
||||
/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (constant time in x). */
|
||||
static void rustsecp256k1_v0_8_1_modinv32(rustsecp256k1_v0_8_1_modinv32_signed30 *x, const rustsecp256k1_v0_8_1_modinv32_modinfo *modinfo) {
|
||||
static void rustsecp256k1_v0_9_0_modinv32(rustsecp256k1_v0_9_0_modinv32_signed30 *x, const rustsecp256k1_v0_9_0_modinv32_modinfo *modinfo) {
|
||||
/* Start with d=0, e=1, f=modulus, g=x, zeta=-1. */
|
||||
rustsecp256k1_v0_8_1_modinv32_signed30 d = {{0}};
|
||||
rustsecp256k1_v0_8_1_modinv32_signed30 e = {{1}};
|
||||
rustsecp256k1_v0_8_1_modinv32_signed30 f = modinfo->modulus;
|
||||
rustsecp256k1_v0_8_1_modinv32_signed30 g = *x;
|
||||
rustsecp256k1_v0_9_0_modinv32_signed30 d = {{0}};
|
||||
rustsecp256k1_v0_9_0_modinv32_signed30 e = {{1}};
|
||||
rustsecp256k1_v0_9_0_modinv32_signed30 f = modinfo->modulus;
|
||||
rustsecp256k1_v0_9_0_modinv32_signed30 g = *x;
|
||||
int i;
|
||||
int32_t zeta = -1; /* zeta = -(delta+1/2); delta is initially 1/2. */
|
||||
|
||||
/* Do 20 iterations of 30 divsteps each = 600 divsteps. 590 suffices for 256-bit inputs. */
|
||||
for (i = 0; i < 20; ++i) {
|
||||
/* Compute transition matrix and new zeta after 30 divsteps. */
|
||||
rustsecp256k1_v0_8_1_modinv32_trans2x2 t;
|
||||
zeta = rustsecp256k1_v0_8_1_modinv32_divsteps_30(zeta, f.v[0], g.v[0], &t);
|
||||
rustsecp256k1_v0_9_0_modinv32_trans2x2 t;
|
||||
zeta = rustsecp256k1_v0_9_0_modinv32_divsteps_30(zeta, f.v[0], g.v[0], &t);
|
||||
/* Update d,e using that transition matrix. */
|
||||
rustsecp256k1_v0_8_1_modinv32_update_de_30(&d, &e, &t, modinfo);
|
||||
rustsecp256k1_v0_9_0_modinv32_update_de_30(&d, &e, &t, modinfo);
|
||||
/* Update f,g using that transition matrix. */
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */
|
||||
#endif
|
||||
rustsecp256k1_v0_8_1_modinv32_update_fg_30(&f, &g, &t);
|
||||
rustsecp256k1_v0_9_0_modinv32_update_fg_30(&f, &g, &t);
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -489,28 +570,28 @@ static void rustsecp256k1_v0_8_1_modinv32(rustsecp256k1_v0_8_1_modinv32_signed30
|
|||
* values i.e. +/- 1, and d now contains +/- the modular inverse. */
|
||||
#ifdef VERIFY
|
||||
/* g == 0 */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&g, 9, &SECP256K1_SIGNED30_ONE, 0) == 0);
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&g, 9, &SECP256K1_SIGNED30_ONE, 0) == 0);
|
||||
/* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, -1) == 0 ||
|
||||
rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, 1) == 0 ||
|
||||
(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
|
||||
rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
|
||||
(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) == 0 ||
|
||||
rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) == 0)));
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, -1) == 0 ||
|
||||
rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, 1) == 0 ||
|
||||
(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
|
||||
rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
|
||||
(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) == 0 ||
|
||||
rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) == 0)));
|
||||
#endif
|
||||
|
||||
/* Optionally negate d, normalize to [0,modulus), and return it. */
|
||||
rustsecp256k1_v0_8_1_modinv32_normalize_30(&d, f.v[8], modinfo);
|
||||
rustsecp256k1_v0_9_0_modinv32_normalize_30(&d, f.v[8], modinfo);
|
||||
*x = d;
|
||||
}
|
||||
|
||||
/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (variable time). */
|
||||
static void rustsecp256k1_v0_8_1_modinv32_var(rustsecp256k1_v0_8_1_modinv32_signed30 *x, const rustsecp256k1_v0_8_1_modinv32_modinfo *modinfo) {
|
||||
static void rustsecp256k1_v0_9_0_modinv32_var(rustsecp256k1_v0_9_0_modinv32_signed30 *x, const rustsecp256k1_v0_9_0_modinv32_modinfo *modinfo) {
|
||||
/* Start with d=0, e=1, f=modulus, g=x, eta=-1. */
|
||||
rustsecp256k1_v0_8_1_modinv32_signed30 d = {{0, 0, 0, 0, 0, 0, 0, 0, 0}};
|
||||
rustsecp256k1_v0_8_1_modinv32_signed30 e = {{1, 0, 0, 0, 0, 0, 0, 0, 0}};
|
||||
rustsecp256k1_v0_8_1_modinv32_signed30 f = modinfo->modulus;
|
||||
rustsecp256k1_v0_8_1_modinv32_signed30 g = *x;
|
||||
rustsecp256k1_v0_9_0_modinv32_signed30 d = {{0, 0, 0, 0, 0, 0, 0, 0, 0}};
|
||||
rustsecp256k1_v0_9_0_modinv32_signed30 e = {{1, 0, 0, 0, 0, 0, 0, 0, 0}};
|
||||
rustsecp256k1_v0_9_0_modinv32_signed30 f = modinfo->modulus;
|
||||
rustsecp256k1_v0_9_0_modinv32_signed30 g = *x;
|
||||
#ifdef VERIFY
|
||||
int i = 0;
|
||||
#endif
|
||||
|
@ -521,18 +602,18 @@ static void rustsecp256k1_v0_8_1_modinv32_var(rustsecp256k1_v0_8_1_modinv32_sign
|
|||
/* Do iterations of 30 divsteps each until g=0. */
|
||||
while (1) {
|
||||
/* Compute transition matrix and new eta after 30 divsteps. */
|
||||
rustsecp256k1_v0_8_1_modinv32_trans2x2 t;
|
||||
eta = rustsecp256k1_v0_8_1_modinv32_divsteps_30_var(eta, f.v[0], g.v[0], &t);
|
||||
rustsecp256k1_v0_9_0_modinv32_trans2x2 t;
|
||||
eta = rustsecp256k1_v0_9_0_modinv32_divsteps_30_var(eta, f.v[0], g.v[0], &t);
|
||||
/* Update d,e using that transition matrix. */
|
||||
rustsecp256k1_v0_8_1_modinv32_update_de_30(&d, &e, &t, modinfo);
|
||||
rustsecp256k1_v0_9_0_modinv32_update_de_30(&d, &e, &t, modinfo);
|
||||
/* Update f,g using that transition matrix. */
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
|
||||
#endif
|
||||
rustsecp256k1_v0_8_1_modinv32_update_fg_30_var(len, &f, &g, &t);
|
||||
rustsecp256k1_v0_9_0_modinv32_update_fg_30_var(len, &f, &g, &t);
|
||||
/* If the bottom limb of g is 0, there is a chance g=0. */
|
||||
if (g.v[0] == 0) {
|
||||
cond = 0;
|
||||
|
@ -558,10 +639,10 @@ static void rustsecp256k1_v0_8_1_modinv32_var(rustsecp256k1_v0_8_1_modinv32_sign
|
|||
}
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(++i < 25); /* We should never need more than 25*30 = 750 divsteps */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -569,19 +650,89 @@ static void rustsecp256k1_v0_8_1_modinv32_var(rustsecp256k1_v0_8_1_modinv32_sign
|
|||
* the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */
|
||||
#ifdef VERIFY
|
||||
/* g == 0 */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&g, len, &SECP256K1_SIGNED30_ONE, 0) == 0);
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&g, len, &SECP256K1_SIGNED30_ONE, 0) == 0);
|
||||
/* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, -1) == 0 ||
|
||||
rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, 1) == 0 ||
|
||||
(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
|
||||
rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
|
||||
(rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) == 0 ||
|
||||
rustsecp256k1_v0_8_1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) == 0)));
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, -1) == 0 ||
|
||||
rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, 1) == 0 ||
|
||||
(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
|
||||
rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
|
||||
(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) == 0 ||
|
||||
rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) == 0)));
|
||||
#endif
|
||||
|
||||
/* Optionally negate d, normalize to [0,modulus), and return it. */
|
||||
rustsecp256k1_v0_8_1_modinv32_normalize_30(&d, f.v[len - 1], modinfo);
|
||||
rustsecp256k1_v0_9_0_modinv32_normalize_30(&d, f.v[len - 1], modinfo);
|
||||
*x = d;
|
||||
}
|
||||
|
||||
/* Do up to 50 iterations of 30 posdivsteps (up to 1500 steps; more is extremely rare) each until f=1.
|
||||
* In VERIFY mode use a lower number of iterations (750, close to the median 756), so failure actually occurs. */
|
||||
#ifdef VERIFY
|
||||
#define JACOBI32_ITERATIONS 25
|
||||
#else
|
||||
#define JACOBI32_ITERATIONS 50
|
||||
#endif
|
||||
|
||||
/* Compute the Jacobi symbol of x modulo modinfo->modulus (variable time). gcd(x,modulus) must be 1. */
|
||||
static int rustsecp256k1_v0_9_0_jacobi32_maybe_var(const rustsecp256k1_v0_9_0_modinv32_signed30 *x, const rustsecp256k1_v0_9_0_modinv32_modinfo *modinfo) {
|
||||
/* Start with f=modulus, g=x, eta=-1. */
|
||||
rustsecp256k1_v0_9_0_modinv32_signed30 f = modinfo->modulus;
|
||||
rustsecp256k1_v0_9_0_modinv32_signed30 g = *x;
|
||||
int j, len = 9;
|
||||
int32_t eta = -1; /* eta = -delta; delta is initially 1 */
|
||||
int32_t cond, fn, gn;
|
||||
int jac = 0;
|
||||
int count;
|
||||
|
||||
/* The input limbs must all be non-negative. */
|
||||
VERIFY_CHECK(g.v[0] >= 0 && g.v[1] >= 0 && g.v[2] >= 0 && g.v[3] >= 0 && g.v[4] >= 0 && g.v[5] >= 0 && g.v[6] >= 0 && g.v[7] >= 0 && g.v[8] >= 0);
|
||||
|
||||
/* If x > 0, then if the loop below converges, it converges to f=g=gcd(x,modulus). Since we
|
||||
* require that gcd(x,modulus)=1 and modulus>=3, x cannot be 0. Thus, we must reach f=1 (or
|
||||
* time out). */
|
||||
VERIFY_CHECK((g.v[0] | g.v[1] | g.v[2] | g.v[3] | g.v[4] | g.v[5] | g.v[6] | g.v[7] | g.v[8]) != 0);
|
||||
|
||||
for (count = 0; count < JACOBI32_ITERATIONS; ++count) {
|
||||
/* Compute transition matrix and new eta after 30 posdivsteps. */
|
||||
rustsecp256k1_v0_9_0_modinv32_trans2x2 t;
|
||||
eta = rustsecp256k1_v0_9_0_modinv32_posdivsteps_30_var(eta, f.v[0] | ((uint32_t)f.v[1] << 30), g.v[0] | ((uint32_t)g.v[1] << 30), &t, &jac);
|
||||
/* Update f,g using that transition matrix. */
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
|
||||
#endif
|
||||
rustsecp256k1_v0_9_0_modinv32_update_fg_30_var(len, &f, &g, &t);
|
||||
/* If the bottom limb of f is 1, there is a chance that f=1. */
|
||||
if (f.v[0] == 1) {
|
||||
cond = 0;
|
||||
/* Check if the other limbs are also 0. */
|
||||
for (j = 1; j < len; ++j) {
|
||||
cond |= f.v[j];
|
||||
}
|
||||
/* If so, we're done. If f=1, the Jacobi symbol (g | f)=1. */
|
||||
if (cond == 0) return 1 - 2*(jac & 1);
|
||||
}
|
||||
|
||||
/* Determine if len>1 and limb (len-1) of both f and g is 0. */
|
||||
fn = f.v[len - 1];
|
||||
gn = g.v[len - 1];
|
||||
cond = ((int32_t)len - 2) >> 31;
|
||||
cond |= fn;
|
||||
cond |= gn;
|
||||
/* If so, reduce length. */
|
||||
if (cond == 0) --len;
|
||||
#ifdef VERIFY
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
|
||||
VERIFY_CHECK(rustsecp256k1_v0_9_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
|
||||
#endif
|
||||
}
|
||||
|
||||
/* The loop failed to converge to f=g after 1500 iterations. Return 0, indicating unknown result. */
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* SECP256K1_MODINV32_IMPL_H */
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue