From 4652ab611652c964f1cbb2bd8f0a3590e58fca04 Mon Sep 17 00:00:00 2001 From: "Dr. Maxim Orlovsky" Date: Mon, 14 Jun 2021 17:55:38 +0300 Subject: [PATCH 1/3] Updating secp256k1 version to 1758a92ffd896af533b142707e9892ea6e15e5db --- secp256k1-sys/Cargo.toml | 2 +- .../depend/secp256k1-HEAD-revision.txt | 2 +- secp256k1-sys/depend/secp256k1/.cirrus.yml | 315 ++ secp256k1-sys/depend/secp256k1/.gitignore | 8 + secp256k1-sys/depend/secp256k1/.travis.yml | 109 - secp256k1-sys/depend/secp256k1/Makefile.am | 38 +- secp256k1-sys/depend/secp256k1/README.md | 7 +- .../secp256k1/build-aux/m4/bitcoin_secp.m4 | 13 +- .../{contrib/travis.sh => ci/cirrus.sh} | 57 +- .../secp256k1/ci/linux-debian.Dockerfile | 24 + secp256k1-sys/depend/secp256k1/configure.ac | 279 +- .../secp256k1/contrib/lax_der_parsing.c | 9 +- .../secp256k1/contrib/lax_der_parsing.h | 16 +- .../contrib/lax_der_privatekey_parsing.c | 15 +- .../contrib/lax_der_privatekey_parsing.h | 12 +- .../secp256k1/doc/safegcd_implementation.md | 765 +++ .../depend/secp256k1/include/secp256k1.h | 277 +- .../depend/secp256k1/include/secp256k1.h.orig | 297 +- .../depend/secp256k1/include/secp256k1_ecdh.h | 24 +- .../secp256k1/include/secp256k1_extrakeys.h | 128 +- .../include/secp256k1_preallocated.h | 54 +- .../secp256k1/include/secp256k1_recovery.h | 44 +- .../secp256k1/include/secp256k1_schnorrsig.h | 28 +- secp256k1-sys/depend/secp256k1/obj/.gitignore | 0 .../secp256k1/sage/gen_exhaustive_groups.sage | 6 +- .../sage/gen_split_lambda_constants.sage | 16 +- .../sage/prove_group_implementations.sage | 50 +- .../secp256k1/src/asm/field_10x26_arm.s | 16 +- .../depend/secp256k1/src/assumptions.h | 2 +- .../depend/secp256k1/src/basic-config.h | 19 +- .../depend/secp256k1/src/bench_ecdh.c | 16 +- .../depend/secp256k1/src/bench_ecmult.c | 299 +- .../depend/secp256k1/src/bench_internal.c | 195 +- .../depend/secp256k1/src/bench_recover.c | 20 +- .../depend/secp256k1/src/bench_schnorrsig.c | 34 +- .../depend/secp256k1/src/bench_sign.c | 14 +- .../depend/secp256k1/src/bench_verify.c | 30 +- secp256k1-sys/depend/secp256k1/src/ecdsa.h | 8 +- .../depend/secp256k1/src/ecdsa_impl.h | 122 +- secp256k1-sys/depend/secp256k1/src/eckey.h | 12 +- .../depend/secp256k1/src/eckey_impl.h | 74 +- secp256k1-sys/depend/secp256k1/src/ecmult.h | 23 +- .../depend/secp256k1/src/ecmult_const.h | 2 +- .../depend/secp256k1/src/ecmult_const_impl.h | 118 +- .../depend/secp256k1/src/ecmult_gen.h | 22 +- .../depend/secp256k1/src/ecmult_gen_impl.h | 136 +- .../depend/secp256k1/src/ecmult_impl.h | 512 +- secp256k1-sys/depend/secp256k1/src/field.h | 71 +- .../depend/secp256k1/src/field_10x26.h | 4 +- .../depend/secp256k1/src/field_10x26_impl.h | 185 +- .../depend/secp256k1/src/field_5x52.h | 4 +- .../secp256k1/src/field_5x52_asm_impl.h | 4 +- .../depend/secp256k1/src/field_5x52_impl.h | 165 +- .../secp256k1/src/field_5x52_int128_impl.h | 4 +- .../depend/secp256k1/src/field_impl.h | 266 +- .../depend/secp256k1/src/gen_context.c | 32 +- secp256k1-sys/depend/secp256k1/src/group.h | 97 +- .../depend/secp256k1/src/group_impl.h | 563 +- secp256k1-sys/depend/secp256k1/src/hash.h | 26 +- .../depend/secp256k1/src/hash_impl.h | 122 +- secp256k1-sys/depend/secp256k1/src/modinv32.h | 42 + .../depend/secp256k1/src/modinv32_impl.h | 587 ++ secp256k1-sys/depend/secp256k1/src/modinv64.h | 46 + .../depend/secp256k1/src/modinv64_impl.h | 593 ++ .../src/modules/ecdh/Makefile.am.include | 2 +- .../secp256k1/src/modules/ecdh/main_impl.h | 50 +- .../secp256k1/src/modules/ecdh/tests_impl.h | 68 +- 
.../src/modules/extrakeys/Makefile.am.include | 2 +- .../src/modules/extrakeys/main_impl.h | 202 +- .../modules/extrakeys/tests_exhaustive_impl.h | 52 +- .../src/modules/extrakeys/tests_impl.h | 553 +- .../src/modules/recovery/Makefile.am.include | 2 +- .../src/modules/recovery/main_impl.h | 120 +- .../modules/recovery/tests_exhaustive_impl.h | 82 +- .../src/modules/recovery/tests_impl.h | 272 +- .../modules/schnorrsig/Makefile.am.include | 2 +- .../src/modules/schnorrsig/main_impl.h | 172 +- .../schnorrsig/tests_exhaustive_impl.h | 72 +- .../src/modules/schnorrsig/tests_impl.h | 278 +- secp256k1-sys/depend/secp256k1/src/num.h | 74 - secp256k1-sys/depend/secp256k1/src/num_gmp.h | 20 - .../depend/secp256k1/src/num_gmp_impl.h | 288 - secp256k1-sys/depend/secp256k1/src/num_impl.h | 24 - secp256k1-sys/depend/secp256k1/src/scalar.h | 62 +- .../depend/secp256k1/src/scalar_4x64.h | 2 +- .../depend/secp256k1/src/scalar_4x64_impl.h | 314 +- .../depend/secp256k1/src/scalar_8x32.h | 2 +- .../depend/secp256k1/src/scalar_8x32_impl.h | 251 +- .../depend/secp256k1/src/scalar_impl.h | 293 +- .../depend/secp256k1/src/scalar_low.h | 2 +- .../depend/secp256k1/src/scalar_low_impl.h | 65 +- secp256k1-sys/depend/secp256k1/src/scratch.h | 24 +- .../depend/secp256k1/src/scratch_impl.h | 30 +- .../depend/secp256k1/src/secp256k1.c | 533 +- .../depend/secp256k1/src/secp256k1.c.orig | 563 +- secp256k1-sys/depend/secp256k1/src/selftest.h | 16 +- secp256k1-sys/depend/secp256k1/src/testrand.h | 20 +- .../depend/secp256k1/src/testrand_impl.h | 70 +- secp256k1-sys/depend/secp256k1/src/tests.c | 4876 ++++++++++------- .../depend/secp256k1/src/tests_exhaustive.c | 245 +- secp256k1-sys/depend/secp256k1/src/util.h | 79 +- .../depend/secp256k1/src/util.h.orig | 87 +- .../secp256k1/src/valgrind_ctime_test.c | 124 +- secp256k1-sys/src/lib.rs | 114 +- secp256k1-sys/src/recovery.rs | 10 +- 105 files changed, 10025 insertions(+), 7172 deletions(-) create mode 100644 secp256k1-sys/depend/secp256k1/.cirrus.yml delete mode 100644 secp256k1-sys/depend/secp256k1/.travis.yml rename secp256k1-sys/depend/secp256k1/{contrib/travis.sh => ci/cirrus.sh} (50%) create mode 100644 secp256k1-sys/depend/secp256k1/ci/linux-debian.Dockerfile create mode 100644 secp256k1-sys/depend/secp256k1/doc/safegcd_implementation.md delete mode 100644 secp256k1-sys/depend/secp256k1/obj/.gitignore create mode 100644 secp256k1-sys/depend/secp256k1/src/modinv32.h create mode 100644 secp256k1-sys/depend/secp256k1/src/modinv32_impl.h create mode 100644 secp256k1-sys/depend/secp256k1/src/modinv64.h create mode 100644 secp256k1-sys/depend/secp256k1/src/modinv64_impl.h delete mode 100644 secp256k1-sys/depend/secp256k1/src/num.h delete mode 100644 secp256k1-sys/depend/secp256k1/src/num_gmp.h delete mode 100644 secp256k1-sys/depend/secp256k1/src/num_gmp_impl.h delete mode 100644 secp256k1-sys/depend/secp256k1/src/num_impl.h diff --git a/secp256k1-sys/Cargo.toml b/secp256k1-sys/Cargo.toml index c602e80..94b4348 100644 --- a/secp256k1-sys/Cargo.toml +++ b/secp256k1-sys/Cargo.toml @@ -12,7 +12,7 @@ description = "FFI for Pieter Wuille's `libsecp256k1` library." 
keywords = [ "secp256k1", "libsecp256k1", "ffi" ] readme = "README.md" build = "build.rs" -links = "rustsecp256k1_v0_4_0" +links = "rustsecp256k1_v0_4_1" # Should make docs.rs show all functions, even those behind non-default features [package.metadata.docs.rs] diff --git a/secp256k1-sys/depend/secp256k1-HEAD-revision.txt b/secp256k1-sys/depend/secp256k1-HEAD-revision.txt index a092d1d..24c6e28 100644 --- a/secp256k1-sys/depend/secp256k1-HEAD-revision.txt +++ b/secp256k1-sys/depend/secp256k1-HEAD-revision.txt @@ -1,2 +1,2 @@ # This file was automatically created by ./vendor-libsecp.sh -98dac87839838b86094f1bccc71cc20e67b146cc +1758a92ffd896af533b142707e9892ea6e15e5db diff --git a/secp256k1-sys/depend/secp256k1/.cirrus.yml b/secp256k1-sys/depend/secp256k1/.cirrus.yml new file mode 100644 index 0000000..6d63511 --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/.cirrus.yml @@ -0,0 +1,315 @@ +env: + WIDEMUL: auto + STATICPRECOMPUTATION: yes + ECMULTGENPRECISION: auto + ASM: no + BUILD: check + WITH_VALGRIND: yes + EXTRAFLAGS: + HOST: + ECDH: no + RECOVERY: no + SCHNORRSIG: no + EXPERIMENTAL: no + CTIMETEST: yes + BENCH: yes + TEST_ITERS: + BENCH_ITERS: 2 + MAKEFLAGS: -j2 + +cat_logs_snippet: &CAT_LOGS + always: + cat_tests_log_script: + - cat tests.log || true + cat_exhaustive_tests_log_script: + - cat exhaustive_tests.log || true + cat_valgrind_ctime_test_log_script: + - cat valgrind_ctime_test.log || true + cat_bench_log_script: + - cat bench.log || true + on_failure: + cat_config_log_script: + - cat config.log || true + cat_test_env_script: + - cat test_env.log || true + cat_ci_env_script: + - env + +merge_base_script_snippet: &MERGE_BASE + merge_base_script: + - if [ "$CIRRUS_PR" = "" ]; then exit 0; fi + - git fetch $CIRRUS_REPO_CLONE_URL $CIRRUS_BASE_BRANCH + - git config --global user.email "ci@ci.ci" + - git config --global user.name "ci" + - git merge FETCH_HEAD # Merge base to detect silent merge conflicts + +task: + name: "x86_64: Linux (Debian stable)" + container: + dockerfile: ci/linux-debian.Dockerfile + # Reduce number of CPUs to be able to do more builds in parallel. + cpu: 1 + # More than enough for our scripts. 
+ memory: 1G + matrix: &ENV_MATRIX + - env: {WIDEMUL: int64, RECOVERY: yes} + - env: {WIDEMUL: int64, ECDH: yes, EXPERIMENTAL: yes, SCHNORRSIG: yes} + - env: {WIDEMUL: int128} + - env: {WIDEMUL: int128, RECOVERY: yes, EXPERIMENTAL: yes, SCHNORRSIG: yes} + - env: {WIDEMUL: int128, ECDH: yes, EXPERIMENTAL: yes, SCHNORRSIG: yes} + - env: {WIDEMUL: int128, ASM: x86_64} + - env: { RECOVERY: yes, EXPERIMENTAL: yes, SCHNORRSIG: yes} + - env: { STATICPRECOMPUTATION: no} + - env: {BUILD: distcheck, WITH_VALGRIND: no, CTIMETEST: no, BENCH: no} + - env: {CPPFLAGS: -DDETERMINISTIC} + - env: {CFLAGS: -O0, CTIMETEST: no} + - env: { ECMULTGENPRECISION: 2 } + - env: { ECMULTGENPRECISION: 8 } + matrix: + - env: + CC: gcc + - env: + CC: clang + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + +task: + name: "i686: Linux (Debian stable)" + container: + dockerfile: ci/linux-debian.Dockerfile + cpu: 1 + memory: 1G + env: + HOST: i686-linux-gnu + ECDH: yes + RECOVERY: yes + EXPERIMENTAL: yes + SCHNORRSIG: yes + matrix: + - env: + CC: i686-linux-gnu-gcc + - env: + CC: clang --target=i686-pc-linux-gnu -isystem /usr/i686-linux-gnu/include + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + +task: + name: "x86_64: macOS Catalina" + macos_instance: + image: catalina-base + env: + HOMEBREW_NO_AUTO_UPDATE: 1 + HOMEBREW_NO_INSTALL_CLEANUP: 1 + # Cirrus gives us a fixed number of 12 virtual CPUs. Not that we even have that many jobs at the moment... + MAKEFLAGS: -j13 + matrix: + << : *ENV_MATRIX + matrix: + - env: + CC: gcc-9 + - env: + CC: clang + # Update Command Line Tools + # Uncomment this if the Command Line Tools on the CirrusCI macOS image are too old to brew valgrind. + # See https://apple.stackexchange.com/a/195963 for the implementation. + ## update_clt_script: + ## - system_profiler SPSoftwareDataType + ## - touch /tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress + ## - |- + ## PROD=$(softwareupdate -l | grep "*.*Command Line" | tail -n 1 | awk -F"*" '{print $2}' | sed -e 's/^ *//' | sed 's/Label: //g' | tr -d '\n') + ## # For debugging + ## - softwareupdate -l && echo "PROD: $PROD" + ## - softwareupdate -i "$PROD" --verbose + ## - rm /tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress + ## + brew_valgrind_pre_script: + - brew config + - brew tap --shallow LouisBrunner/valgrind + # Fetch valgrind source but don't build it yet. + - brew fetch --HEAD LouisBrunner/valgrind/valgrind + brew_valgrind_cache: + # This is $(brew --cellar valgrind) but command substition does not work here. + folder: /usr/local/Cellar/valgrind + # Rebuild cache if ... + fingerprint_script: + # ... macOS version changes: + - sw_vers + # ... brew changes: + - brew config + # ... valgrind changes: + - git -C "$(brew --cache)/valgrind--git" rev-parse HEAD + populate_script: + # If there's no hit in the cache, build and install valgrind. + - brew install --HEAD LouisBrunner/valgrind/valgrind + brew_valgrind_post_script: + # If we have restored valgrind from the cache, tell brew to create symlink to the PATH. + # If we haven't restored from cached (and just run brew install), this is a no-op. 
+ - brew link valgrind + brew_script: + - brew install automake libtool gcc@9 + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + +task: + name: "s390x (big-endian): Linux (Debian stable, QEMU)" + container: + dockerfile: ci/linux-debian.Dockerfile + cpu: 1 + memory: 1G + env: + WRAPPER_CMD: qemu-s390x + TEST_ITERS: 16 + HOST: s390x-linux-gnu + WITH_VALGRIND: no + ECDH: yes + RECOVERY: yes + EXPERIMENTAL: yes + SCHNORRSIG: yes + CTIMETEST: no + << : *MERGE_BASE + test_script: + # https://sourceware.org/bugzilla/show_bug.cgi?id=27008 + - rm /etc/ld.so.cache + - ./ci/cirrus.sh + << : *CAT_LOGS + +task: + name: "ARM32: Linux (Debian stable, QEMU)" + container: + dockerfile: ci/linux-debian.Dockerfile + cpu: 1 + memory: 1G + env: + WRAPPER_CMD: qemu-arm + TEST_ITERS: 16 + HOST: arm-linux-gnueabihf + WITH_VALGRIND: no + ECDH: yes + RECOVERY: yes + EXPERIMENTAL: yes + SCHNORRSIG: yes + CTIMETEST: no + matrix: + - env: {} + - env: {ASM: arm} + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + +task: + name: "ARM64: Linux (Debian stable, QEMU)" + container: + dockerfile: ci/linux-debian.Dockerfile + cpu: 1 + memory: 1G + env: + WRAPPER_CMD: qemu-aarch64 + TEST_ITERS: 16 + HOST: aarch64-linux-gnu + WITH_VALGRIND: no + ECDH: yes + RECOVERY: yes + EXPERIMENTAL: yes + SCHNORRSIG: yes + CTIMETEST: no + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + +task: + name: "ppc64le: Linux (Debian stable, QEMU)" + container: + dockerfile: ci/linux-debian.Dockerfile + cpu: 1 + memory: 1G + env: + WRAPPER_CMD: qemu-ppc64le + TEST_ITERS: 16 + HOST: powerpc64le-linux-gnu + WITH_VALGRIND: no + ECDH: yes + RECOVERY: yes + EXPERIMENTAL: yes + SCHNORRSIG: yes + CTIMETEST: no + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + +task: + name: "x86_64 (mingw32-w64): Windows (Debian stable, Wine)" + container: + dockerfile: ci/linux-debian.Dockerfile + cpu: 1 + memory: 1G + env: + WRAPPER_CMD: wine64-stable + TEST_ITERS: 16 + HOST: x86_64-w64-mingw32 + WITH_VALGRIND: no + ECDH: yes + RECOVERY: yes + EXPERIMENTAL: yes + SCHNORRSIG: yes + CTIMETEST: no + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + +# Sanitizers +task: + container: + dockerfile: ci/linux-debian.Dockerfile + cpu: 1 + memory: 1G + env: + ECDH: yes + RECOVERY: yes + EXPERIMENTAL: yes + SCHNORRSIG: yes + CTIMETEST: no + EXTRAFLAGS: "--disable-openssl-tests" + matrix: + - name: "Valgrind (memcheck)" + env: + # The `--error-exitcode` is required to make the test fail if valgrind found errors, otherwise it'll return 0 (https://www.valgrind.org/docs/manual/manual-core.html) + WRAPPER_CMD: "valgrind --error-exitcode=42" + TEST_ITERS: 16 + - name: "UBSan, ASan, LSan" + env: + CFLAGS: "-fsanitize=undefined,address" + CFLAGS_FOR_BUILD: "-fsanitize=undefined,address" + UBSAN_OPTIONS: "print_stacktrace=1:halt_on_error=1" + ASAN_OPTIONS: "strict_string_checks=1:detect_stack_use_after_return=1:detect_leaks=1" + LSAN_OPTIONS: "use_unaligned=1" + TEST_ITERS: 32 + # Try to cover many configurations with just a tiny matrix. 
+ matrix: + - env: + ASM: auto + STATICPRECOMPUTATION: yes + - env: + ASM: no + STATICPRECOMPUTATION: no + ECMULTGENPRECISION: 2 + matrix: + - env: + CC: clang + - env: + HOST: i686-linux-gnu + CC: i686-linux-gnu-gcc + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + diff --git a/secp256k1-sys/depend/secp256k1/.gitignore b/secp256k1-sys/depend/secp256k1/.gitignore index ccdef02..b62055a 100644 --- a/secp256k1-sys/depend/secp256k1/.gitignore +++ b/secp256k1-sys/depend/secp256k1/.gitignore @@ -33,6 +33,14 @@ libtool *~ *.log *.trs + +coverage/ +coverage.html +coverage.*.html +*.gcda +*.gcno +*.gcov + src/libsecp256k1-config.h src/libsecp256k1-config.h.in src/ecmult_static_context.h diff --git a/secp256k1-sys/depend/secp256k1/.travis.yml b/secp256k1-sys/depend/secp256k1/.travis.yml deleted file mode 100644 index 91f1d41..0000000 --- a/secp256k1-sys/depend/secp256k1/.travis.yml +++ /dev/null @@ -1,109 +0,0 @@ -language: c -os: - - linux - - osx - -dist: bionic -# Valgrind currently supports upto macOS 10.13, the latest xcode of that version is 10.1 -osx_image: xcode10.1 -addons: - apt: - packages: - - libgmp-dev - - valgrind - - libtool-bin -compiler: - - clang - - gcc -env: - global: - - WIDEMUL=auto BIGNUM=auto STATICPRECOMPUTATION=yes ECMULTGENPRECISION=auto ASM=no BUILD=check WITH_VALGRIND=yes RUN_VALGRIND=no EXTRAFLAGS= HOST= ECDH=no RECOVERY=no SCHNORRSIG=no EXPERIMENTAL=no CTIMETEST=yes BENCH=yes ITERS=2 - matrix: - - WIDEMUL=int64 RECOVERY=yes - - WIDEMUL=int64 ECDH=yes EXPERIMENTAL=yes SCHNORRSIG=yes - - WIDEMUL=int128 - - WIDEMUL=int128 RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes - - WIDEMUL=int128 ECDH=yes EXPERIMENTAL=yes SCHNORRSIG=yes - - WIDEMUL=int128 ASM=x86_64 - - BIGNUM=no - - BIGNUM=no RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes - - BIGNUM=no STATICPRECOMPUTATION=no - - BUILD=distcheck WITH_VALGRIND=no CTIMETEST=no BENCH=no - - CPPFLAGS=-DDETERMINISTIC - - CFLAGS=-O0 CTIMETEST=no - - CFLAGS="-fsanitize=undefined -fno-omit-frame-pointer" LDFLAGS="-fsanitize=undefined -fno-omit-frame-pointer" UBSAN_OPTIONS="print_stacktrace=1:halt_on_error=1" BIGNUM=no ASM=x86_64 ECDH=yes RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes CTIMETEST=no - - ECMULTGENPRECISION=2 - - ECMULTGENPRECISION=8 - - RUN_VALGRIND=yes BIGNUM=no ASM=x86_64 ECDH=yes RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes EXTRAFLAGS="--disable-openssl-tests" BUILD= -matrix: - fast_finish: true - include: - - compiler: clang - os: linux - env: HOST=i686-linux-gnu - addons: - apt: - packages: - - gcc-multilib - - libgmp-dev:i386 - - valgrind - - libtool-bin - - libc6-dbg:i386 - - compiler: clang - env: HOST=i686-linux-gnu - os: linux - addons: - apt: - packages: - - gcc-multilib - - valgrind - - libtool-bin - - libc6-dbg:i386 - - compiler: gcc - env: HOST=i686-linux-gnu - os: linux - addons: - apt: - packages: - - gcc-multilib - - valgrind - - libtool-bin - - libc6-dbg:i386 - - compiler: gcc - os: linux - env: HOST=i686-linux-gnu - addons: - apt: - packages: - - gcc-multilib - - libgmp-dev:i386 - - valgrind - - libtool-bin - - libc6-dbg:i386 - # S390x build (big endian system) - - compiler: gcc - env: HOST=s390x-unknown-linux-gnu ECDH=yes RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes CTIMETEST= - arch: s390x - -# We use this to install macOS dependencies instead of the built in `homebrew` plugin, -# because in xcode earlier than 11 they have a bug requiring updating the system which overall takes ~8 minutes. 
-# https://travis-ci.community/t/macos-build-fails-because-of-homebrew-bundle-unknown-command/7296 -before_install: - - if [ "${TRAVIS_OS_NAME}" = "osx" ]; then HOMEBREW_NO_AUTO_UPDATE=1 brew install gmp valgrind gcc@9; fi - -before_script: ./autogen.sh - -# travis auto terminates jobs that go for 10 minutes without printing to stdout, but travis_wait doesn't work well with forking programs like valgrind (https://docs.travis-ci.com/user/common-build-problems/#build-times-out-because-no-output-was-received https://github.com/bitcoin-core/secp256k1/pull/750#issuecomment-623476860) -script: - - function keep_alive() { while true; do echo -en "\a"; sleep 60; done } - - keep_alive & - - ./contrib/travis.sh - - kill %keep_alive - -after_script: - - cat ./tests.log - - cat ./exhaustive_tests.log - - cat ./valgrind_ctime_test.log - - cat ./bench.log - - $CC --version - - valgrind --version diff --git a/secp256k1-sys/depend/secp256k1/Makefile.am b/secp256k1-sys/depend/secp256k1/Makefile.am index 774f51d..9f21fc5 100644 --- a/secp256k1-sys/depend/secp256k1/Makefile.am +++ b/secp256k1-sys/depend/secp256k1/Makefile.am @@ -2,7 +2,7 @@ ACLOCAL_AMFLAGS = -I build-aux/m4 lib_LTLIBRARIES = libsecp256k1.la include_HEADERS = include/secp256k1.h -include_HEADERS += include/rustsecp256k1_v0_4_0_preallocated.h +include_HEADERS += include/rustsecp256k1_v0_4_1_preallocated.h noinst_HEADERS = noinst_HEADERS += src/scalar.h noinst_HEADERS += src/scalar_4x64.h @@ -14,8 +14,6 @@ noinst_HEADERS += src/scalar_8x32_impl.h noinst_HEADERS += src/scalar_low_impl.h noinst_HEADERS += src/group.h noinst_HEADERS += src/group_impl.h -noinst_HEADERS += src/num_gmp.h -noinst_HEADERS += src/num_gmp_impl.h noinst_HEADERS += src/ecdsa.h noinst_HEADERS += src/ecdsa_impl.h noinst_HEADERS += src/eckey.h @@ -26,14 +24,16 @@ noinst_HEADERS += src/ecmult_const.h noinst_HEADERS += src/ecmult_const_impl.h noinst_HEADERS += src/ecmult_gen.h noinst_HEADERS += src/ecmult_gen_impl.h -noinst_HEADERS += src/num.h -noinst_HEADERS += src/num_impl.h noinst_HEADERS += src/field_10x26.h noinst_HEADERS += src/field_10x26_impl.h noinst_HEADERS += src/field_5x52.h noinst_HEADERS += src/field_5x52_impl.h noinst_HEADERS += src/field_5x52_int128_impl.h noinst_HEADERS += src/field_5x52_asm_impl.h +noinst_HEADERS += src/modinv32.h +noinst_HEADERS += src/modinv32_impl.h +noinst_HEADERS += src/modinv64.h +noinst_HEADERS += src/modinv64_impl.h noinst_HEADERS += src/assumptions.h noinst_HEADERS += src/util.h noinst_HEADERS += src/scratch.h @@ -52,7 +52,7 @@ noinst_HEADERS += contrib/lax_der_privatekey_parsing.h noinst_HEADERS += contrib/lax_der_privatekey_parsing.c if USE_EXTERNAL_ASM -COMMON_LIB = librustsecp256k1_v0_4_0_common.la +COMMON_LIB = librustsecp256k1_v0_4_1_common.la noinst_LTLIBRARIES = $(COMMON_LIB) else COMMON_LIB = @@ -63,16 +63,16 @@ pkgconfig_DATA = libsecp256k1.pc if USE_EXTERNAL_ASM if USE_ASM_ARM -librustsecp256k1_v0_4_0_common_la_SOURCES = src/asm/field_10x26_arm.s +librustsecp256k1_v0_4_1_common_la_SOURCES = src/asm/field_10x26_arm.s endif endif -librustsecp256k1_v0_4_0_la_SOURCES = src/secp256k1.c -librustsecp256k1_v0_4_0_la_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/include -I$(top_srcdir)/src $(SECP_INCLUDES) -librustsecp256k1_v0_4_0_la_LIBADD = $(SECP_LIBS) $(COMMON_LIB) +librustsecp256k1_v0_4_1_la_SOURCES = src/secp256k1.c +librustsecp256k1_v0_4_1_la_CPPFLAGS = -I$(top_srcdir)/include -I$(top_srcdir)/src $(SECP_INCLUDES) +librustsecp256k1_v0_4_1_la_LIBADD = $(SECP_LIBS) $(COMMON_LIB) if VALGRIND_ENABLED 
-librustsecp256k1_v0_4_0_la_CPPFLAGS += -DVALGRIND +librustsecp256k1_v0_4_1_la_CPPFLAGS += -DVALGRIND endif noinst_PROGRAMS = @@ -81,27 +81,27 @@ noinst_PROGRAMS += bench_verify bench_sign bench_internal bench_ecmult bench_verify_SOURCES = src/bench_verify.c bench_verify_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB) # SECP_TEST_INCLUDES are only used here for CRYPTO_CPPFLAGS -bench_verify_CPPFLAGS = -DSECP256K1_BUILD $(SECP_TEST_INCLUDES) +bench_verify_CPPFLAGS = $(SECP_TEST_INCLUDES) bench_sign_SOURCES = src/bench_sign.c bench_sign_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB) bench_internal_SOURCES = src/bench_internal.c bench_internal_LDADD = $(SECP_LIBS) $(COMMON_LIB) -bench_internal_CPPFLAGS = -DSECP256K1_BUILD $(SECP_INCLUDES) +bench_internal_CPPFLAGS = $(SECP_INCLUDES) bench_ecmult_SOURCES = src/bench_ecmult.c bench_ecmult_LDADD = $(SECP_LIBS) $(COMMON_LIB) -bench_ecmult_CPPFLAGS = -DSECP256K1_BUILD $(SECP_INCLUDES) +bench_ecmult_CPPFLAGS = $(SECP_INCLUDES) endif TESTS = if USE_TESTS noinst_PROGRAMS += tests tests_SOURCES = src/tests.c -tests_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/src -I$(top_srcdir)/include $(SECP_INCLUDES) $(SECP_TEST_INCLUDES) +tests_CPPFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/include $(SECP_INCLUDES) $(SECP_TEST_INCLUDES) if VALGRIND_ENABLED tests_CPPFLAGS += -DVALGRIND noinst_PROGRAMS += valgrind_ctime_test valgrind_ctime_test_SOURCES = src/valgrind_ctime_test.c -valgrind_ctime_test_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_LIBS) $(COMMON_LIB) +valgrind_ctime_test_LDADD = libsecp256k1.la $(SECP_LIBS) $(COMMON_LIB) endif if !ENABLE_COVERAGE tests_CPPFLAGS += -DVERIFY @@ -114,7 +114,7 @@ endif if USE_EXHAUSTIVE_TESTS noinst_PROGRAMS += exhaustive_tests exhaustive_tests_SOURCES = src/tests_exhaustive.c -exhaustive_tests_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/src $(SECP_INCLUDES) +exhaustive_tests_CPPFLAGS = -I$(top_srcdir)/src $(SECP_INCLUDES) if !ENABLE_COVERAGE exhaustive_tests_CPPFLAGS += -DVERIFY endif @@ -129,12 +129,12 @@ CPPFLAGS_FOR_BUILD +=-I$(top_srcdir) -I$(builddir)/src gen_context_OBJECTS = gen_context.o gen_context_BIN = gen_context$(BUILD_EXEEXT) gen_%.o: src/gen_%.c src/libsecp256k1-config.h - $(CC_FOR_BUILD) $(CPPFLAGS_FOR_BUILD) $(CFLAGS_FOR_BUILD) -c $< -o $@ + $(CC_FOR_BUILD) $(DEFS) $(CPPFLAGS_FOR_BUILD) $(CFLAGS_FOR_BUILD) -c $< -o $@ $(gen_context_BIN): $(gen_context_OBJECTS) $(CC_FOR_BUILD) $(CFLAGS_FOR_BUILD) $(LDFLAGS_FOR_BUILD) $^ -o $@ -$(librustsecp256k1_v0_4_0_la_OBJECTS): src/ecmult_static_context.h +$(librustsecp256k1_v0_4_1_la_OBJECTS): src/ecmult_static_context.h $(tests_OBJECTS): src/ecmult_static_context.h $(bench_internal_OBJECTS): src/ecmult_static_context.h $(bench_ecmult_OBJECTS): src/ecmult_static_context.h diff --git a/secp256k1-sys/depend/secp256k1/README.md b/secp256k1-sys/depend/secp256k1/README.md index e070937..a7eb2b0 100644 --- a/secp256k1-sys/depend/secp256k1/README.md +++ b/secp256k1-sys/depend/secp256k1/README.md @@ -1,7 +1,7 @@ libsecp256k1 ============ -[![Build Status](https://travis-ci.org/bitcoin-core/secp256k1.svg?branch=master)](https://travis-ci.org/bitcoin-core/secp256k1) +[![Build Status](https://api.cirrus-ci.com/github/bitcoin-core/secp256k1.svg?branch=master)](https://cirrus-ci.com/github/bitcoin-core/secp256k1) Optimized C library for ECDSA signatures and secret/public key operations on curve secp256k1. @@ -34,11 +34,11 @@ Implementation details * Optimized implementation of arithmetic modulo the curve's field size (2^256 - 0x1000003D1). 
* Using 5 52-bit limbs (including hand-optimized assembly for x86_64, by Diederik Huys). * Using 10 26-bit limbs (including hand-optimized assembly for 32-bit ARM, by Wladimir J. van der Laan). - * Field inverses and square roots using a sliding window over blocks of 1s (by Peter Dettman). * Scalar operations * Optimized implementation without data-dependent branches of arithmetic modulo the curve's order. * Using 4 64-bit limbs (relying on __int128 support in the compiler). * Using 8 32-bit limbs. +* Modular inverses (both field elements and scalars) based on [safegcd](https://gcd.cr.yp.to/index.html) with some modifications, and a variable-time variant (by Peter Dettman). * Group operations * Point addition formula specifically simplified for the curve equation (y^2 = x^3 + 7). * Use addition between points in Jacobian and affine coordinates where possible. @@ -96,7 +96,8 @@ To create a report, `gcovr` is recommended, as it includes branch coverage repor To create a HTML report with coloured and annotated source code: - $ gcovr --exclude 'src/bench*' --html --html-details -o coverage.html + $ mkdir -p coverage + $ gcovr --exclude 'src/bench*' --html --html-details -o coverage/coverage.html Reporting a vulnerability ------------ diff --git a/secp256k1-sys/depend/secp256k1/build-aux/m4/bitcoin_secp.m4 b/secp256k1-sys/depend/secp256k1/build-aux/m4/bitcoin_secp.m4 index ece3d65..e57888c 100644 --- a/secp256k1-sys/depend/secp256k1/build-aux/m4/bitcoin_secp.m4 +++ b/secp256k1-sys/depend/secp256k1/build-aux/m4/bitcoin_secp.m4 @@ -75,15 +75,10 @@ if test x"$has_libcrypto" = x"yes" && test x"$has_openssl_ec" = x; then fi ]) -dnl -AC_DEFUN([SECP_GMP_CHECK],[ -if test x"$has_gmp" != x"yes"; then +AC_DEFUN([SECP_VALGRIND_CHECK],[ +if test x"$has_valgrind" != x"yes"; then CPPFLAGS_TEMP="$CPPFLAGS" - CPPFLAGS="$GMP_CPPFLAGS $CPPFLAGS" - LIBS_TEMP="$LIBS" - LIBS="$GMP_LIBS $LIBS" - AC_CHECK_HEADER(gmp.h,[AC_CHECK_LIB(gmp, __gmpz_init,[has_gmp=yes; GMP_LIBS="$GMP_LIBS -lgmp"; AC_DEFINE(HAVE_LIBGMP,1,[Define this symbol if libgmp is installed])])]) - CPPFLAGS="$CPPFLAGS_TEMP" - LIBS="$LIBS_TEMP" + CPPFLAGS="$VALGRIND_CPPFLAGS $CPPFLAGS" + AC_CHECK_HEADER([valgrind/memcheck.h], [has_valgrind=yes; AC_DEFINE(HAVE_VALGRIND,1,[Define this symbol if valgrind is installed])]) fi ]) diff --git a/secp256k1-sys/depend/secp256k1/contrib/travis.sh b/secp256k1-sys/depend/secp256k1/ci/cirrus.sh similarity index 50% rename from secp256k1-sys/depend/secp256k1/contrib/travis.sh rename to secp256k1-sys/depend/secp256k1/ci/cirrus.sh index ed98623..27db1e6 100755 --- a/secp256k1-sys/depend/secp256k1/contrib/travis.sh +++ b/secp256k1-sys/depend/secp256k1/ci/cirrus.sh @@ -3,46 +3,49 @@ set -e set -x -if [ "$HOST" = "i686-linux-gnu" ] -then - export CC="$CC -m32" -fi -if [ "$TRAVIS_OS_NAME" = "osx" ] && [ "$TRAVIS_COMPILER" = "gcc" ] -then - export CC="gcc-9" -fi +export LC_ALL=C + +env >> test_env.log + +$CC -v || true +valgrind --version || true + +./autogen.sh ./configure \ --enable-experimental="$EXPERIMENTAL" \ - --with-test-override-wide-multiply="$WIDEMUL" --with-bignum="$BIGNUM" --with-asm="$ASM" \ + --with-test-override-wide-multiply="$WIDEMUL" --with-asm="$ASM" \ --enable-ecmult-static-precomputation="$STATICPRECOMPUTATION" --with-ecmult-gen-precision="$ECMULTGENPRECISION" \ --enable-module-ecdh="$ECDH" --enable-module-recovery="$RECOVERY" \ --enable-module-schnorrsig="$SCHNORRSIG" \ --with-valgrind="$WITH_VALGRIND" \ --host="$HOST" $EXTRAFLAGS -if [ -n "$BUILD" ] -then - make -j2 "$BUILD" -fi -if [ "$RUN_VALGRIND" = 
"yes" ] -then - make -j2 - # the `--error-exitcode` is required to make the test fail if valgrind found errors, otherwise it'll return 0 (https://www.valgrind.org/docs/manual/manual-core.html) - valgrind --error-exitcode=42 ./tests 16 - valgrind --error-exitcode=42 ./exhaustive_tests -fi +# We have set "-j" in MAKEFLAGS. +make + +# Print information about binaries so that we can see that the architecture is correct +file *tests* || true +file bench_* || true +file .libs/* || true + +# This tells `make check` to wrap test invocations. +export LOG_COMPILER="$WRAPPER_CMD" + +# This limits the iterations in the tests and benchmarks. +export SECP256K1_TEST_ITERS="$TEST_ITERS" +export SECP256K1_BENCH_ITERS="$BENCH_ITERS" + +make "$BUILD" + if [ "$BENCH" = "yes" ] then - if [ "$RUN_VALGRIND" = "yes" ] + # Using the local `libtool` because on macOS the system's libtool has nothing to do with GNU libtool + EXEC='./libtool --mode=execute' + if [ -n "$WRAPPER_CMD" ] then - # Using the local `libtool` because on macOS the system's libtool has nothing to do with GNU libtool - EXEC='./libtool --mode=execute valgrind --error-exitcode=42' - else - EXEC= + EXEC="$EXEC $WRAPPER_CMD" fi - # This limits the iterations in the benchmarks below to ITER(set in .travis.yml) iterations. - export SECP256K1_BENCH_ITERS="$ITERS" { $EXEC ./bench_ecmult $EXEC ./bench_internal diff --git a/secp256k1-sys/depend/secp256k1/ci/linux-debian.Dockerfile b/secp256k1-sys/depend/secp256k1/ci/linux-debian.Dockerfile new file mode 100644 index 0000000..6def913 --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/ci/linux-debian.Dockerfile @@ -0,0 +1,24 @@ +FROM debian:stable + +RUN dpkg --add-architecture i386 +RUN dpkg --add-architecture s390x +RUN dpkg --add-architecture armhf +RUN dpkg --add-architecture arm64 +RUN dpkg --add-architecture ppc64el +RUN apt-get update + +# dkpg-dev: to make pkg-config work in cross-builds +# llvm: for llvm-symbolizer, which is used by clang's UBSan for symbolized stack traces +RUN apt-get install --no-install-recommends --no-upgrade -y \ + git ca-certificates \ + make automake libtool pkg-config dpkg-dev valgrind qemu-user \ + gcc clang llvm libc6-dbg \ + gcc-i686-linux-gnu libc6-dev-i386-cross libc6-dbg:i386 libubsan1:i386 libasan5:i386 \ + gcc-s390x-linux-gnu libc6-dev-s390x-cross libc6-dbg:s390x \ + gcc-arm-linux-gnueabihf libc6-dev-armhf-cross libc6-dbg:armhf \ + gcc-aarch64-linux-gnu libc6-dev-arm64-cross libc6-dbg:arm64 \ + gcc-powerpc64le-linux-gnu libc6-dev-ppc64el-cross libc6-dbg:ppc64el \ + wine gcc-mingw-w64-x86-64 + +# Run a dummy command in wine to make it set up configuration +RUN wine64-stable xcopy || true diff --git a/secp256k1-sys/depend/secp256k1/configure.ac b/secp256k1-sys/depend/secp256k1/configure.ac index eb3b449..1ed991a 100644 --- a/secp256k1-sys/depend/secp256k1/configure.ac +++ b/secp256k1-sys/depend/secp256k1/configure.ac @@ -14,7 +14,7 @@ AM_INIT_AUTOMAKE([foreign subdir-objects]) : ${CFLAGS="-g"} LT_INIT -dnl make the compilation flags quiet unless V=1 is used +# Make the compilation flags quiet unless V=1 is used. m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) PKG_PROG_PKG_CONFIG @@ -22,9 +22,16 @@ PKG_PROG_PKG_CONFIG AC_PATH_TOOL(AR, ar) AC_PATH_TOOL(RANLIB, ranlib) AC_PATH_TOOL(STRIP, strip) -AX_PROG_CC_FOR_BUILD +# Save definition of AC_PROG_CC because AM_PROG_CC_C_O in automake<=1.13 will +# redefine AC_PROG_CC to exit with an error, which avoids the user calling it +# accidently and screwing up the effect of AM_PROG_CC_C_O. 
However, we'll need +# AC_PROG_CC later on in AX_PROG_CC_FOR_BUILD, where its usage is fine, and +# we'll carefully make sure not to call AC_PROG_CC anywhere else. +m4_copy([AC_PROG_CC], [saved_AC_PROG_CC]) AM_PROG_CC_C_O +# Restore AC_PROG_CC +m4_rename_force([saved_AC_PROG_CC], [AC_PROG_CC]) AC_PROG_CC_C89 if test x"$ac_cv_prog_cc_c89" = x"no"; then @@ -37,25 +44,23 @@ case $host_os in if test x$cross_compiling != xyes; then AC_PATH_PROG([BREW],brew,) if test x$BREW != x; then - dnl These Homebrew packages may be keg-only, meaning that they won't be found - dnl in expected paths because they may conflict with system files. Ask - dnl Homebrew where each one is located, then adjust paths accordingly. - + # These Homebrew packages may be keg-only, meaning that they won't be found + # in expected paths because they may conflict with system files. Ask + # Homebrew where each one is located, then adjust paths accordingly. openssl_prefix=`$BREW --prefix openssl 2>/dev/null` - gmp_prefix=`$BREW --prefix gmp 2>/dev/null` + valgrind_prefix=`$BREW --prefix valgrind 2>/dev/null` if test x$openssl_prefix != x; then PKG_CONFIG_PATH="$openssl_prefix/lib/pkgconfig:$PKG_CONFIG_PATH" export PKG_CONFIG_PATH CRYPTO_CPPFLAGS="-I$openssl_prefix/include" fi - if test x$gmp_prefix != x; then - GMP_CPPFLAGS="-I$gmp_prefix/include" - GMP_LIBS="-L$gmp_prefix/lib" + if test x$valgrind_prefix != x; then + VALGRIND_CPPFLAGS="-I$valgrind_prefix/include" fi else AC_PATH_PROG([PORT],port,) - dnl if homebrew isn't installed and macports is, add the macports default paths - dnl as a last resort. + # If homebrew isn't installed and macports is, add the macports default paths + # as a last resort. if test x$PORT != x; then CPPFLAGS="$CPPFLAGS -isystem /opt/local/include" LDFLAGS="$LDFLAGS -L/opt/local/lib" @@ -77,6 +82,15 @@ AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], CFLAGS="$saved_CFLAGS" ]) +saved_CFLAGS="$CFLAGS" +CFLAGS="-Wconditional-uninitialized $CFLAGS" +AC_MSG_CHECKING([if ${CC} supports -Wconditional-uninitialized]) +AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], + [ AC_MSG_RESULT([yes]) ], + [ AC_MSG_RESULT([no]) + CFLAGS="$saved_CFLAGS" + ]) + saved_CFLAGS="$CFLAGS" CFLAGS="-fvisibility=hidden $CFLAGS" AC_MSG_CHECKING([if ${CC} supports -fvisibility=hidden]) @@ -86,6 +100,10 @@ AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], CFLAGS="$saved_CFLAGS" ]) +### +### Define config arguments +### + AC_ARG_ENABLE(benchmark, AS_HELP_STRING([--enable-benchmark],[compile benchmark [default=yes]]), [use_benchmark=$enableval], @@ -146,13 +164,10 @@ AC_ARG_ENABLE(external_default_callbacks, [use_external_default_callbacks=$enableval], [use_external_default_callbacks=no]) -dnl Test-only override of the (autodetected by the C code) "widemul" setting. -dnl Legal values are int64 (for [u]int64_t), int128 (for [unsigned] __int128), and auto (the default). +# Test-only override of the (autodetected by the C code) "widemul" setting. +# Legal values are int64 (for [u]int64_t), int128 (for [unsigned] __int128), and auto (the default). 
AC_ARG_WITH([test-override-wide-multiply], [] ,[set_widemul=$withval], [set_widemul=auto]) -AC_ARG_WITH([bignum], [AS_HELP_STRING([--with-bignum=gmp|no|auto], -[bignum implementation to use [default=auto]])],[req_bignum=$withval], [req_bignum=auto]) - AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto], [assembly optimizations to use (experimental: arm) [default=auto]])],[req_asm=$withval], [req_asm=auto]) @@ -177,15 +192,22 @@ AC_ARG_WITH([valgrind], [AS_HELP_STRING([--with-valgrind=yes|no|auto], )], [req_valgrind=$withval], [req_valgrind=auto]) +### +### Handle config options (except for modules) +### + if test x"$req_valgrind" = x"no"; then enable_valgrind=no else - AC_CHECK_HEADER([valgrind/memcheck.h], [enable_valgrind=yes], [ + SECP_VALGRIND_CHECK + if test x"$has_valgrind" != x"yes"; then if test x"$req_valgrind" = x"yes"; then AC_MSG_ERROR([Valgrind support explicitly requested but valgrind/memcheck.h header not available]) fi enable_valgrind=no - ], []) + else + enable_valgrind=yes + fi fi AM_CONDITIONAL([VALGRIND_ENABLED],[test "$enable_valgrind" = "yes"]) @@ -197,61 +219,6 @@ else CFLAGS="-O2 $CFLAGS" fi -if test x"$use_ecmult_static_precomputation" != x"no"; then - # Temporarily switch to an environment for the native compiler - save_cross_compiling=$cross_compiling - cross_compiling=no - SAVE_CC="$CC" - CC="$CC_FOR_BUILD" - SAVE_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS_FOR_BUILD" - SAVE_CPPFLAGS="$CPPFLAGS" - CPPFLAGS="$CPPFLAGS_FOR_BUILD" - SAVE_LDFLAGS="$LDFLAGS" - LDFLAGS="$LDFLAGS_FOR_BUILD" - - warn_CFLAGS_FOR_BUILD="-Wall -Wextra -Wno-unused-function" - saved_CFLAGS="$CFLAGS" - CFLAGS="$warn_CFLAGS_FOR_BUILD $CFLAGS" - AC_MSG_CHECKING([if native ${CC_FOR_BUILD} supports ${warn_CFLAGS_FOR_BUILD}]) - AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], - [ AC_MSG_RESULT([yes]) ], - [ AC_MSG_RESULT([no]) - CFLAGS="$saved_CFLAGS" - ]) - - AC_MSG_CHECKING([for working native compiler: ${CC_FOR_BUILD}]) - AC_RUN_IFELSE( - [AC_LANG_PROGRAM([], [])], - [working_native_cc=yes], - [working_native_cc=no],[:]) - - CFLAGS_FOR_BUILD="$CFLAGS" - - # Restore the environment - cross_compiling=$save_cross_compiling - CC="$SAVE_CC" - CFLAGS="$SAVE_CFLAGS" - CPPFLAGS="$SAVE_CPPFLAGS" - LDFLAGS="$SAVE_LDFLAGS" - - if test x"$working_native_cc" = x"no"; then - AC_MSG_RESULT([no]) - set_precomp=no - m4_define([please_set_for_build], [Please set CC_FOR_BUILD, CFLAGS_FOR_BUILD, CPPFLAGS_FOR_BUILD, and/or LDFLAGS_FOR_BUILD.]) - if test x"$use_ecmult_static_precomputation" = x"yes"; then - AC_MSG_ERROR([native compiler ${CC_FOR_BUILD} does not produce working binaries. please_set_for_build]) - else - AC_MSG_WARN([Disabling statically generated ecmult table because the native compiler ${CC_FOR_BUILD} does not produce working binaries. 
please_set_for_build]) - fi - else - AC_MSG_RESULT([yes]) - set_precomp=yes - fi -else - set_precomp=no -fi - if test x"$req_asm" = x"auto"; then SECP_64BIT_ASM_CHECK if test x"$has_64bit_asm" = x"yes"; then @@ -279,33 +246,7 @@ else esac fi -if test x"$req_bignum" = x"auto"; then - SECP_GMP_CHECK - if test x"$has_gmp" = x"yes"; then - set_bignum=gmp - fi - - if test x"$set_bignum" = x; then - set_bignum=no - fi -else - set_bignum=$req_bignum - case $set_bignum in - gmp) - SECP_GMP_CHECK - if test x"$has_gmp" != x"yes"; then - AC_MSG_ERROR([gmp bignum explicitly requested but libgmp not available]) - fi - ;; - no) - ;; - *) - AC_MSG_ERROR([invalid bignum implementation selection]) - ;; - esac -fi - -# select assembly optimization +# Select assembly optimization use_external_asm=no case $set_asm in @@ -322,7 +263,12 @@ no) ;; esac -# select wide multiplication implementation +if test x"$use_external_asm" = x"yes"; then + AC_DEFINE(USE_EXTERNAL_ASM, 1, [Define this symbol if an external (non-inline) assembly implementation is used]) +fi + + +# Select wide multiplication implementation case $set_widemul in int128) AC_DEFINE(USE_FORCE_WIDEMUL_INT128, 1, [Define this symbol to force the use of the (unsigned) __int128 based wide multiplication implementation]) @@ -337,25 +283,7 @@ auto) ;; esac -# select bignum implementation -case $set_bignum in -gmp) - AC_DEFINE(HAVE_LIBGMP, 1, [Define this symbol if libgmp is installed]) - AC_DEFINE(USE_NUM_GMP, 1, [Define this symbol to use the gmp implementation for num]) - AC_DEFINE(USE_FIELD_INV_NUM, 1, [Define this symbol to use the num-based field inverse implementation]) - AC_DEFINE(USE_SCALAR_INV_NUM, 1, [Define this symbol to use the num-based scalar inverse implementation]) - ;; -no) - AC_DEFINE(USE_NUM_NONE, 1, [Define this symbol to use no num implementation]) - AC_DEFINE(USE_FIELD_INV_BUILTIN, 1, [Define this symbol to use the native field inverse implementation]) - AC_DEFINE(USE_SCALAR_INV_BUILTIN, 1, [Define this symbol to use the native scalar inverse implementation]) - ;; -*) - AC_MSG_ERROR([invalid bignum implementation]) - ;; -esac - -#set ecmult window size +# Set ecmult window size if test x"$req_ecmult_window" = x"auto"; then set_ecmult_window=15 else @@ -377,7 +305,7 @@ case $set_ecmult_window in ;; esac -#set ecmult gen precision +# Set ecmult gen precision if test x"$req_ecmult_gen_precision" = x"auto"; then set_ecmult_gen_precision=4 else @@ -419,15 +347,93 @@ else enable_openssl_tests=no fi -if test x"$set_bignum" = x"gmp"; then - SECP_LIBS="$SECP_LIBS $GMP_LIBS" - SECP_INCLUDES="$SECP_INCLUDES $GMP_CPPFLAGS" +if test x"$enable_valgrind" = x"yes"; then + SECP_INCLUDES="$SECP_INCLUDES $VALGRIND_CPPFLAGS" +fi + +# Handle static precomputation (after everything which modifies CFLAGS and friends) +if test x"$use_ecmult_static_precomputation" != x"no"; then + if test x"$cross_compiling" = x"no"; then + set_precomp=yes + if test x"${CC_FOR_BUILD+x}${CFLAGS_FOR_BUILD+x}${CPPFLAGS_FOR_BUILD+x}${LDFLAGS_FOR_BUILD+x}" != x; then + AC_MSG_WARN([CC_FOR_BUILD, CFLAGS_FOR_BUILD, CPPFLAGS_FOR_BUILD, and/or LDFLAGS_FOR_BUILD is set but ignored because we are not cross-compiling.]) + fi + # If we're not cross-compiling, simply use the same compiler for building the static precompation code. 
+ CC_FOR_BUILD="$CC" + CFLAGS_FOR_BUILD="$CFLAGS" + CPPFLAGS_FOR_BUILD="$CPPFLAGS" + LDFLAGS_FOR_BUILD="$LDFLAGS" + else + AX_PROG_CC_FOR_BUILD + + # Temporarily switch to an environment for the native compiler + save_cross_compiling=$cross_compiling + cross_compiling=no + SAVE_CC="$CC" + CC="$CC_FOR_BUILD" + SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS_FOR_BUILD" + SAVE_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS_FOR_BUILD" + SAVE_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS_FOR_BUILD" + + warn_CFLAGS_FOR_BUILD="-Wall -Wextra -Wno-unused-function" + saved_CFLAGS="$CFLAGS" + CFLAGS="$warn_CFLAGS_FOR_BUILD $CFLAGS" + AC_MSG_CHECKING([if native ${CC_FOR_BUILD} supports ${warn_CFLAGS_FOR_BUILD}]) + AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], + [ AC_MSG_RESULT([yes]) ], + [ AC_MSG_RESULT([no]) + CFLAGS="$saved_CFLAGS" + ]) + + AC_MSG_CHECKING([for working native compiler: ${CC_FOR_BUILD}]) + AC_RUN_IFELSE( + [AC_LANG_PROGRAM([], [])], + [working_native_cc=yes], + [working_native_cc=no],[:]) + + CFLAGS_FOR_BUILD="$CFLAGS" + + # Restore the environment + cross_compiling=$save_cross_compiling + CC="$SAVE_CC" + CFLAGS="$SAVE_CFLAGS" + CPPFLAGS="$SAVE_CPPFLAGS" + LDFLAGS="$SAVE_LDFLAGS" + + if test x"$working_native_cc" = x"no"; then + AC_MSG_RESULT([no]) + set_precomp=no + m4_define([please_set_for_build], [Please set CC_FOR_BUILD, CFLAGS_FOR_BUILD, CPPFLAGS_FOR_BUILD, and/or LDFLAGS_FOR_BUILD.]) + if test x"$use_ecmult_static_precomputation" = x"yes"; then + AC_MSG_ERROR([native compiler ${CC_FOR_BUILD} does not produce working binaries. please_set_for_build]) + else + AC_MSG_WARN([Disabling statically generated ecmult table because the native compiler ${CC_FOR_BUILD} does not produce working binaries. please_set_for_build]) + fi + else + AC_MSG_RESULT([yes]) + set_precomp=yes + fi + fi + + AC_SUBST(CC_FOR_BUILD) + AC_SUBST(CFLAGS_FOR_BUILD) + AC_SUBST(CPPFLAGS_FOR_BUILD) + AC_SUBST(LDFLAGS_FOR_BUILD) +else + set_precomp=no fi if test x"$set_precomp" = x"yes"; then AC_DEFINE(USE_ECMULT_STATIC_PRECOMPUTATION, 1, [Define this symbol to use a statically generated ecmult table]) fi +### +### Handle module options +### + if test x"$enable_module_ecdh" = x"yes"; then AC_DEFINE(ENABLE_MODULE_ECDH, 1, [Define this symbol to enable the ECDH module]) fi @@ -447,14 +453,14 @@ if test x"$enable_module_extrakeys" = x"yes"; then AC_DEFINE(ENABLE_MODULE_EXTRAKEYS, 1, [Define this symbol to enable the extrakeys module]) fi -if test x"$use_external_asm" = x"yes"; then - AC_DEFINE(USE_EXTERNAL_ASM, 1, [Define this symbol if an external (non-inline) assembly implementation is used]) -fi - if test x"$use_external_default_callbacks" = x"yes"; then AC_DEFINE(USE_EXTERNAL_DEFAULT_CALLBACKS, 1, [Define this symbol if an external implementation of the default callbacks is used]) fi +### +### Check for --enable-experimental if necessary +### + if test x"$enable_experimental" = x"yes"; then AC_MSG_NOTICE([******]) AC_MSG_NOTICE([WARNING: experimental build]) @@ -474,6 +480,10 @@ else fi fi +### +### Generate output +### + AC_CONFIG_HEADERS([src/libsecp256k1-config.h]) AC_CONFIG_FILES([Makefile libsecp256k1.pc]) AC_SUBST(SECP_INCLUDES) @@ -492,7 +502,7 @@ AM_CONDITIONAL([ENABLE_MODULE_SCHNORRSIG], [test x"$enable_module_schnorrsig" = AM_CONDITIONAL([USE_EXTERNAL_ASM], [test x"$use_external_asm" = x"yes"]) AM_CONDITIONAL([USE_ASM_ARM], [test x"$set_asm" = x"arm"]) -dnl make sure nothing new is exported so that we don't break the cache +# Make sure nothing new is exported so that we don't break the cache. 
PKGCONFIG_PATH_TEMP="$PKG_CONFIG_PATH" unset PKG_CONFIG_PATH PKG_CONFIG_PATH="$PKGCONFIG_PATH_TEMP" @@ -513,10 +523,9 @@ echo " module extrakeys = $enable_module_extrakeys" echo " module schnorrsig = $enable_module_schnorrsig" echo echo " asm = $set_asm" -echo " bignum = $set_bignum" echo " ecmult window size = $set_ecmult_window" echo " ecmult gen prec. bits = $set_ecmult_gen_precision" -dnl Hide test-only options unless they're used. +# Hide test-only options unless they're used. if test x"$set_widemul" != xauto; then echo " wide multiplication = $set_widemul" fi @@ -527,3 +536,9 @@ echo " CFLAGS = $CFLAGS" echo " CPPFLAGS = $CPPFLAGS" echo " LDFLAGS = $LDFLAGS" echo +if test x"$set_precomp" = x"yes"; then +echo " CC_FOR_BUILD = $CC_FOR_BUILD" +echo " CFLAGS_FOR_BUILD = $CFLAGS_FOR_BUILD" +echo " CPPFLAGS_FOR_BUILD = $CPPFLAGS_FOR_BUILD" +echo " LDFLAGS_FOR_BUILD = $LDFLAGS_FOR_BUILD" +fi diff --git a/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.c b/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.c index 12f569a..6657346 100644 --- a/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.c +++ b/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.c @@ -5,11 +5,10 @@ ***********************************************************************/ #include -#include #include "lax_der_parsing.h" -int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { +int rustsecp256k1_v0_4_1_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { size_t rpos, rlen, spos, slen; size_t pos = 0; size_t lenbyte; @@ -17,7 +16,7 @@ int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_4_ int overflow = 0; /* Hack to initialize sig with a correctly-parsed but invalid signature. */ - rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig); + rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact(ctx, sig, tmpsig); /* Sequence tag byte */ if (pos == inputlen || input[pos] != 0x30) { @@ -138,11 +137,11 @@ int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_4_ } if (!overflow) { - overflow = !rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig); + overflow = !rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact(ctx, sig, tmpsig); } if (overflow) { memset(tmpsig, 0, 64); - rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig); + rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact(ctx, sig, tmpsig); } return 1; } diff --git a/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.h b/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.h index 6e20e0d..7792908 100644 --- a/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.h +++ b/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.h @@ -26,8 +26,8 @@ * certain violations are easily supported. You may need to adapt it. * * Do not use this for new systems. Use well-defined DER or compact signatures - * instead if you have the choice (see rustsecp256k1_v0_4_0_ecdsa_signature_parse_der and - * rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact). + * instead if you have the choice (see rustsecp256k1_v0_4_1_ecdsa_signature_parse_der and + * rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact). 
* * The supported violations are: * - All numbers are parsed as nonnegative integers, even though X.609-0207 @@ -51,7 +51,13 @@ #ifndef SECP256K1_CONTRIB_LAX_DER_PARSING_H #define SECP256K1_CONTRIB_LAX_DER_PARSING_H +/* #include secp256k1.h only when it hasn't been included yet. + This enables this file to be #included directly in other project + files (such as tests.c) without the need to set an explicit -I flag, + which would be necessary to locate secp256k1.h. */ +#ifndef SECP256K1_H #include +#endif #ifdef __cplusplus extern "C" { @@ -77,9 +83,9 @@ extern "C" { * encoded numbers are out of range, signature validation with it is * guaranteed to fail for every message and public key. */ -int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der_lax( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_ecdsa_signature* sig, +int rustsecp256k1_v0_4_1_ecdsa_signature_parse_der_lax( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); diff --git a/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c b/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c index 710f554..ce96852 100644 --- a/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c +++ b/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c @@ -5,11 +5,10 @@ ***********************************************************************/ #include -#include #include "lax_der_privatekey_parsing.h" -int ec_privkey_import_der(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *out32, const unsigned char *privkey, size_t privkeylen) { +int ec_privkey_import_der(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *out32, const unsigned char *privkey, size_t privkeylen) { const unsigned char *end = privkey + privkeylen; int lenb = 0; int len = 0; @@ -46,17 +45,17 @@ int ec_privkey_import_der(const rustsecp256k1_v0_4_0_context* ctx, unsigned char return 0; } memcpy(out32 + 32 - privkey[1], privkey + 2, privkey[1]); - if (!rustsecp256k1_v0_4_0_ec_seckey_verify(ctx, out32)) { + if (!rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, out32)) { memset(out32, 0, 32); return 0; } return 1; } -int ec_privkey_export_der(const rustsecp256k1_v0_4_0_context *ctx, unsigned char *privkey, size_t *privkeylen, const unsigned char *key32, int compressed) { - rustsecp256k1_v0_4_0_pubkey pubkey; +int ec_privkey_export_der(const rustsecp256k1_v0_4_1_context *ctx, unsigned char *privkey, size_t *privkeylen, const unsigned char *key32, int compressed) { + rustsecp256k1_v0_4_1_pubkey pubkey; size_t pubkeylen = 0; - if (!rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey, key32)) { + if (!rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey, key32)) { *privkeylen = 0; return 0; } @@ -80,7 +79,7 @@ int ec_privkey_export_der(const rustsecp256k1_v0_4_0_context *ctx, unsigned char memcpy(ptr, key32, 32); ptr += 32; memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle); pubkeylen = 33; - rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED); + rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED); ptr += pubkeylen; *privkeylen = ptr - privkey; } else { @@ -105,7 +104,7 @@ int ec_privkey_export_der(const rustsecp256k1_v0_4_0_context *ctx, unsigned char memcpy(ptr, key32, 32); ptr += 32; memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle); pubkeylen = 65; - 
rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_UNCOMPRESSED); + rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_UNCOMPRESSED); ptr += pubkeylen; *privkeylen = ptr - privkey; } diff --git a/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h b/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h index 671c62e..8899a32 100644 --- a/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h +++ b/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h @@ -28,7 +28,13 @@ #ifndef SECP256K1_CONTRIB_BER_PRIVATEKEY_H #define SECP256K1_CONTRIB_BER_PRIVATEKEY_H +/* #include secp256k1.h only when it hasn't been included yet. + This enables this file to be #included directly in other project + files (such as tests.c) without the need to set an explicit -I flag, + which would be necessary to locate secp256k1.h. */ +#ifndef SECP256K1_H #include +#endif #ifdef __cplusplus extern "C" { @@ -52,10 +58,10 @@ extern "C" { * simple 32-byte private keys are sufficient. * * Note that this function does not guarantee correct DER output. It is - * guaranteed to be parsable by rustsecp256k1_v0_4_0_ec_privkey_import_der + * guaranteed to be parsable by rustsecp256k1_v0_4_1_ec_privkey_import_der */ SECP256K1_WARN_UNUSED_RESULT int ec_privkey_export_der( - const rustsecp256k1_v0_4_0_context* ctx, + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *privkey, size_t *privkeylen, const unsigned char *seckey, @@ -77,7 +83,7 @@ SECP256K1_WARN_UNUSED_RESULT int ec_privkey_export_der( * key. */ SECP256K1_WARN_UNUSED_RESULT int ec_privkey_import_der( - const rustsecp256k1_v0_4_0_context* ctx, + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *privkey, size_t privkeylen diff --git a/secp256k1-sys/depend/secp256k1/doc/safegcd_implementation.md b/secp256k1-sys/depend/secp256k1/doc/safegcd_implementation.md new file mode 100644 index 0000000..3ae556f --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/doc/safegcd_implementation.md @@ -0,0 +1,765 @@ +# The safegcd implementation in libsecp256k1 explained + +This document explains the modular inverse implementation in the `src/modinv*.h` files. It is based +on the paper +["Fast constant-time gcd computation and modular inversion"](https://gcd.cr.yp.to/papers.html#safegcd) +by Daniel J. Bernstein and Bo-Yin Yang. The references below are for the Date: 2019.04.13 version. + +The actual implementation is in C of course, but for demonstration purposes Python3 is used here. +Most implementation aspects and optimizations are explained, except those that depend on the specific +number representation used in the C code. + +## 1. Computing the Greatest Common Divisor (GCD) using divsteps + +The algorithm from the paper (section 11), at a very high level, is this: + +```python +def gcd(f, g): + """Compute the GCD of an odd integer f and another integer g.""" + assert f & 1 # require f to be odd + delta = 1 # additional state variable + while g != 0: + assert f & 1 # f will be odd in every iteration + if delta > 0 and g & 1: + delta, f, g = 1 - delta, g, (g - f) // 2 + elif g & 1: + delta, f, g = 1 + delta, f, (g + f) // 2 + else: + delta, f, g = 1 + delta, f, (g ) // 2 + return abs(f) +``` + +It computes the greatest common divisor of an odd integer *f* and any integer *g*. Its inner loop +keeps rewriting the variables *f* and *g* alongside a state variable *δ* that starts at *1*, until +*g=0* is reached. 
At that point, *|f|* gives the GCD. Each of the transitions in the loop is called a
+"division step" (referred to as divstep in what follows).
+
+For example, *gcd(21, 14)* would be computed as:
+- Start with *δ=1 f=21 g=14*
+- Take the third branch: *δ=2 f=21 g=7*
+- Take the first branch: *δ=-1 f=7 g=-7*
+- Take the second branch: *δ=0 f=7 g=0*
+- The answer *|f| = 7*.
+
+Why it works:
+- Divsteps can be decomposed into two steps (see paragraph 8.2 in the paper):
+  - (a) If *g* is odd, replace *(f,g)* with *(g,g-f)* or *(f,g+f)*, resulting in an even *g*.
+  - (b) Replace *(f,g)* with *(f,g/2)* (where *g* is guaranteed to be even).
+- Neither of those two operations changes the GCD:
+  - For (a), assume *gcd(f,g)=c*, then it must be the case that *f=a c* and *g=b c* for some integers *a*
+    and *b*. As *(g,g-f)=(b c,(b-a)c)* and *(f,f+g)=(a c,(a+b)c)*, the result clearly still has
+    common factor *c*. Reasoning in the other direction shows that no common factor can be added by
+    doing so either.
+  - For (b), we know that *f* is odd, so *gcd(f,g)* clearly has no factor *2*, and we can remove
+    it from *g*.
+- The algorithm will eventually converge to *g=0*. This is proven in the paper (see theorem G.3).
+- It follows that eventually we find a final value *f'* for which *gcd(f,g) = gcd(f',0)*. As the
+  gcd of *f'* and *0* is *|f'|* by definition, that is our answer.
+
+Compared to more [traditional GCD algorithms](https://en.wikipedia.org/wiki/Euclidean_algorithm), this one has the property of only ever looking at
+the low-order bits of the variables to decide the next steps, and being easy to make
+constant-time (in more low-level languages than Python). The *δ* parameter is necessary to
+guide the algorithm towards shrinking the numbers' magnitudes without explicitly needing to look
+at high order bits.
+
+Properties that will become important later:
+- Performing more divsteps than needed is not a problem, as *f* does not change anymore after *g=0*.
+- Only even numbers are divided by *2*. This means that when reasoning about it algebraically we
+  do not need to worry about rounding.
+- At every point during the algorithm's execution the next *N* steps only depend on the bottom *N*
+  bits of *f* and *g*, and on *δ*.
+
+
+## 2. From GCDs to modular inverses
+
+We want an algorithm to compute the inverse *a* of *x* modulo *M*, i.e. the number *a* such that *a x=1
+mod M*. This inverse only exists if the GCD of *x* and *M* is *1*, but that is always the case if *M* is
+prime and *0 < x < M*. In what follows, assume that the modular inverse exists.
+It turns out this inverse can be computed as a side effect of computing the GCD by keeping track
+of how the internal variables can be written as linear combinations of the inputs at every step
+(see the [extended Euclidean algorithm](https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm)).
+Since the GCD is *1*, such an algorithm will compute numbers *a* and *b* such that *a x + b M = 1*.
+Taking that expression *mod M* gives *a x mod M = 1*, and we see that *a* is the modular inverse of *x
+mod M*.
+
+A similar approach can be used to calculate modular inverses using the divsteps-based GCD
+algorithm shown above, if the modulus *M* is odd. To do so, compute *gcd(f=M,g=x)*, while keeping
+track of extra variables *d* and *e*, for which at every step *d = f/x (mod M)* and *e = g/x (mod M)*.
+*f/x* here means the number which multiplied with *x* gives *f mod M*. As *f* and *g* are initialized to *M*
+and *x* respectively, *d* and *e* just start off being *0* (*M/x mod M = 0/x mod M = 0*) and *1* (*x/x mod M
+= 1*).
+
+```python
+def div2(M, x):
+    """Helper routine to compute x/2 mod M (where M is odd)."""
+    assert M & 1
+    if x & 1: # If x is odd, make it even by adding M.
+        x += M
+    # x must be even now, so a clean division by 2 is possible.
+    return x // 2
+
+def modinv(M, x):
+    """Compute the inverse of x mod M (given that it exists, and M is odd)."""
+    assert M & 1
+    delta, f, g, d, e = 1, M, x, 0, 1
+    while g != 0:
+        # Note that while division by two for f and g is only ever done on even inputs, this is
+        # not true for d and e, so we need the div2 helper function.
+        if delta > 0 and g & 1:
+            delta, f, g, d, e = 1 - delta, g, (g - f) // 2, e, div2(M, e - d)
+        elif g & 1:
+            delta, f, g, d, e = 1 + delta, f, (g + f) // 2, d, div2(M, e + d)
+        else:
+            delta, f, g, d, e = 1 + delta, f, (g    ) // 2, d, div2(M, e    )
+        # Verify that the invariants d=f/x mod M, e=g/x mod M are maintained.
+        assert f % M == (d * x) % M
+        assert g % M == (e * x) % M
+    assert f == 1 or f == -1  # |f| is the GCD, it must be 1
+    # Because of invariant d = f/x (mod M), 1/x = d/f (mod M). As |f|=1, d/f = d*f.
+    return (d * f) % M
+```
+
+Also note that this approach to track *d* and *e* throughout the computation to determine the inverse
+is different from the paper. There (see paragraph 12.1 in the paper) a transition matrix for the
+entire computation is determined (see section 3 below) and the inverse is computed from that.
+The approach here avoids the need for 2x2 matrix multiplications of various sizes, and appears to
+be faster at the level of optimization we're able to do in C.
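+
+As a quick sanity check, the `gcd` and `modinv` routines above can be exercised directly (the
+modulus below is an arbitrary small odd prime, chosen only for illustration):
+
+```python
+assert gcd(21, 14) == 7  # the worked example from section 1
+
+M = 1009  # arbitrary small odd prime
+for x in range(1, 20):
+    a = modinv(M, x)
+    assert (a * x) % M == 1  # a is indeed the inverse of x mod M
+```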
As *f* and *g* are initialized to *M* +and *x* respectively, *d* and *e* just start off being *0* (*M/x mod M = 0/x mod M = 0*) and *1* (*x/x mod M += 1*). + +```python +def div2(M, x): + """Helper routine to compute x/2 mod M (where M is odd).""" + assert M & 1 + if x & 1: # If x is odd, make it even by adding M. + x += M + # x must be even now, so a clean division by 2 is possible. + return x // 2 + +def modinv(M, x): + """Compute the inverse of x mod M (given that it exists, and M is odd).""" + assert M & 1 + delta, f, g, d, e = 1, M, x, 0, 1 + while g != 0: + # Note that while division by two for f and g is only ever done on even inputs, this is + # not true for d and e, so we need the div2 helper function. + if delta > 0 and g & 1: + delta, f, g, d, e = 1 - delta, g, (g - f) // 2, e, div2(M, e - d) + elif g & 1: + delta, f, g, d, e = 1 + delta, f, (g + f) // 2, d, div2(M, e + d) + else: + delta, f, g, d, e = 1 + delta, f, (g ) // 2, d, div2(M, e ) + # Verify that the invariants d=f/x mod M, e=g/x mod M are maintained. + assert f % M == (d * x) % M + assert g % M == (e * x) % M + assert f == 1 or f == -1 # |f| is the GCD, it must be 1 + # Because of invariant d = f/x (mod M), 1/x = d/f (mod M). As |f|=1, d/f = d*f. + return (d * f) % M +``` + +Also note that this approach to track *d* and *e* throughout the computation to determine the inverse +is different from the paper. There (see paragraph 12.1 in the paper) a transition matrix for the +entire computation is determined (see section 3 below) and the inverse is computed from that. +The approach here avoids the need for 2x2 matrix multiplications of various sizes, and appears to +be faster at the level of optimization we're able to do in C. + + +## 3. Batching multiple divsteps + +Every divstep can be expressed as a matrix multiplication, applying a transition matrix *(1/2 t)* +to both vectors *[f, g]* and *[d, e]* (see paragraph 8.1 in the paper): + +``` + t = [ u, v ] + [ q, r ] + + [ out_f ] = (1/2 * t) * [ in_f ] + [ out_g ] = [ in_g ] + + [ out_d ] = (1/2 * t) * [ in_d ] (mod M) + [ out_e ] [ in_e ] +``` + +where *(u, v, q, r)* is *(0, 2, -1, 1)*, *(2, 0, 1, 1)*, or *(2, 0, 0, 1)*, depending on which branch is +taken. As above, the resulting *f* and *g* are always integers. + +Performing multiple divsteps corresponds to a multiplication with the product of all the +individual divsteps' transition matrices. As each transition matrix consists of integers +divided by *2*, the product of these matrices will consist of integers divided by *2N* (see also +theorem 9.2 in the paper). These divisions are expensive when updating *d* and *e*, so we delay +them: we compute the integer coefficients of the combined transition matrix scaled by *2N*, and +do one division by *2N* as a final step: + +```python +def divsteps_n_matrix(delta, f, g): + """Compute delta and transition matrix t after N divsteps (multiplied by 2^N).""" + u, v, q, r = 1, 0, 0, 1 # start with identity matrix + for _ in range(N): + if delta > 0 and g & 1: + delta, f, g, u, v, q, r = 1 - delta, g, (g - f) // 2, 2*q, 2*r, q-u, r-v + elif g & 1: + delta, f, g, u, v, q, r = 1 + delta, f, (g + f) // 2, 2*u, 2*v, q+u, r+v + else: + delta, f, g, u, v, q, r = 1 + delta, f, (g ) // 2, 2*u, 2*v, q , r + return delta, (u, v, q, r) +``` + +As the branches in the divsteps are completely determined by the bottom *N* bits of *f* and *g*, this +function to compute the transition matrix only needs to see those bottom bits. 
+ + +## 3. Batching multiple divsteps + +Every divstep can be expressed as a matrix multiplication, applying a transition matrix *(1/2 t)* +to both vectors *[f, g]* and *[d, e]* (see paragraph 8.1 in the paper): + +``` + t = [ u, v ] + [ q, r ] + + [ out_f ] = (1/2 * t) * [ in_f ] + [ out_g ] = [ in_g ] + + [ out_d ] = (1/2 * t) * [ in_d ] (mod M) + [ out_e ] [ in_e ] +``` + +where *(u, v, q, r)* is *(0, 2, -1, 1)*, *(2, 0, 1, 1)*, or *(2, 0, 0, 1)*, depending on which branch is +taken. As above, the resulting *f* and *g* are always integers. + +Performing multiple divsteps corresponds to a multiplication with the product of all the +individual divsteps' transition matrices. As each transition matrix consists of integers +divided by *2*, the product of these matrices will consist of integers divided by *2^N* (see also +theorem 9.2 in the paper). These divisions are expensive when updating *d* and *e*, so we delay +them: we compute the integer coefficients of the combined transition matrix scaled by *2^N*, and +do one division by *2^N* as a final step: + +```python +def divsteps_n_matrix(delta, f, g): + """Compute delta and transition matrix t after N divsteps (multiplied by 2^N).""" + u, v, q, r = 1, 0, 0, 1 # start with identity matrix + for _ in range(N): + if delta > 0 and g & 1: + delta, f, g, u, v, q, r = 1 - delta, g, (g - f) // 2, 2*q, 2*r, q-u, r-v + elif g & 1: + delta, f, g, u, v, q, r = 1 + delta, f, (g + f) // 2, 2*u, 2*v, q+u, r+v + else: + delta, f, g, u, v, q, r = 1 + delta, f, (g ) // 2, 2*u, 2*v, q , r + return delta, (u, v, q, r) +``` + +As the branches in the divsteps are completely determined by the bottom *N* bits of *f* and *g*, this +function to compute the transition matrix only needs to see those bottom bits. +Furthermore all +intermediate results and outputs fit in *(N+1)*-bit numbers (unsigned for *f* and *g*; signed for *u*, *v*, +*q*, and *r*) (see also paragraph 8.3 in the paper). This means that an implementation using 64-bit +integers could set *N=62* and compute the full transition matrix for 62 steps at once without any +big integer arithmetic at all. This is the reason why this algorithm is efficient: it only needs +to update the full-size *f*, *g*, *d*, and *e* numbers once every *N* steps. + +We still need functions to compute: + +``` + [ out_f ] = (1/2^N * [ u, v ]) * [ in_f ] + [ out_g ] ( [ q, r ]) [ in_g ] + + [ out_d ] = (1/2^N * [ u, v ]) * [ in_d ] (mod M) + [ out_e ] ( [ q, r ]) [ in_e ] +``` + +Because the divsteps transformation only ever divides even numbers by two, the result of *t [f,g]* is always even. When *t* is a composition of *N* divsteps, it follows that the resulting *f* +and *g* will be multiples of *2^N*, and division by *2^N* is simply shifting them down: + +```python +def update_fg(f, g, t): + """Multiply matrix t/2^N with [f, g].""" + u, v, q, r = t + cf, cg = u*f + v*g, q*f + r*g + # (t / 2^N) should cleanly apply to [f,g] so the result of t*[f,g] should have N zero + # bottom bits. + assert cf % 2**N == 0 + assert cg % 2**N == 0 + return cf >> N, cg >> N +``` + +The same is not true for *d* and *e*, and we need an equivalent of the `div2` function for division by *2^N mod M*. +This is easy if we have precomputed *1/M mod 2^N* (which always exists for odd *M*): + +```python +def div2n(M, Mi, x): + """Compute x/2^N mod M, given Mi = 1/M mod 2^N.""" + assert (M * Mi) % 2**N == 1 + # Find a factor m such that m*M has the same bottom N bits as x. We want: + # (m * M) mod 2^N = x mod 2^N + # <=> m mod 2^N = (x / M) mod 2^N + # <=> m mod 2^N = (x * Mi) mod 2^N + m = (Mi * x) % 2**N + # Subtract that multiple from x, cancelling its bottom N bits. + x -= m * M + # Now a clean division by 2^N is possible. + assert x % 2**N == 0 + return (x >> N) % M + +def update_de(d, e, t, M, Mi): + """Multiply matrix t/2^N with [d, e], modulo M.""" + u, v, q, r = t + cd, ce = u*d + v*e, q*d + r*e + return div2n(M, Mi, cd), div2n(M, Mi, ce) +``` + +With all of those, we can write a version of `modinv` that performs *N* divsteps at once: + +```python +def modinv(M, Mi, x): + """Compute the modular inverse of x mod M, given Mi=1/M mod 2^N.""" + assert M & 1 + delta, f, g, d, e = 1, M, x, 0, 1 + while g != 0: + # Compute the delta and transition matrix t for the next N divsteps (this only needs + # (N+1)-bit signed integer arithmetic). + delta, t = divsteps_n_matrix(delta, f % 2**N, g % 2**N) + # Apply the transition matrix t to [f, g]: + f, g = update_fg(f, g, t) + # Apply the transition matrix t to [d, e]: + d, e = update_de(d, e, t, M, Mi) + return (d * f) % M +``` + +This means that in practice we'll always perform a multiple of *N* divsteps. This is not a problem +because once *g=0*, further divsteps do not affect *f*, *g*, *d*, or *e* anymore (only *δ* keeps +increasing). For variable time code such excess iterations will be mostly optimized away in later +sections.
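The batched version can be exercised directly. In the sketch below, *N=62* (the value suggested above for 64-bit integers) and the secp256k1 field prime are merely example choices; any odd modulus works:

```python
N = 62
M = 2**256 - 2**32 - 977  # the secp256k1 field prime (odd)
Mi = pow(M, -1, 2**N)     # 1/M mod 2^N (Python 3.8+)
for x in (1, 2, 3, 0xDEADBEEF, 2**255 + 42):
    assert (modinv(M, Mi, x) * x) % M == 1
```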
+ + +## 4. Avoiding modulus operations + +So far, there are two places where we compute a remainder of big numbers modulo *M*: at the end of +`div2n` in every `update_de`, and at the very end of `modinv` after potentially negating *d* due to the +sign of *f*. These are relatively expensive operations when done generically. + +To deal with the modulus operation in `div2n`, we simply stop requiring *d* and *e* to be in range +*[0,M)* all the time. Let's start by inlining `div2n` into `update_de`, and dropping the modulus +operation at the end: + +```python +def update_de(d, e, t, M, Mi): + """Multiply matrix t/2^N with [d, e] mod M, given Mi=1/M mod 2^N.""" + u, v, q, r = t + cd, ce = u*d + v*e, q*d + r*e + # Cancel out bottom N bits of cd and ce. + md = -((Mi * cd) % 2**N) + me = -((Mi * ce) % 2**N) + cd += md * M + ce += me * M + # And cleanly divide by 2**N. + return cd >> N, ce >> N +``` + +Let's look at bounds on the ranges of these numbers. It can be shown that *|u|+|v|* and *|q|+|r|* +never exceed *2^N* (see paragraph 8.3 in the paper), and thus a multiplication with *t* will have +outputs whose absolute values are at most *2^N* times the maximum absolute input value. In case the +inputs *d* and *e* are in *(-M,M)*, which is certainly true for the initial values *d=0* and *e=1* assuming +*M > 1*, the multiplication results in numbers in range *(-2^N M,2^N M)*. Subtracting less than *2^N* +times *M* to cancel out *N* bits brings that up to *(-2^(N+1) M,2^N M)*, and +dividing by *2^N* at the end takes it to *(-2M,M)*. Another application of `update_de` would take that +to *(-3M,2M)*, and so forth. This progressive expansion of the variables' ranges can be +counteracted by incrementing *d* and *e* by *M* whenever they're negative: + +```python + ... + if d < 0: + d += M + if e < 0: + e += M + cd, ce = u*d + v*e, q*d + r*e + # Cancel out bottom N bits of cd and ce. + ... +``` + +With inputs in *(-2M,M)*, they will first be shifted into range *(-M,M)*, which means that the +output will again be in *(-2M,M)*, and this remains the case regardless of how many `update_de` +invocations there are. In what follows, we will try to make this more efficient. + +Note that increasing *d* by *M* is equal to incrementing *cd* by *u M* and *ce* by *q M*. Similarly, +increasing *e* by *M* is equal to incrementing *cd* by *v M* and *ce* by *r M*. So we could instead write: + +```python + ... + cd, ce = u*d + v*e, q*d + r*e + # Perform the equivalent of incrementing d, e by M when they're negative. + if d < 0: + cd += u*M + ce += q*M + if e < 0: + cd += v*M + ce += r*M + # Cancel out bottom N bits of cd and ce. + md = -((Mi * cd) % 2**N) + me = -((Mi * ce) % 2**N) + cd += md * M + ce += me * M + ... +``` + +Now note that we have two steps of corrections to *cd* and *ce* that add multiples of *M*: this +increment, and the decrement that cancels out bottom bits. The second one depends on the first +one, but they can still be efficiently combined by only computing the bottom bits of *cd* and *ce* +at first, and using that to compute the final *md*, *me* values: + +```python +def update_de(d, e, t, M, Mi): + """Multiply matrix t/2^N with [d, e], modulo M.""" + u, v, q, r = t + md, me = 0, 0 + # Compute what multiples of M to add to cd and ce. + if d < 0: + md += u + me += q + if e < 0: + md += v + me += r + # Compute bottom N bits of t*[d,e] + M*[md,me]. + cd, ce = (u*d + v*e + md*M) % 2**N, (q*d + r*e + me*M) % 2**N + # Correct md and me such that the bottom N bits of t*[d,e] + M*[md,me] are zero. + md -= (Mi * cd) % 2**N + me -= (Mi * ce) % 2**N + # Do the full computation. + cd, ce = u*d + v*e + md*M, q*d + r*e + me*M + # And cleanly divide by 2**N. + return cd >> N, ce >> N +``` + +One last optimization: we can avoid the *md M* and *me M* multiplications in the bottom bits of *cd* +and *ce* by moving them to the *md* and *me* correction: + +```python + ... + # Compute bottom N bits of t*[d,e]. + cd, ce = (u*d + v*e) % 2**N, (q*d + r*e) % 2**N + # Correct md and me such that the bottom N bits of t*[d,e]+M*[md,me] are zero. + # Note that this is not the same as {md = (-Mi * cd) % 2**N} etc. That would also result in N + # zero bottom bits, but isn't guaranteed to be a reduction of [0,2^N) compared to the + # previous md and me values, and thus would violate our bounds analysis. + md -= (Mi*cd + md) % 2**N + me -= (Mi*ce + me) % 2**N + ... +``` + +The resulting function takes *d* and *e* in range *(-2M,M)* as inputs, and outputs values in the same +range. That also means that the *d* value at the end of `modinv` will be in that range, while we want +a result in *[0,M)*. To do that, we need a normalization function. It's easy to integrate the +conditional negation of *d* (based on the sign of *f*) into it as well: + +```python +def normalize(sign, v, M): + """Compute sign*v mod M, where v is in range (-2*M,M); output in [0,M).""" + assert sign == 1 or sign == -1 + # v in (-2*M,M) + if v < 0: + v += M + # v in (-M,M). Now multiply v with sign (which can only be 1 or -1). + if sign == -1: + v = -v + # v in (-M,M) + if v < 0: + v += M + # v in [0,M) + return v +``` + +And calling it in `modinv` is simply: + +```python + ... + return normalize(f, d, M) +```
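Since the input domain of `normalize` is small relative to *M*, it can be checked by brute force over its entire range; the snippet below is a test sketch with an arbitrary tiny modulus:

```python
M = 7
for sign in (1, -1):
    for v in range(-2*M + 1, M):
        # normalize must agree with Python's own modular reduction of sign*v.
        assert normalize(sign, v, M) == (sign * v) % M
```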
+ + +## 5. Constant-time operation + +The primary selling point of the algorithm is fast constant-time operation. What code flow still +depends on the input data so far? + +- the number of iterations of the while *g ≠ 0* loop in `modinv` +- the branches inside `divsteps_n_matrix` +- the sign checks in `update_de` +- the sign checks in `normalize` + +To make the while loop in `modinv` constant time it can be replaced with a constant number of +iterations. The paper proves (Theorem 11.2) that *741* divsteps are sufficient for any *256*-bit +inputs, and [safegcd-bounds](https://github.com/sipa/safegcd-bounds) shows that the slightly better bound *724* is +even sufficient. Given that every loop iteration performs *N* divsteps, it will run a total of +*⌈724/N⌉* times. + +To deal with the branches in `divsteps_n_matrix` we will replace them with constant-time bitwise +operations (and hope the C compiler isn't smart enough to turn them back into branches; see +`valgrind_ctime_test.c` for automated tests that this isn't the case). To do so, observe that a +divstep can be written instead as (compare to the inner loop of `gcd` in section 1): + +```python + x = -f if delta > 0 else f # set x equal to (input) -f or f + if g & 1: + g += x # set g to (input) g-f or g+f + if delta > 0: + delta = -delta + f += g # set f to (input) g (note that g was set to g-f before) + delta += 1 + g >>= 1 +``` + +To convert the above to bitwise operations, we rely on a trick to negate conditionally: per the +definition of negative numbers in two's complement, (*-v == ~v + 1*) holds for every number *v*. As +*-1* in two's complement is all *1* bits, bitflipping can be expressed as xor with *-1*. It follows +that *-v == (v ^ -1) - (-1)*. Thus, if we have a variable *c* that takes on values *0* or *-1*, then +*(v ^ c) - c* is *v* if *c=0* and *-v* if *c=-1*.
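This identity can be checked exhaustively for small values; Python integers behave like arbitrarily wide two's complement numbers for `^` and `-`, so a few asserts suffice as a sketch:

```python
for v in range(-8, 8):
    assert -v == (v ^ -1) - (-1)
    for c in (0, -1):
        # (v ^ c) - c negates v exactly when the mask c is -1.
        assert (v ^ c) - c == (-v if c == -1 else v)
```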
+ +Using this we can write: + +```python + x = -f if delta > 0 else f +``` + +in constant-time form as: + +```python + c1 = (-delta) >> 63 + # Conditionally negate f based on c1: + x = (f ^ c1) - c1 +``` + +To use that trick, we need a helper mask variable *c1* that resolves the condition *δ>0* to *-1* +(if true) or *0* (if false). We compute *c1* using right shifting, which is equivalent to dividing by +the specified power of *2* and rounding down (in Python, and also in C under the assumption of a typical two's complement system; see +`assumptions.h` for tests that this is the case). Right shifting by *63* thus maps all +numbers in range *[-2^63,0)* to *-1*, and numbers in range *[0,2^63)* to *0*. + +Using the facts that *x&0=0* and *x&(-1)=x* (on two's complement systems again), we can write: + +```python + if g & 1: + g += x +``` + +as: + +```python + # Compute c2=0 if g is even and c2=-1 if g is odd. + c2 = -(g & 1) + # This masks out x if g is even, and leaves x be if g is odd. + g += x & c2 +``` + +Using the conditional negation trick again we can write: + +```python + if g & 1: + if delta > 0: + delta = -delta +``` + +as: + +```python + # Compute c3=-1 if g is odd and delta>0, and 0 otherwise. + c3 = c1 & c2 + # Conditionally negate delta based on c3: + delta = (delta ^ c3) - c3 +``` + +Finally: + +```python + if g & 1: + if delta > 0: + f += g +``` + +becomes: + +```python + f += g & c3 +``` + +It turns out that this can be implemented more efficiently by applying the substitution +*η=-δ*. In this representation, negating *δ* corresponds to negating *η*, and incrementing +*δ* corresponds to decrementing *η*. This allows us to remove the negation in the *c1* +computation: + +```python + # Compute a mask c1 for eta < 0, and compute the conditional negation x of f: + c1 = eta >> 63 + x = (f ^ c1) - c1 + # Compute a mask c2 for odd g, and conditionally add x to g: + c2 = -(g & 1) + g += x & c2 + # Compute a mask c3 for (eta < 0) and odd (input) g, and use it to conditionally negate eta, + # and add g to f: + c3 = c1 & c2 + eta = (eta ^ c3) - c3 + f += g & c3 + # Incrementing delta corresponds to decrementing eta. + eta -= 1 + g >>= 1 +``` + +A variant of divsteps with better worst-case performance can be used instead: starting *δ* at +*1/2* instead of *1*. This reduces the worst case number of iterations to *590* for *256*-bit inputs +(which can be shown using convex hull analysis). In this case, the substitution *ζ=-(δ+1/2)* +is used instead to keep the variable integral. Incrementing *δ* by *1* still translates to +decrementing *ζ* by *1*, but negating *δ* now corresponds to going from *ζ* to *-(ζ+1)*, or +*~ζ*. Doing that conditionally based on *c3* is simply: + +```python + ... + c3 = c1 & c2 + zeta ^= c3 + ... +``` + +By replacing the loop in `divsteps_n_matrix` with a variant of the divstep code above (extended to +also apply all *f* operations to *u*, *v* and all *g* operations to *q*, *r*), a constant-time version of +`divsteps_n_matrix` is obtained. The full code will be in section 7. + +These bit fiddling tricks can also be used to make the conditional negations and additions in +`update_de` and `normalize` constant-time.
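Putting these pieces together, the branchless divstep can be compared against the branching one from section 1. The function names below (`divstep_branch`, `divstep_bits`) are ours, purely for this test sketch; the bodies are the two formulations above, in the *η=-δ* representation:

```python
def divstep_branch(delta, f, g):
    """One divstep, written with branches as in section 1."""
    assert f & 1
    if delta > 0 and g & 1:
        return 1 - delta, g, (g - f) // 2
    elif g & 1:
        return 1 + delta, f, (g + f) // 2
    else:
        return 1 + delta, f, g // 2

def divstep_bits(eta, f, g):
    """One divstep, written with masks, in the eta = -delta representation."""
    c1 = eta >> 63           # c1 = -1 if eta < 0 (i.e. delta > 0), else 0
    x = (f ^ c1) - c1        # x = -f if delta > 0, else f
    c2 = -(g & 1)            # c2 = -1 if g is odd, else 0
    g += x & c2              # g becomes g-f or g+f if odd, else unchanged
    c3 = c1 & c2             # c3 = -1 iff g is odd and delta > 0
    eta = (eta ^ c3) - c3    # conditionally negate eta
    f += g & c3              # f becomes the old g in that case
    eta -= 1                 # incrementing delta corresponds to decrementing eta
    return eta, f, g >> 1

for delta in range(-4, 5):
    for f in range(-9, 10, 2):
        for g in range(-9, 10):
            delta2, f2, g2 = divstep_branch(delta, f, g)
            eta2, f3, g3 = divstep_bits(-delta, f, g)
            assert (delta2, f2, g2) == (-eta2, f3, g3)
```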
+ + +## 6. Variable-time optimizations + +In section 5, we modified the `divsteps_n_matrix` function (and a few others) to be constant time. +Constant time operations are only necessary when computing modular inverses of secret data. In +other cases, it slows down calculations unnecessarily. +In this section, we will construct a +faster non-constant time `divsteps_n_matrix` function. + +To do so, first consider yet another way of writing the inner loop of divstep operations in +`gcd` from section 1. This decomposition is also explained in the paper in section 8.2. We use +the original version with initial *δ=1* and *η=-δ* here. + +```python +for _ in range(N): + if g & 1 and eta < 0: + eta, f, g = -eta, g, -f + if g & 1: + g += f + eta -= 1 + g >>= 1 +``` + +Whenever *g* is even, the loop only shifts *g* down and decreases *η*. When *g* ends in multiple zero +bits, these iterations can be consolidated into one step. This requires counting the bottom zero +bits efficiently, which is possible on most platforms; it is abstracted here as the function +`count_trailing_zeros`. + +```python +def count_trailing_zeros(v): + """For a non-zero value v, find z such that v=(d<<z) for some odd d.""" + return (v & -v).bit_length() - 1 + +i = N # divsteps left to do +while True: + # Remove up to i bottom zero bits of g at once. + zeros = min(i, count_trailing_zeros(g)) + eta -= zeros + g >>= zeros + i -= zeros + if i == 0: + break + # We know g is odd now + if eta < 0: + eta, f, g = -eta, g, -f + g += f + # g is even now, and the eta decrement and g shift will happen in the next loop. +``` + +We can now remove multiple bottom *0* bits from *g* at once, but still need a full iteration whenever +there is a bottom *1* bit. In what follows, we will get rid of multiple *1* bits simultaneously as +well. + +Observe that as long as *η ≥ 0*, the loop does not modify *f*. Instead, it cancels out bottom +bits of *g* and shifts them out, and decreases *η* and *i* accordingly - interrupting only when *η* +becomes negative, or when *i* reaches *0*. Combined, this is equivalent to adding a multiple of *f* to +*g* to cancel out multiple bottom bits, and then shifting them out. + +It is easy to find what that multiple is: we want a number *w* such that *g+w f* has a few bottom +zero bits. If that number of bits is *L*, we want *g+w f mod 2^L = 0*, or *w = -g/f mod 2^L*. Since *f* +is odd, such a *w* exists for any *L*. *L* cannot be more than *i* steps (as we'd finish the loop before +doing more) or more than *η+1* steps (as we'd run `eta, f, g = -eta, g, f` at that point), but +apart from that, we're only limited by the complexity of computing *w*. + +This code demonstrates how to cancel up to 4 bits per step: + +```python +NEGINV16 = [15, 5, 3, 9, 7, 13, 11, 1] # NEGINV16[n//2] = (-n)^-1 mod 16, for odd n +i = N +while True: + zeros = min(i, count_trailing_zeros(g)) + eta -= zeros + g >>= zeros + i -= zeros + if i == 0: + break + # We know g is odd now + if eta < 0: + eta, f, g = -eta, g, f + # Compute limit on number of bits to cancel + limit = min(min(eta + 1, i), 4) + # Compute w = -g/f mod 2**limit, using the table value for -1/f mod 2**4. Note that f is + # always odd, so its inverse modulo a power of two always exists. + w = (g * NEGINV16[(f & 15) // 2]) % (2**limit) + # As w = -g/f mod (2**limit), g+w*f mod 2**limit = 0 mod 2**limit. + g += w * f + assert g % (2**limit) == 0 + # The next iteration will now shift out at least limit bottom zero bits from g. +``` + +By using a bigger table more bits can be cancelled at once. The table can also be implemented +as a formula. Several formulas are known for computing modular inverses modulo powers of two; +some can be found in Hacker's Delight second edition by Henry S. Warren, Jr. pages 245-247. +Here we need the negated modular inverse, which is a simple transformation of those (each identity is spot-checked in the snippet following this list): + +- Instead of a 3-bit table: + - *-f* or *f ^ 6* +- Instead of a 4-bit table: + - *1 - f(f + 1)* + - *-(f + (((f + 1) & 4) << 1))* +- For larger tables the following technique can be used: if *w=-1/f mod 2^L*, then *w(w f+2)* is + *-1/f mod 2^(2L)*. This allows extending the previous formulas (or tables). In particular we + have this 6-bit function (based on the 3-bit function above): + - *f(f^2 - 2)*
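These identities can be verified exhaustively for all odd *f* modulo the relevant power of two. The check below is a sketch relying on Python 3.8+'s three-argument `pow` for the reference inverses:

```python
NEGINV16 = [15, 5, 3, 9, 7, 13, 11, 1]  # NEGINV16[n//2] = (-n)^-1 mod 16, for odd n
for f in range(1, 16, 2):
    neg_inv8 = (-pow(f, -1, 8)) % 8
    assert (-f) % 8 == neg_inv8                                 # 3-bit: -f
    assert (f ^ 6) % 8 == neg_inv8                              # 3-bit: f ^ 6
    assert (1 - f * (f + 1)) % 16 == NEGINV16[f // 2]           # 4-bit formulas
    assert (-(f + (((f + 1) & 4) << 1))) % 16 == NEGINV16[f // 2]
    assert (f * (f * f - 2)) % 64 == (-pow(f, -1, 64)) % 64     # 6-bit: f(f^2 - 2)
```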
+ +This loop, again extended to also handle *u*, *v*, *q*, and *r* alongside *f* and *g*, placed in +`divsteps_n_matrix`, gives a significantly faster, but non-constant time version. + + +## 7. Final Python version + +All together we need the following functions: + +- A way to compute the transition matrix in constant time, using the `divsteps_n_matrix` function + from section 3, but with its loop replaced by a variant of the constant-time divstep from + section 5, extended to handle *u*, *v*, *q*, *r*: + +```python +def divsteps_n_matrix(zeta, f, g): + """Compute zeta and transition matrix t after N divsteps (multiplied by 2^N).""" + u, v, q, r = 1, 0, 0, 1 # start with identity matrix + for _ in range(N): + c1 = zeta >> 63 + # Compute x, y, z as conditionally-negated versions of f, u, v. + x, y, z = (f ^ c1) - c1, (u ^ c1) - c1, (v ^ c1) - c1 + c2 = -(g & 1) + # Conditionally add x, y, z to g, q, r. + g, q, r = g + (x & c2), q + (y & c2), r + (z & c2) + c1 &= c2 # reusing c1 here for the earlier c3 variable + zeta = (zeta ^ c1) - 1 # inlining the unconditional zeta decrement here + # Conditionally add g, q, r to f, u, v. + f, u, v = f + (g & c1), u + (q & c1), v + (r & c1) + # When shifting g down, don't shift q, r, as we construct a transition matrix multiplied + # by 2^N. Instead, shift f's coefficients u and v up. + g, u, v = g >> 1, u << 1, v << 1 + return zeta, (u, v, q, r) +``` + +- The functions to update *f* and *g*, and *d* and *e*, from sections 3 and 4, with the constant-time + changes to `update_de` from section 5: + +```python +def update_fg(f, g, t): + """Multiply matrix t/2^N with [f, g].""" + u, v, q, r = t + cf, cg = u*f + v*g, q*f + r*g + return cf >> N, cg >> N + +def update_de(d, e, t, M, Mi): + """Multiply matrix t/2^N with [d, e], modulo M.""" + u, v, q, r = t + d_sign, e_sign = d >> 257, e >> 257 + md, me = (u & d_sign) + (v & e_sign), (q & d_sign) + (r & e_sign) + cd, ce = (u*d + v*e) % 2**N, (q*d + r*e) % 2**N + md -= (Mi*cd + md) % 2**N + me -= (Mi*ce + me) % 2**N + cd, ce = u*d + v*e + M*md, q*d + r*e + M*me + return cd >> N, ce >> N +``` + +- The `normalize` function from section 4, made constant time as well: + +```python +def normalize(sign, v, M): + """Compute sign*v mod M, where v in (-2*M,M); output in [0,M).""" + v_sign = v >> 257 + # Conditionally add M to v. + v += M & v_sign + c = (sign - 1) >> 1 + # Conditionally negate v. + v = (v ^ c) - c + v_sign = v >> 257 + # Conditionally add M to v again.
+ v += M & v_sign + return v +``` + +- And finally the `modinv` function too, adapted to use *ζ* instead of *δ*, and using the fixed + iteration count from section 5: + +```python +def modinv(M, Mi, x): + """Compute the modular inverse of x mod M, given Mi=1/M mod 2^N.""" + zeta, f, g, d, e = -1, M, x, 0, 1 + for _ in range((590 + N - 1) // N): + zeta, t = divsteps_n_matrix(zeta, f % 2**N, g % 2**N) + f, g = update_fg(f, g, t) + d, e = update_de(d, e, t, M, Mi) + return normalize(f, d, M) +``` + +- To get a variable time version, replace the `divsteps_n_matrix` function with one that uses the + divsteps loop from section 6, and a `modinv` version that calls it without the fixed iteration + count: + +```python +NEGINV16 = [15, 5, 3, 9, 7, 13, 11, 1] # NEGINV16[n//2] = (-n)^-1 mod 16, for odd n +def divsteps_n_matrix_var(eta, f, g): + """Compute eta and transition matrix t after N divsteps (multiplied by 2^N).""" + u, v, q, r = 1, 0, 0, 1 + i = N + while True: + zeros = min(i, count_trailing_zeros(g)) + eta, i = eta - zeros, i - zeros + g, u, v = g >> zeros, u << zeros, v << zeros + if i == 0: + break + if eta < 0: + eta, f, u, v, g, q, r = -eta, g, q, r, -f, -u, -v + limit = min(min(eta + 1, i), 4) + w = (g * NEGINV16[(f & 15) // 2]) % (2**limit) + g, q, r = g + w*f, q + w*u, r + w*v + return eta, (u, v, q, r) + +def modinv_var(M, Mi, x): + """Compute the modular inverse of x mod M, given Mi = 1/M mod 2^N.""" + eta, f, g, d, e = -1, M, x, 0, 1 + while g != 0: + eta, t = divsteps_n_matrix_var(eta, f % 2**N, g % 2**N) + f, g = update_fg(f, g, t) + d, e = update_de(d, e, t, M, Mi) + return normalize(f, d, M) +```
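As an end-to-end sketch, both versions can be wired up and exercised together. The constants are example choices only: *N=62* as suggested in section 3, and the secp256k1 group order as an odd 256-bit modulus:

```python
N = 62
M = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141  # secp256k1 group order (odd)
Mi = pow(M, -1, 2**N)  # 1/M mod 2^N (Python 3.8+)
x = 0x1234567890ABCDEF1234567890ABCDEF
assert (modinv(M, Mi, x) * x) % M == 1      # constant-time version
assert (modinv_var(M, Mi, x) * x) % M == 1  # variable-time version
```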
 diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1.h b/secp256k1-sys/depend/secp256k1/include/secp256k1.h index 107e3c7..cbbebfb 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1.h @@ -7,11 +7,13 @@ extern "C" { #include <stddef.h> -/* These rules specify the order of arguments in API calls: +/* Unless explicitly stated all pointer arguments must not be NULL. + * + * The following rules specify the order of arguments in API calls: * * 1. Context pointers go first, followed by output arguments, combined * output/input arguments, and finally input-only arguments. - * 2. Array lengths always immediately the follow the argument whose length + * 2. Array lengths always immediately follow the argument whose length * they describe, even if this violates rule 1. * 3. Within the OUT/OUTIN/IN groups, pointers to data that is typically generated * later go first. This means: signatures, public nonces, secret nonces, @@ -35,13 +37,13 @@ extern "C" { * A constructed context can safely be used from multiple threads * simultaneously, but API calls that take a non-const pointer to a context * need exclusive access to it. In particular this is the case for - * rustsecp256k1_v0_4_0_context_destroy, rustsecp256k1_v0_4_0_context_preallocated_destroy, - * and rustsecp256k1_v0_4_0_context_randomize. + * rustsecp256k1_v0_4_1_context_destroy, rustsecp256k1_v0_4_1_context_preallocated_destroy, + * and rustsecp256k1_v0_4_1_context_randomize. * * Regarding randomization, either do it once at creation time (in which case * you do not need any locking for the other calls), or use a read-write lock. */ -typedef struct rustsecp256k1_v0_4_0_context_struct rustsecp256k1_v0_4_0_context; +typedef struct rustsecp256k1_v0_4_1_context_struct rustsecp256k1_v0_4_1_context; /** Opaque data structure that holds rewriteable "scratch space" * @@ -54,19 +56,20 @@ typedef struct rustsecp256k1_v0_4_0_context_struct rustsecp256k1_v0_4_0_context; * Unlike the context object, this cannot safely be shared between threads * without additional synchronization logic. */ -typedef struct rustsecp256k1_v0_4_0_scratch_space_struct rustsecp256k1_v0_4_0_scratch_space; +typedef struct rustsecp256k1_v0_4_1_scratch_space_struct rustsecp256k1_v0_4_1_scratch_space; /** Opaque data structure that holds a parsed and valid public key. * * The exact representation of data inside is implementation defined and not * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. - * If you need to convert to a format suitable for storage, transmission, or - * comparison, use rustsecp256k1_v0_4_0_ec_pubkey_serialize and rustsecp256k1_v0_4_0_ec_pubkey_parse. + * If you need to convert to a format suitable for storage or transmission, + * use rustsecp256k1_v0_4_1_ec_pubkey_serialize and rustsecp256k1_v0_4_1_ec_pubkey_parse. To + * compare keys, use rustsecp256k1_v0_4_1_ec_pubkey_cmp. */ typedef struct { unsigned char data[64]; -} rustsecp256k1_v0_4_0_pubkey; +} rustsecp256k1_v0_4_1_pubkey; /** Opaque data structure that holds a parsed ECDSA signature. * @@ -74,12 +77,12 @@ typedef struct { * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. * If you need to convert to a format suitable for storage, transmission, or - * comparison, use the rustsecp256k1_v0_4_0_ecdsa_signature_serialize_* and - * rustsecp256k1_v0_4_0_ecdsa_signature_parse_* functions. + * comparison, use the rustsecp256k1_v0_4_1_ecdsa_signature_serialize_* and + * rustsecp256k1_v0_4_1_ecdsa_signature_parse_* functions. */ typedef struct { unsigned char data[64]; -} rustsecp256k1_v0_4_0_ecdsa_signature; +} rustsecp256k1_v0_4_1_ecdsa_signature; /** A pointer to a function to deterministically generate a nonce. * @@ -97,7 +100,7 @@ typedef struct { * Except for test cases, this function should compute some cryptographic hash of * the message, the algorithm, the key and the attempt. */ -typedef int (*rustsecp256k1_v0_4_0_nonce_function)( +typedef int (*rustsecp256k1_v0_4_1_nonce_function)( unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, @@ -127,6 +130,17 @@ typedef int (*rustsecp256k1_v0_4_0_nonce_function)( # define SECP256K1_INLINE inline # endif +/** When this header is used at build-time the SECP256K1_BUILD define needs to be set + * to correctly set up export attributes and nullness checks. This is normally done + * by secp256k1.c but to guard against this header being included before secp256k1.c + * has had a chance to set the define (e.g. via test harnesses that just include + * secp256k1.c) we set SECP256K1_NO_BUILD when this header is processed without the + * BUILD define so this condition can be caught.
+ */ +#ifndef SECP256K1_BUILD +# define SECP256K1_NO_BUILD +#endif + #ifndef SECP256K1_API # if defined(_WIN32) # ifdef SECP256K1_BUILD @@ -165,14 +179,14 @@ typedef int (*rustsecp256k1_v0_4_0_nonce_function)( #define SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY (1 << 10) #define SECP256K1_FLAGS_BIT_COMPRESSION (1 << 8) -/** Flags to pass to rustsecp256k1_v0_4_0_context_create, rustsecp256k1_v0_4_0_context_preallocated_size, and - * rustsecp256k1_v0_4_0_context_preallocated_create. */ +/** Flags to pass to rustsecp256k1_v0_4_1_context_create, rustsecp256k1_v0_4_1_context_preallocated_size, and + * rustsecp256k1_v0_4_1_context_preallocated_create. */ #define SECP256K1_CONTEXT_VERIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) #define SECP256K1_CONTEXT_SIGN (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_SIGN) #define SECP256K1_CONTEXT_DECLASSIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY) #define SECP256K1_CONTEXT_NONE (SECP256K1_FLAGS_TYPE_CONTEXT) -/** Flag to pass to rustsecp256k1_v0_4_0_ec_pubkey_serialize. */ +/** Flag to pass to rustsecp256k1_v0_4_1_ec_pubkey_serialize. */ #define SECP256K1_EC_COMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION | SECP256K1_FLAGS_BIT_COMPRESSION) #define SECP256K1_EC_UNCOMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION) @@ -188,25 +202,25 @@ typedef int (*rustsecp256k1_v0_4_0_nonce_function)( * API consistency, but currently do not require expensive precomputations or dynamic * allocations. */ -SECP256K1_API extern const rustsecp256k1_v0_4_0_context *rustsecp256k1_v0_4_0_context_no_precomp; +SECP256K1_API extern const rustsecp256k1_v0_4_1_context *rustsecp256k1_v0_4_1_context_no_precomp; /** Create a secp256k1 context object (in dynamically allocated memory). * * This function uses malloc to allocate memory. It is guaranteed that malloc is * called at most once for every call of this function. If you need to avoid dynamic - * memory allocation entirely, see the functions in rustsecp256k1_v0_4_0_preallocated.h. + * memory allocation entirely, see the functions in rustsecp256k1_v0_4_1_preallocated.h. * * Returns: a newly created context object. * In: flags: which parts of the context to initialize. * - * See also rustsecp256k1_v0_4_0_context_randomize. + * See also rustsecp256k1_v0_4_1_context_randomize. */ /** Copy a secp256k1 context object (into dynamically allocated memory). * * This function uses malloc to allocate memory. It is guaranteed that malloc is * called at most once for every call of this function. If you need to avoid dynamic - * memory allocation entirely, see the functions in rustsecp256k1_v0_4_0_preallocated.h. + * memory allocation entirely, see the functions in rustsecp256k1_v0_4_1_preallocated.h. * * Returns: a newly created context object. * Args: ctx: an existing context to copy (cannot be NULL) @@ -216,14 +230,14 @@ SECP256K1_API extern const rustsecp256k1_v0_4_0_context *rustsecp256k1_v0_4_0_co * * The context pointer may not be used afterwards. * - * The context to destroy must have been created using rustsecp256k1_v0_4_0_context_create - * or rustsecp256k1_v0_4_0_context_clone. If the context has instead been created using - * rustsecp256k1_v0_4_0_context_preallocated_create or rustsecp256k1_v0_4_0_context_preallocated_clone, the - * behaviour is undefined. In that case, rustsecp256k1_v0_4_0_context_preallocated_destroy must + * The context to destroy must have been created using rustsecp256k1_v0_4_1_context_create + * or rustsecp256k1_v0_4_1_context_clone. 
If the context has instead been created using + * rustsecp256k1_v0_4_1_context_preallocated_create or rustsecp256k1_v0_4_1_context_preallocated_clone, the + * behaviour is undefined. In that case, rustsecp256k1_v0_4_1_context_preallocated_destroy must * be used instead. * * Args: ctx: an existing context to destroy, constructed using - * rustsecp256k1_v0_4_0_context_create or rustsecp256k1_v0_4_0_context_clone + * rustsecp256k1_v0_4_1_context_create or rustsecp256k1_v0_4_1_context_clone */ /** Set a callback function to be called when an illegal argument is passed to @@ -247,11 +261,11 @@ SECP256K1_API extern const rustsecp256k1_v0_4_0_context *rustsecp256k1_v0_4_0_co * USE_EXTERNAL_DEFAULT_CALLBACKS is defined, which is the case if the build * has been configured with --enable-external-default-callbacks. Then the * following two symbols must be provided to link against: - * - void rustsecp256k1_v0_4_0_default_illegal_callback_fn(const char* message, void* data); - * - void rustsecp256k1_v0_4_0_default_error_callback_fn(const char* message, void* data); + * - void rustsecp256k1_v0_4_1_default_illegal_callback_fn(const char* message, void* data); + * - void rustsecp256k1_v0_4_1_default_error_callback_fn(const char* message, void* data); * The library can call these default handlers even before a proper callback data - * pointer could have been set using rustsecp256k1_v0_4_0_context_set_illegal_callback or - * rustsecp256k1_v0_4_0_context_set_error_callback, e.g., when the creation of a context + * pointer could have been set using rustsecp256k1_v0_4_1_context_set_illegal_callback or + * rustsecp256k1_v0_4_1_context_set_error_callback, e.g., when the creation of a context * fails. In this case, the corresponding default handler will be called with * the data pointer argument set to NULL. * @@ -261,10 +275,10 @@ SECP256K1_API extern const rustsecp256k1_v0_4_0_context *rustsecp256k1_v0_4_0_co * (NULL restores the default handler.) * data: the opaque pointer to pass to fun above. * - * See also rustsecp256k1_v0_4_0_context_set_error_callback. + * See also rustsecp256k1_v0_4_1_context_set_error_callback. */ -SECP256K1_API void rustsecp256k1_v0_4_0_context_set_illegal_callback( - rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API void rustsecp256k1_v0_4_1_context_set_illegal_callback( + rustsecp256k1_v0_4_1_context* ctx, void (*fun)(const char* message, void* data), const void* data ) SECP256K1_ARG_NONNULL(1); @@ -275,21 +289,21 @@ SECP256K1_API void rustsecp256k1_v0_4_0_context_set_illegal_callback( * This can only trigger in case of a hardware failure, miscompilation, * memory corruption, serious bug in the library, or other error that would * otherwise result in undefined behaviour. It will not trigger due to mere - * incorrect usage of the API (see rustsecp256k1_v0_4_0_context_set_illegal_callback + * incorrect usage of the API (see rustsecp256k1_v0_4_1_context_set_illegal_callback * for that). After this callback returns, anything may happen, including * crashing. * * Args: ctx: an existing context object (cannot be NULL) * In: fun: a pointer to a function to call when an internal error occurs, * taking a message and an opaque pointer (NULL restores the - * default handler, see rustsecp256k1_v0_4_0_context_set_illegal_callback + * default handler, see rustsecp256k1_v0_4_1_context_set_illegal_callback * for details). * data: the opaque pointer to pass to fun above. * - * See also rustsecp256k1_v0_4_0_context_set_illegal_callback. + * See also rustsecp256k1_v0_4_1_context_set_illegal_callback.
*/ -SECP256K1_API void rustsecp256k1_v0_4_0_context_set_error_callback( - rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API void rustsecp256k1_v0_4_1_context_set_error_callback( + rustsecp256k1_v0_4_1_context* ctx, void (*fun)(const char* message, void* data), const void* data ) SECP256K1_ARG_NONNULL(1); @@ -323,9 +337,9 @@ SECP256K1_API void rustsecp256k1_v0_4_0_context_set_error_callback( * 0x03), uncompressed (65 bytes, header byte 0x04), or hybrid (65 bytes, header * byte 0x06 or 0x07) format public keys. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_parse( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_pubkey* pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_parse( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_pubkey* pubkey, const unsigned char *input, size_t inputlen ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -340,19 +354,34 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_pa * In/Out: outputlen: a pointer to an integer which is initially set to the * size of output, and is overwritten with the written * size. - * In: pubkey: a pointer to a rustsecp256k1_v0_4_0_pubkey containing an + * In: pubkey: a pointer to a rustsecp256k1_v0_4_1_pubkey containing an * initialized public key. * flags: SECP256K1_EC_COMPRESSED if serialization should be in * compressed format, otherwise SECP256K1_EC_UNCOMPRESSED. */ -SECP256K1_API int rustsecp256k1_v0_4_0_ec_pubkey_serialize( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API int rustsecp256k1_v0_4_1_ec_pubkey_serialize( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output, size_t *outputlen, - const rustsecp256k1_v0_4_0_pubkey* pubkey, + const rustsecp256k1_v0_4_1_pubkey* pubkey, unsigned int flags ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); +/** Compare two public keys using the lexicographic order of their compressed serializations. + * + * Returns: <0 if the first public key is less than the second + * >0 if the first public key is greater than the second + * 0 if the two public keys are equal + * Args: ctx: a secp256k1 context object. + * In: pubkey1: first public key to compare + * pubkey2: second public key to compare + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_cmp( + const rustsecp256k1_v0_4_1_context* ctx, + const rustsecp256k1_v0_4_1_pubkey* pubkey1, + const rustsecp256k1_v0_4_1_pubkey* pubkey2 +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); + /** Parse an ECDSA signature in compact (64 bytes) format. * * Returns: 1 when the signature could be parsed, 0 otherwise. @@ -368,9 +397,9 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ec_pubkey_serialize( * S are zero, the resulting sig value is guaranteed to fail validation for any * message and public key.
*/ -SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_ecdsa_signature* sig, +SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_ecdsa_signature* sig, const unsigned char *input64 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -389,9 +418,9 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact( * encoded numbers are out of range, signature validation with it is * guaranteed to fail for every message and public key. */ -SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_ecdsa_signature* sig, +SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_parse_der( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -407,11 +436,11 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der( * if 0 was returned). * In: sig: a pointer to an initialized signature object */ -SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output, size_t *outputlen, - const rustsecp256k1_v0_4_0_ecdsa_signature* sig + const rustsecp256k1_v0_4_1_ecdsa_signature* sig ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); /** Serialize an ECDSA signature in compact (64 byte) format. @@ -421,12 +450,12 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der( * Out: output64: a pointer to a 64-byte array to store the compact serialization * In: sig: a pointer to an initialized signature object * - * See rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact for details about the encoding. + * See rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact for details about the encoding. */ -SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_compact( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_serialize_compact( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output64, - const rustsecp256k1_v0_4_0_ecdsa_signature* sig + const rustsecp256k1_v0_4_1_ecdsa_signature* sig ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Verify an ECDSA signature. @@ -449,16 +478,16 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_compact( * form are accepted. * * If you need to accept ECDSA signatures from sources that do not obey this - * rule, apply rustsecp256k1_v0_4_0_ecdsa_signature_normalize to the signature prior to + * rule, apply rustsecp256k1_v0_4_1_ecdsa_signature_normalize to the signature prior to * validation, but be aware that doing so results in malleable signatures. * * For details, see the comments for that function. 
*/ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ecdsa_verify( - const rustsecp256k1_v0_4_0_context* ctx, - const rustsecp256k1_v0_4_0_ecdsa_signature *sig, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ecdsa_verify( + const rustsecp256k1_v0_4_1_context* ctx, + const rustsecp256k1_v0_4_1_ecdsa_signature *sig, const unsigned char *msghash32, - const rustsecp256k1_v0_4_0_pubkey *pubkey + const rustsecp256k1_v0_4_1_pubkey *pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); /** Convert a signature to a normalized lower-S form. @@ -498,25 +527,25 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ecdsa_verify * accept various non-unique encodings, so care should be taken when this * property is required for an application. * - * The rustsecp256k1_v0_4_0_ecdsa_sign function will by default create signatures in the - * lower-S form, and rustsecp256k1_v0_4_0_ecdsa_verify will not accept others. In case + * The rustsecp256k1_v0_4_1_ecdsa_sign function will by default create signatures in the + * lower-S form, and rustsecp256k1_v0_4_1_ecdsa_verify will not accept others. In case * signatures come from a system that cannot enforce this property, - * rustsecp256k1_v0_4_0_ecdsa_signature_normalize must be called before verification. + * rustsecp256k1_v0_4_1_ecdsa_signature_normalize must be called before verification. */ -SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_normalize( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_ecdsa_signature *sigout, - const rustsecp256k1_v0_4_0_ecdsa_signature *sigin +SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_normalize( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_ecdsa_signature *sigout, + const rustsecp256k1_v0_4_1_ecdsa_signature *sigin ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(3); /** An implementation of RFC6979 (using HMAC-SHA256) as nonce generation function. * If a data pointer is passed, it is assumed to be a pointer to 32 bytes of * extra entropy. */ -SECP256K1_API extern const rustsecp256k1_v0_4_0_nonce_function rustsecp256k1_v0_4_0_nonce_function_rfc6979; +SECP256K1_API extern const rustsecp256k1_v0_4_1_nonce_function rustsecp256k1_v0_4_1_nonce_function_rfc6979; -/** A default safe nonce generation function (currently equal to rustsecp256k1_v0_4_0_nonce_function_rfc6979). */ -SECP256K1_API extern const rustsecp256k1_v0_4_0_nonce_function rustsecp256k1_v0_4_0_nonce_function_default; +/** A default safe nonce generation function (currently equal to rustsecp256k1_v0_4_1_nonce_function_rfc6979). */ +SECP256K1_API extern const rustsecp256k1_v0_4_1_nonce_function rustsecp256k1_v0_4_1_nonce_function_default; /** Create an ECDSA signature. * @@ -526,18 +555,18 @@ SECP256K1_API extern const rustsecp256k1_v0_4_0_nonce_function rustsecp256k1_v0_ * Out: sig: pointer to an array where the signature will be placed (cannot be NULL) * In: msghash32: the 32-byte message hash being signed (cannot be NULL) * seckey: pointer to a 32-byte secret key (cannot be NULL) - * noncefp: pointer to a nonce generation function. If NULL, rustsecp256k1_v0_4_0_nonce_function_default is used + * noncefp: pointer to a nonce generation function. If NULL, rustsecp256k1_v0_4_1_nonce_function_default is used * ndata: pointer to arbitrary data used by the nonce generation function (can be NULL) * * The created signature is always in lower-S form. 
See - * rustsecp256k1_v0_4_0_ecdsa_signature_normalize for more details. + * rustsecp256k1_v0_4_1_ecdsa_signature_normalize for more details. */ -SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_sign( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_ecdsa_signature *sig, +SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_sign( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_ecdsa_signature *sig, const unsigned char *msghash32, const unsigned char *seckey, - rustsecp256k1_v0_4_0_nonce_function noncefp, + rustsecp256k1_v0_4_1_nonce_function noncefp, const void *ndata ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); @@ -553,8 +582,8 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_sign( * Args: ctx: pointer to a context object (cannot be NULL) * In: seckey: pointer to a 32-byte secret key (cannot be NULL) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_verify( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_seckey_verify( + const rustsecp256k1_v0_4_1_context* ctx, const unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); @@ -566,32 +595,32 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_ve * Out: pubkey: pointer to the created public key (cannot be NULL) * In: seckey: pointer to a 32-byte secret key (cannot be NULL) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_create( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_create( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_pubkey *pubkey, const unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Negates a secret key in place. * * Returns: 0 if the given secret key is invalid according to - * rustsecp256k1_v0_4_0_ec_seckey_verify. 1 otherwise + * rustsecp256k1_v0_4_1_ec_seckey_verify. 1 otherwise * Args: ctx: pointer to a context object * In/Out: seckey: pointer to the 32-byte secret key to be negated. If the * secret key is invalid according to - * rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0 and + * rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0 and * seckey will be set to some unspecified value. (cannot be * NULL) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_negate( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_seckey_negate( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); -/** Same as rustsecp256k1_v0_4_0_ec_seckey_negate, but DEPRECATED. Will be removed in +/** Same as rustsecp256k1_v0_4_1_ec_seckey_negate, but DEPRECATED. Will be removed in * future versions. 
*/ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_negate( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_privkey_negate( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); @@ -601,9 +630,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_n * Args: ctx: pointer to a context object * In/Out: pubkey: pointer to the public key to be negated (cannot be NULL) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_negate( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_pubkey *pubkey +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_negate( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_pubkey *pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); /** Tweak a secret key by adding tweak to it. @@ -613,24 +642,24 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_ne * otherwise. * Args: ctx: pointer to a context object (cannot be NULL). * In/Out: seckey: pointer to a 32-byte secret key. If the secret key is - * invalid according to rustsecp256k1_v0_4_0_ec_seckey_verify, this + * invalid according to rustsecp256k1_v0_4_1_ec_seckey_verify, this * function returns 0. seckey will be set to some unspecified * value if this function returns 0. (cannot be NULL) * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to - * rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0. For + * rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128) (cannot be NULL). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_tweak_add( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_seckey_tweak_add( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Same as rustsecp256k1_v0_4_0_ec_seckey_tweak_add, but DEPRECATED. Will be removed in +/** Same as rustsecp256k1_v0_4_1_ec_seckey_tweak_add, but DEPRECATED. Will be removed in * future versions. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_tweak_add( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_privkey_tweak_add( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -645,13 +674,13 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_t * In/Out: pubkey: pointer to a public key object. pubkey will be set to an * invalid value if this function returns 0 (cannot be NULL). * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to - * rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0. For + * rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128) (cannot be NULL). 
*/ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_tweak_add( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_tweak_add( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_pubkey *pubkey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -660,24 +689,24 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_tw * Returns: 0 if the arguments are invalid. 1 otherwise. * Args: ctx: pointer to a context object (cannot be NULL). * In/Out: seckey: pointer to a 32-byte secret key. If the secret key is - * invalid according to rustsecp256k1_v0_4_0_ec_seckey_verify, this + * invalid according to rustsecp256k1_v0_4_1_ec_seckey_verify, this * function returns 0. seckey will be set to some unspecified * value if this function returns 0. (cannot be NULL) * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to - * rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0. For + * rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128) (cannot be NULL). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_tweak_mul( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_seckey_tweak_mul( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Same as rustsecp256k1_v0_4_0_ec_seckey_tweak_mul, but DEPRECATED. Will be removed in +/** Same as rustsecp256k1_v0_4_1_ec_seckey_tweak_mul, but DEPRECATED. Will be removed in * future versions. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_tweak_mul( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_privkey_tweak_mul( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -690,13 +719,13 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_t * In/Out: pubkey: pointer to a public key object. pubkey will be set to an * invalid value if this function returns 0 (cannot be NULL). * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to - * rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0. For + * rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128) (cannot be NULL). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_tweak_mul( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_pubkey *pubkey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -721,12 +750,12 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_tw * guaranteed and may change in the future. 
It is safe to call this function on * contexts not initialized for signing; then it will have no effect and return 1. * - * You should call this after rustsecp256k1_v0_4_0_context_create or - * rustsecp256k1_v0_4_0_context_clone (and rustsecp256k1_v0_4_0_context_preallocated_create or - * rustsecp256k1_v0_4_0_context_clone, resp.), and you may call this repeatedly afterwards. + * You should call this after rustsecp256k1_v0_4_1_context_create or + * rustsecp256k1_v0_4_1_context_clone (and rustsecp256k1_v0_4_1_context_preallocated_create or + * rustsecp256k1_v0_4_1_context_preallocated_clone, resp.), and you may call this repeatedly afterwards. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_context_randomize( - rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_context_randomize( + rustsecp256k1_v0_4_1_context* ctx, const unsigned char *seed32 ) SECP256K1_ARG_NONNULL(1); @@ -740,10 +769,10 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_context_rand * In: ins: pointer to array of pointers to public keys (cannot be NULL) * n: the number of public keys to add together (must be at least 1) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_combine( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_pubkey *out, - const rustsecp256k1_v0_4_0_pubkey * const * ins, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_combine( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_pubkey *out, + const rustsecp256k1_v0_4_1_pubkey * const * ins, size_t n ) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1.h.orig b/secp256k1-sys/depend/secp256k1/include/secp256k1.h.orig index 0d40f74..4098da3 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1.h.orig +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1.h.orig @@ -7,11 +7,13 @@ extern "C" { #include <stddef.h> -/* These rules specify the order of arguments in API calls: +/* Unless explicitly stated all pointer arguments must not be NULL. + * + * The following rules specify the order of arguments in API calls: * * 1. Context pointers go first, followed by output arguments, combined * output/input arguments, and finally input-only arguments. - * 2. Array lengths always immediately the follow the argument whose length + * 2. Array lengths always immediately follow the argument whose length * they describe, even if this violates rule 1. * 3. Within the OUT/OUTIN/IN groups, pointers to data that is typically generated * later go first. This means: signatures, public nonces, secret nonces, @@ -35,13 +37,13 @@ extern "C" { * A constructed context can safely be used from multiple threads * simultaneously, but API calls that take a non-const pointer to a context * need exclusive access to it. In particular this is the case for - * rustsecp256k1_v0_4_0_context_destroy, rustsecp256k1_v0_4_0_context_preallocated_destroy, - * and rustsecp256k1_v0_4_0_context_randomize. + * rustsecp256k1_v0_4_1_context_destroy, rustsecp256k1_v0_4_1_context_preallocated_destroy, + * and rustsecp256k1_v0_4_1_context_randomize. * * Regarding randomization, either do it once at creation time (in which case * you do not need any locking for the other calls), or use a read-write lock.
*/ -typedef struct rustsecp256k1_v0_4_0_context_struct rustsecp256k1_v0_4_0_context; +typedef struct rustsecp256k1_v0_4_1_context_struct rustsecp256k1_v0_4_1_context; /** Opaque data structure that holds rewriteable "scratch space" * @@ -54,19 +56,20 @@ typedef struct rustsecp256k1_v0_4_0_context_struct rustsecp256k1_v0_4_0_context; * Unlike the context object, this cannot safely be shared between threads * without additional synchronization logic. */ -typedef struct rustsecp256k1_v0_4_0_scratch_space_struct rustsecp256k1_v0_4_0_scratch_space; +typedef struct rustsecp256k1_v0_4_1_scratch_space_struct rustsecp256k1_v0_4_1_scratch_space; /** Opaque data structure that holds a parsed and valid public key. * * The exact representation of data inside is implementation defined and not * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. - * If you need to convert to a format suitable for storage, transmission, or - * comparison, use rustsecp256k1_v0_4_0_ec_pubkey_serialize and rustsecp256k1_v0_4_0_ec_pubkey_parse. + * If you need to convert to a format suitable for storage or transmission, + * use rustsecp256k1_v0_4_1_ec_pubkey_serialize and rustsecp256k1_v0_4_1_ec_pubkey_parse. To + * compare keys, use rustsecp256k1_v0_4_1_ec_pubkey_cmp. */ typedef struct { unsigned char data[64]; -} rustsecp256k1_v0_4_0_pubkey; +} rustsecp256k1_v0_4_1_pubkey; /** Opaque data structured that holds a parsed ECDSA signature. * @@ -74,12 +77,12 @@ typedef struct { * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. * If you need to convert to a format suitable for storage, transmission, or - * comparison, use the rustsecp256k1_v0_4_0_ecdsa_signature_serialize_* and - * rustsecp256k1_v0_4_0_ecdsa_signature_parse_* functions. + * comparison, use the rustsecp256k1_v0_4_1_ecdsa_signature_serialize_* and + * rustsecp256k1_v0_4_1_ecdsa_signature_parse_* functions. */ typedef struct { unsigned char data[64]; -} rustsecp256k1_v0_4_0_ecdsa_signature; +} rustsecp256k1_v0_4_1_ecdsa_signature; /** A pointer to a function to deterministically generate a nonce. * @@ -97,7 +100,7 @@ typedef struct { * Except for test cases, this function should compute some cryptographic hash of * the message, the algorithm, the key and the attempt. */ -typedef int (*rustsecp256k1_v0_4_0_nonce_function)( +typedef int (*rustsecp256k1_v0_4_1_nonce_function)( unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, @@ -127,6 +130,17 @@ typedef int (*rustsecp256k1_v0_4_0_nonce_function)( # define SECP256K1_INLINE inline # endif +/** When this header is used at build-time the SECP256K1_BUILD define needs to be set + * to correctly setup export attributes and nullness checks. This is normally done + * by secp256k1.c but to guard against this header being included before secp256k1.c + * has had a chance to set the define (e.g. via test harnesses that just includes + * secp256k1.c) we set SECP256K1_NO_BUILD when this header is processed without the + * BUILD define so this condition can be caught. 
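The SECP256K1_NO_BUILD sentinel described in this new comment is only useful if some translation unit later tests for it. A hypothetical compile-time check of the intended kind (the exact guard upstream installs may differ):

```c
/* In a .c file compiled as part of the library, after including the header: */
#if defined(SECP256K1_NO_BUILD) && defined(SECP256K1_BUILD)
#error "secp256k1.h was first processed without SECP256K1_BUILD defined"
#endif
```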
+ */ +#ifndef SECP256K1_BUILD +# define SECP256K1_NO_BUILD +#endif + #ifndef SECP256K1_API # if defined(_WIN32) # ifdef SECP256K1_BUILD @@ -165,14 +179,14 @@ typedef int (*rustsecp256k1_v0_4_0_nonce_function)( #define SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY (1 << 10) #define SECP256K1_FLAGS_BIT_COMPRESSION (1 << 8) -/** Flags to pass to rustsecp256k1_v0_4_0_context_create, rustsecp256k1_v0_4_0_context_preallocated_size, and - * rustsecp256k1_v0_4_0_context_preallocated_create. */ +/** Flags to pass to rustsecp256k1_v0_4_1_context_create, rustsecp256k1_v0_4_1_context_preallocated_size, and + * rustsecp256k1_v0_4_1_context_preallocated_create. */ #define SECP256K1_CONTEXT_VERIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) #define SECP256K1_CONTEXT_SIGN (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_SIGN) #define SECP256K1_CONTEXT_DECLASSIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY) #define SECP256K1_CONTEXT_NONE (SECP256K1_FLAGS_TYPE_CONTEXT) -/** Flag to pass to rustsecp256k1_v0_4_0_ec_pubkey_serialize. */ +/** Flag to pass to rustsecp256k1_v0_4_1_ec_pubkey_serialize. */ #define SECP256K1_EC_COMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION | SECP256K1_FLAGS_BIT_COMPRESSION) #define SECP256K1_EC_UNCOMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION) @@ -188,20 +202,20 @@ typedef int (*rustsecp256k1_v0_4_0_nonce_function)( * API consistency, but currently do not require expensive precomputations or dynamic * allocations. */ -SECP256K1_API extern const rustsecp256k1_v0_4_0_context *rustsecp256k1_v0_4_0_context_no_precomp; +SECP256K1_API extern const rustsecp256k1_v0_4_1_context *rustsecp256k1_v0_4_1_context_no_precomp; /** Create a secp256k1 context object (in dynamically allocated memory). * * This function uses malloc to allocate memory. It is guaranteed that malloc is * called at most once for every call of this function. If you need to avoid dynamic - * memory allocation entirely, see the functions in rustsecp256k1_v0_4_0_preallocated.h. + * memory allocation entirely, see the functions in rustsecp256k1_v0_4_1_preallocated.h. * * Returns: a newly created context object. * In: flags: which parts of the context to initialize. * - * See also rustsecp256k1_v0_4_0_context_randomize. + * See also rustsecp256k1_v0_4_1_context_randomize. */ -SECP256K1_API rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_create( +SECP256K1_API rustsecp256k1_v0_4_1_context* rustsecp256k1_v0_4_1_context_create( unsigned int flags ) SECP256K1_WARN_UNUSED_RESULT; @@ -209,30 +223,30 @@ SECP256K1_API rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_create( * * This function uses malloc to allocate memory. It is guaranteed that malloc is * called at most once for every call of this function. If you need to avoid dynamic - * memory allocation entirely, see the functions in rustsecp256k1_v0_4_0_preallocated.h. + * memory allocation entirely, see the functions in rustsecp256k1_v0_4_1_preallocated.h. * * Returns: a newly created context object. * Args: ctx: an existing context to copy (cannot be NULL) */ -SECP256K1_API rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_clone( - const rustsecp256k1_v0_4_0_context* ctx +SECP256K1_API rustsecp256k1_v0_4_1_context* rustsecp256k1_v0_4_1_context_clone( + const rustsecp256k1_v0_4_1_context* ctx ) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT; /** Destroy a secp256k1 context object (created in dynamically allocated memory). * * The context pointer may not be used afterwards. 
* - * The context to destroy must have been created using rustsecp256k1_v0_4_0_context_create - * or rustsecp256k1_v0_4_0_context_clone. If the context has instead been created using - * rustsecp256k1_v0_4_0_context_preallocated_create or rustsecp256k1_v0_4_0_context_preallocated_clone, the - * behaviour is undefined. In that case, rustsecp256k1_v0_4_0_context_preallocated_destroy must + * The context to destroy must have been created using rustsecp256k1_v0_4_1_context_create + * or rustsecp256k1_v0_4_1_context_clone. If the context has instead been created using + * rustsecp256k1_v0_4_1_context_preallocated_create or rustsecp256k1_v0_4_1_context_preallocated_clone, the + * behaviour is undefined. In that case, rustsecp256k1_v0_4_1_context_preallocated_destroy must * be used instead. * * Args: ctx: an existing context to destroy, constructed using - * rustsecp256k1_v0_4_0_context_create or rustsecp256k1_v0_4_0_context_clone + * rustsecp256k1_v0_4_1_context_create or rustsecp256k1_v0_4_1_context_clone */ -SECP256K1_API void rustsecp256k1_v0_4_0_context_destroy( - rustsecp256k1_v0_4_0_context* ctx +SECP256K1_API void rustsecp256k1_v0_4_1_context_destroy( + rustsecp256k1_v0_4_1_context* ctx ); /** Set a callback function to be called when an illegal argument is passed to @@ -256,11 +270,11 @@ SECP256K1_API void rustsecp256k1_v0_4_0_context_destroy( * USE_EXTERNAL_DEFAULT_CALLBACKS is defined, which is the case if the build * has been configured with --enable-external-default-callbacks. Then the * following two symbols must be provided to link against: - * - void rustsecp256k1_v0_4_0_default_illegal_callback_fn(const char* message, void* data); - * - void rustsecp256k1_v0_4_0_default_error_callback_fn(const char* message, void* data); + * - void rustsecp256k1_v0_4_1_default_illegal_callback_fn(const char* message, void* data); + * - void rustsecp256k1_v0_4_1_default_error_callback_fn(const char* message, void* data); * The library can call these default handlers even before a proper callback data - * pointer could have been set using rustsecp256k1_v0_4_0_context_set_illegal_callback or - * rustsecp256k1_v0_4_0_context_set_error_callback, e.g., when the creation of a context + * pointer could have been set using rustsecp256k1_v0_4_1_context_set_illegal_callback or + * rustsecp256k1_v0_4_1_context_set_error_callback, e.g., when the creation of a context * fails. In this case, the corresponding default handler will be called with * the data pointer argument set to NULL. * @@ -270,10 +284,10 @@ SECP256K1_API void rustsecp256k1_v0_4_0_context_destroy( * (NULL restores the default handler.) * data: the opaque pointer to pass to fun above. * - * See also rustsecp256k1_v0_4_0_context_set_error_callback. + * See also rustsecp256k1_v0_4_1_context_set_error_callback. */ -SECP256K1_API void rustsecp256k1_v0_4_0_context_set_illegal_callback( - rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API void rustsecp256k1_v0_4_1_context_set_illegal_callback( + rustsecp256k1_v0_4_1_context* ctx, void (*fun)(const char* message, void* data), const void* data ) SECP256K1_ARG_NONNULL(1); @@ -284,21 +298,21 @@ SECP256K1_API void rustsecp256k1_v0_4_0_context_set_illegal_callback( * This can only trigger in case of a hardware failure, miscompilation, * memory corruption, serious bug in the library, or other error would can * otherwise result in undefined behaviour. 
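A typical use of the callback hook above is to replace the default illegal-argument handler with one that logs before aborting, e.g. in a test binary. A sketch:

```c
#include <stdio.h>
#include <stdlib.h>
#include <secp256k1.h>

static void log_and_abort(const char *message, void *data) {
    (void)data; /* the opaque pointer registered below (NULL here) */
    fprintf(stderr, "libsecp256k1 illegal argument: %s\n", message);
    abort();
}

void install_callbacks(rustsecp256k1_v0_4_1_context *ctx) {
    /* Passing NULL as fun would restore the default handler instead. */
    rustsecp256k1_v0_4_1_context_set_illegal_callback(ctx, log_and_abort, NULL);
}
```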
It will not trigger due to mere - * incorrect usage of the API (see rustsecp256k1_v0_4_0_context_set_illegal_callback + * incorrect usage of the API (see rustsecp256k1_v0_4_1_context_set_illegal_callback * for that). After this callback returns, anything may happen, including * crashing. * * Args: ctx: an existing context object (cannot be NULL) * In: fun: a pointer to a function to call when an internal error occurs, * taking a message and an opaque pointer (NULL restores the - * default handler, see rustsecp256k1_v0_4_0_context_set_illegal_callback + * default handler, see rustsecp256k1_v0_4_1_context_set_illegal_callback * for details). * data: the opaque pointer to pass to fun above. * - * See also rustsecp256k1_v0_4_0_context_set_illegal_callback. + * See also rustsecp256k1_v0_4_1_context_set_illegal_callback. */ -SECP256K1_API void rustsecp256k1_v0_4_0_context_set_error_callback( - rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API void rustsecp256k1_v0_4_1_context_set_error_callback( + rustsecp256k1_v0_4_1_context* ctx, void (*fun)(const char* message, void* data), const void* data ) SECP256K1_ARG_NONNULL(1); @@ -310,8 +324,8 @@ SECP256K1_API void rustsecp256k1_v0_4_0_context_set_error_callback( * In: size: amount of memory to be available as scratch space. Some extra * (<100 bytes) will be allocated for extra accounting. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT rustsecp256k1_v0_4_0_scratch_space* rustsecp256k1_v0_4_0_scratch_space_create( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT rustsecp256k1_v0_4_1_scratch_space* rustsecp256k1_v0_4_1_scratch_space_create( + const rustsecp256k1_v0_4_1_context* ctx, size_t size ) SECP256K1_ARG_NONNULL(1); @@ -321,9 +335,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT rustsecp256k1_v0_4_0_scratch_space* r * Args: ctx: a secp256k1 context object. * scratch: space to destroy */ -SECP256K1_API void rustsecp256k1_v0_4_0_scratch_space_destroy( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_scratch_space* scratch +SECP256K1_API void rustsecp256k1_v0_4_1_scratch_space_destroy( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_scratch_space* scratch ) SECP256K1_ARG_NONNULL(1); /** Parse a variable-length public key into the pubkey object. @@ -340,9 +354,9 @@ SECP256K1_API void rustsecp256k1_v0_4_0_scratch_space_destroy( * 0x03), uncompressed (65 bytes, header byte 0x04), or hybrid (65 bytes, header * byte 0x06 or 0x07) format public keys. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_parse( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_pubkey* pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_parse( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_pubkey* pubkey, const unsigned char *input, size_t inputlen ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -357,19 +371,34 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_pa * In/Out: outputlen: a pointer to an integer which is initially set to the * size of output, and is overwritten with the written * size. - * In: pubkey: a pointer to a rustsecp256k1_v0_4_0_pubkey containing an + * In: pubkey: a pointer to a rustsecp256k1_v0_4_1_pubkey containing an * initialized public key. * flags: SECP256K1_EC_COMPRESSED if serialization should be in * compressed format, otherwise SECP256K1_EC_UNCOMPRESSED. 
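The serialize call documented above treats outputlen as in/out: it must hold the buffer size on entry and is overwritten with the number of bytes written. A sketch of a compressed-to-uncompressed round trip:

```c
#include <secp256k1.h>

int recompress(const rustsecp256k1_v0_4_1_context *ctx,
               const unsigned char compressed[33],
               unsigned char uncompressed[65]) {
    rustsecp256k1_v0_4_1_pubkey pubkey;
    size_t outputlen = 65; /* in: buffer size; out: bytes written (65 here) */
    if (!rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, &pubkey, compressed, 33)) return 0;
    return rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, uncompressed, &outputlen,
                                                    &pubkey, SECP256K1_EC_UNCOMPRESSED);
}
```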
*/ -SECP256K1_API int rustsecp256k1_v0_4_0_ec_pubkey_serialize( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API int rustsecp256k1_v0_4_1_ec_pubkey_serialize( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output, size_t *outputlen, - const rustsecp256k1_v0_4_0_pubkey* pubkey, + const rustsecp256k1_v0_4_1_pubkey* pubkey, unsigned int flags ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); +/** Compare two public keys using lexicographic (of compressed serialization) order + * + * Returns: <0 if the first public key is less than the second + * >0 if the first public key is greater than the second + * 0 if the two public keys are equal + * Args: ctx: a secp256k1 context object. + * In: pubkey1: first public key to compare + * pubkey2: second public key to compare + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_cmp( + const rustsecp256k1_v0_4_1_context* ctx, + const rustsecp256k1_v0_4_1_pubkey* pubkey1, + const rustsecp256k1_v0_4_1_pubkey* pubkey2 +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); + /** Parse an ECDSA signature in compact (64 bytes) format. * * Returns: 1 when the signature could be parsed, 0 otherwise. @@ -385,9 +414,9 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ec_pubkey_serialize( * S are zero, the resulting sig value is guaranteed to fail validation for any * message and public key. */ -SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_ecdsa_signature* sig, +SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_ecdsa_signature* sig, const unsigned char *input64 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -406,9 +435,9 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact( * encoded numbers are out of range, signature validation with it is * guaranteed to fail for every message and public key. */ -SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_ecdsa_signature* sig, +SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_parse_der( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -424,11 +453,11 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der( * if 0 was returned). * In: sig: a pointer to an initialized signature object */ -SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output, size_t *outputlen, - const rustsecp256k1_v0_4_0_ecdsa_signature* sig + const rustsecp256k1_v0_4_1_ecdsa_signature* sig ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); /** Serialize an ECDSA signature in compact (64 byte) format. 
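The ec_pubkey_cmp function added in this hunk orders keys by the lexicographic order of their compressed serializations, which gives a canonical ordering for free (e.g. for sorting participants in a multiparty protocol). A trivial sketch:

```c
#include <secp256k1.h>

/* Returns the lexicographically smaller key, per the compressed-
 * serialization order that rustsecp256k1_v0_4_1_ec_pubkey_cmp defines. */
const rustsecp256k1_v0_4_1_pubkey *
min_pubkey(const rustsecp256k1_v0_4_1_context *ctx,
           const rustsecp256k1_v0_4_1_pubkey *a,
           const rustsecp256k1_v0_4_1_pubkey *b) {
    return rustsecp256k1_v0_4_1_ec_pubkey_cmp(ctx, a, b) <= 0 ? a : b;
}
```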
@@ -438,12 +467,12 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der( * Out: output64: a pointer to a 64-byte array to store the compact serialization * In: sig: a pointer to an initialized signature object * - * See rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact for details about the encoding. + * See rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact for details about the encoding. */ -SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_compact( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_serialize_compact( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output64, - const rustsecp256k1_v0_4_0_ecdsa_signature* sig + const rustsecp256k1_v0_4_1_ecdsa_signature* sig ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Verify an ECDSA signature. @@ -466,16 +495,16 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_compact( * form are accepted. * * If you need to accept ECDSA signatures from sources that do not obey this - * rule, apply rustsecp256k1_v0_4_0_ecdsa_signature_normalize to the signature prior to + * rule, apply rustsecp256k1_v0_4_1_ecdsa_signature_normalize to the signature prior to * validation, but be aware that doing so results in malleable signatures. * * For details, see the comments for that function. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ecdsa_verify( - const rustsecp256k1_v0_4_0_context* ctx, - const rustsecp256k1_v0_4_0_ecdsa_signature *sig, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ecdsa_verify( + const rustsecp256k1_v0_4_1_context* ctx, + const rustsecp256k1_v0_4_1_ecdsa_signature *sig, const unsigned char *msghash32, - const rustsecp256k1_v0_4_0_pubkey *pubkey + const rustsecp256k1_v0_4_1_pubkey *pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); /** Convert a signature to a normalized lower-S form. @@ -515,25 +544,25 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ecdsa_verify * accept various non-unique encodings, so care should be taken when this * property is required for an application. * - * The rustsecp256k1_v0_4_0_ecdsa_sign function will by default create signatures in the - * lower-S form, and rustsecp256k1_v0_4_0_ecdsa_verify will not accept others. In case + * The rustsecp256k1_v0_4_1_ecdsa_sign function will by default create signatures in the + * lower-S form, and rustsecp256k1_v0_4_1_ecdsa_verify will not accept others. In case * signatures come from a system that cannot enforce this property, - * rustsecp256k1_v0_4_0_ecdsa_signature_normalize must be called before verification. + * rustsecp256k1_v0_4_1_ecdsa_signature_normalize must be called before verification. */ -SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_normalize( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_ecdsa_signature *sigout, - const rustsecp256k1_v0_4_0_ecdsa_signature *sigin +SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_normalize( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_ecdsa_signature *sigout, + const rustsecp256k1_v0_4_1_ecdsa_signature *sigin ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(3); /** An implementation of RFC6979 (using HMAC-SHA256) as nonce generation function. * If a data pointer is passed, it is assumed to be a pointer to 32 bytes of * extra entropy. 
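For signatures from systems that do not enforce lower-S, the pattern the comments above prescribe is normalize-then-verify, at the documented cost of accepting malleable encodings. A sketch:

```c
#include <secp256k1.h>

int verify_lenient(const rustsecp256k1_v0_4_1_context *ctx,
                   const rustsecp256k1_v0_4_1_ecdsa_signature *sig,
                   const unsigned char msghash32[32],
                   const rustsecp256k1_v0_4_1_pubkey *pubkey) {
    rustsecp256k1_v0_4_1_ecdsa_signature normalized;
    /* The return value (1 if sigin needed normalization, 0 if it was
     * already lower-S) is not needed here; only the normalized copy is. */
    rustsecp256k1_v0_4_1_ecdsa_signature_normalize(ctx, &normalized, sig);
    return rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &normalized, msghash32, pubkey);
}
```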
*/ -SECP256K1_API extern const rustsecp256k1_v0_4_0_nonce_function rustsecp256k1_v0_4_0_nonce_function_rfc6979; +SECP256K1_API extern const rustsecp256k1_v0_4_1_nonce_function rustsecp256k1_v0_4_1_nonce_function_rfc6979; -/** A default safe nonce generation function (currently equal to rustsecp256k1_v0_4_0_nonce_function_rfc6979). */ -SECP256K1_API extern const rustsecp256k1_v0_4_0_nonce_function rustsecp256k1_v0_4_0_nonce_function_default; +/** A default safe nonce generation function (currently equal to rustsecp256k1_v0_4_1_nonce_function_rfc6979). */ +SECP256K1_API extern const rustsecp256k1_v0_4_1_nonce_function rustsecp256k1_v0_4_1_nonce_function_default; /** Create an ECDSA signature. * @@ -543,18 +572,18 @@ SECP256K1_API extern const rustsecp256k1_v0_4_0_nonce_function rustsecp256k1_v0_ * Out: sig: pointer to an array where the signature will be placed (cannot be NULL) * In: msghash32: the 32-byte message hash being signed (cannot be NULL) * seckey: pointer to a 32-byte secret key (cannot be NULL) - * noncefp: pointer to a nonce generation function. If NULL, rustsecp256k1_v0_4_0_nonce_function_default is used + * noncefp: pointer to a nonce generation function. If NULL, rustsecp256k1_v0_4_1_nonce_function_default is used * ndata: pointer to arbitrary data used by the nonce generation function (can be NULL) * * The created signature is always in lower-S form. See - * rustsecp256k1_v0_4_0_ecdsa_signature_normalize for more details. + * rustsecp256k1_v0_4_1_ecdsa_signature_normalize for more details. */ -SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_sign( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_ecdsa_signature *sig, +SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_sign( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_ecdsa_signature *sig, const unsigned char *msghash32, const unsigned char *seckey, - rustsecp256k1_v0_4_0_nonce_function noncefp, + rustsecp256k1_v0_4_1_nonce_function noncefp, const void *ndata ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); @@ -570,8 +599,8 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_sign( * Args: ctx: pointer to a context object (cannot be NULL) * In: seckey: pointer to a 32-byte secret key (cannot be NULL) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_verify( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_seckey_verify( + const rustsecp256k1_v0_4_1_context* ctx, const unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); @@ -583,32 +612,32 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_ve * Out: pubkey: pointer to the created public key (cannot be NULL) * In: seckey: pointer to a 32-byte secret key (cannot be NULL) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_create( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_create( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_pubkey *pubkey, const unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Negates a secret key in place. * * Returns: 0 if the given secret key is invalid according to - * rustsecp256k1_v0_4_0_ec_seckey_verify. 1 otherwise + * rustsecp256k1_v0_4_1_ec_seckey_verify. 
1 otherwise * Args: ctx: pointer to a context object * In/Out: seckey: pointer to the 32-byte secret key to be negated. If the * secret key is invalid according to - * rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0 and + * rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0 and * seckey will be set to some unspecified value. (cannot be * NULL) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_negate( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_seckey_negate( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); -/** Same as rustsecp256k1_v0_4_0_ec_seckey_negate, but DEPRECATED. Will be removed in +/** Same as rustsecp256k1_v0_4_1_ec_seckey_negate, but DEPRECATED. Will be removed in * future versions. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_negate( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_privkey_negate( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); @@ -618,9 +647,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_n * Args: ctx: pointer to a context object * In/Out: pubkey: pointer to the public key to be negated (cannot be NULL) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_negate( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_pubkey *pubkey +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_negate( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_pubkey *pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); /** Tweak a secret key by adding tweak to it. @@ -630,24 +659,24 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_ne * otherwise. * Args: ctx: pointer to a context object (cannot be NULL). * In/Out: seckey: pointer to a 32-byte secret key. If the secret key is - * invalid according to rustsecp256k1_v0_4_0_ec_seckey_verify, this + * invalid according to rustsecp256k1_v0_4_1_ec_seckey_verify, this * function returns 0. seckey will be set to some unspecified * value if this function returns 0. (cannot be NULL) * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to - * rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0. For + * rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128) (cannot be NULL). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_tweak_add( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_seckey_tweak_add( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Same as rustsecp256k1_v0_4_0_ec_seckey_tweak_add, but DEPRECATED. Will be removed in +/** Same as rustsecp256k1_v0_4_1_ec_seckey_tweak_add, but DEPRECATED. Will be removed in * future versions. 
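The signing-side declarations in the hunks above combine into the usual flow: check the secret key, derive the public key, and sign with the default nonce function. A sketch, assuming a context created with SECP256K1_CONTEXT_SIGN:

```c
#include <secp256k1.h>

int sign_hash(const rustsecp256k1_v0_4_1_context *ctx,
              const unsigned char seckey[32],
              const unsigned char msghash32[32],
              rustsecp256k1_v0_4_1_ecdsa_signature *sig,
              rustsecp256k1_v0_4_1_pubkey *pubkey) {
    if (!rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, seckey)) return 0;
    if (!rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, pubkey, seckey)) return 0;
    /* noncefp == NULL selects rustsecp256k1_v0_4_1_nonce_function_default
     * (RFC6979); the resulting signature is always lower-S. */
    return rustsecp256k1_v0_4_1_ecdsa_sign(ctx, sig, msghash32, seckey, NULL, NULL);
}
```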
*/ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_tweak_add( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_privkey_tweak_add( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -662,13 +691,13 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_t * In/Out: pubkey: pointer to a public key object. pubkey will be set to an * invalid value if this function returns 0 (cannot be NULL). * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to - * rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0. For + * rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128) (cannot be NULL). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_tweak_add( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_tweak_add( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_pubkey *pubkey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -677,24 +706,24 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_tw * Returns: 0 if the arguments are invalid. 1 otherwise. * Args: ctx: pointer to a context object (cannot be NULL). * In/Out: seckey: pointer to a 32-byte secret key. If the secret key is - * invalid according to rustsecp256k1_v0_4_0_ec_seckey_verify, this + * invalid according to rustsecp256k1_v0_4_1_ec_seckey_verify, this * function returns 0. seckey will be set to some unspecified * value if this function returns 0. (cannot be NULL) * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to - * rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0. For + * rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128) (cannot be NULL). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_tweak_mul( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_seckey_tweak_mul( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Same as rustsecp256k1_v0_4_0_ec_seckey_tweak_mul, but DEPRECATED. Will be removed in +/** Same as rustsecp256k1_v0_4_1_ec_seckey_tweak_mul, but DEPRECATED. Will be removed in * future versions. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_tweak_mul( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_privkey_tweak_mul( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -707,13 +736,13 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_t * In/Out: pubkey: pointer to a public key object. pubkey will be set to an * invalid value if this function returns 0 (cannot be NULL). 
* In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to - * rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0. For + * rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128) (cannot be NULL). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_tweak_mul( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_pubkey *pubkey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -738,12 +767,12 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_tw * guaranteed and may change in the future. It is safe to call this function on * contexts not initialized for signing; then it will have no effect and return 1. * - * You should call this after rustsecp256k1_v0_4_0_context_create or - * rustsecp256k1_v0_4_0_context_clone (and rustsecp256k1_v0_4_0_context_preallocated_create or - * rustsecp256k1_v0_4_0_context_clone, resp.), and you may call this repeatedly afterwards. + * You should call this after rustsecp256k1_v0_4_1_context_create or + * rustsecp256k1_v0_4_1_context_clone (and rustsecp256k1_v0_4_1_context_preallocated_create or + * rustsecp256k1_v0_4_1_context_clone, resp.), and you may call this repeatedly afterwards. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_context_randomize( - rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_context_randomize( + rustsecp256k1_v0_4_1_context* ctx, const unsigned char *seed32 ) SECP256K1_ARG_NONNULL(1); @@ -757,10 +786,10 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_context_rand * In: ins: pointer to array of pointers to public keys (cannot be NULL) * n: the number of public keys to add together (must be at least 1) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_combine( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_pubkey *out, - const rustsecp256k1_v0_4_0_pubkey * const * ins, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_combine( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_pubkey *out, + const rustsecp256k1_v0_4_1_pubkey * const * ins, size_t n ) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1_ecdh.h b/secp256k1-sys/depend/secp256k1/include/secp256k1_ecdh.h index 44d28a2..9c39389 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1_ecdh.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1_ecdh.h @@ -10,15 +10,15 @@ extern "C" { /** A pointer to a function that hashes an EC point to obtain an ECDH secret * * Returns: 1 if the point was successfully hashed. - * 0 will cause rustsecp256k1_v0_4_0_ecdh to fail and return 0. + * 0 will cause rustsecp256k1_v0_4_1_ecdh to fail and return 0. * Other return values are not allowed, and the behaviour of - * rustsecp256k1_v0_4_0_ecdh is undefined for other return values. + * rustsecp256k1_v0_4_1_ecdh is undefined for other return values. 
* Out: output: pointer to an array to be filled by the function * In: x32: pointer to a 32-byte x coordinate * y32: pointer to a 32-byte y coordinate * data: arbitrary data pointer that is passed through */ -typedef int (*rustsecp256k1_v0_4_0_ecdh_hash_function)( +typedef int (*rustsecp256k1_v0_4_1_ecdh_hash_function)( unsigned char *output, const unsigned char *x32, const unsigned char *y32, @@ -27,11 +27,11 @@ typedef int (*rustsecp256k1_v0_4_0_ecdh_hash_function)( /** An implementation of SHA256 hash function that applies to compressed public key. * Populates the output parameter with 32 bytes. */ -SECP256K1_API extern const rustsecp256k1_v0_4_0_ecdh_hash_function rustsecp256k1_v0_4_0_ecdh_hash_function_sha256; +SECP256K1_API extern const rustsecp256k1_v0_4_1_ecdh_hash_function rustsecp256k1_v0_4_1_ecdh_hash_function_sha256; -/** A default ECDH hash function (currently equal to rustsecp256k1_v0_4_0_ecdh_hash_function_sha256). +/** A default ECDH hash function (currently equal to rustsecp256k1_v0_4_1_ecdh_hash_function_sha256). * Populates the output parameter with 32 bytes. */ -SECP256K1_API extern const rustsecp256k1_v0_4_0_ecdh_hash_function rustsecp256k1_v0_4_0_ecdh_hash_function_default; +SECP256K1_API extern const rustsecp256k1_v0_4_1_ecdh_hash_function rustsecp256k1_v0_4_1_ecdh_hash_function_default; /** Compute an EC Diffie-Hellman secret in constant time * @@ -39,19 +39,19 @@ SECP256K1_API extern const rustsecp256k1_v0_4_0_ecdh_hash_function rustsecp256k1 * 0: scalar was invalid (zero or overflow) or hashfp returned 0 * Args: ctx: pointer to a context object (cannot be NULL) * Out: output: pointer to an array to be filled by hashfp - * In: pubkey: a pointer to a rustsecp256k1_v0_4_0_pubkey containing an + * In: pubkey: a pointer to a rustsecp256k1_v0_4_1_pubkey containing an * initialized public key * seckey: a 32-byte scalar with which to multiply the point - * hashfp: pointer to a hash function. If NULL, rustsecp256k1_v0_4_0_ecdh_hash_function_sha256 is used + * hashfp: pointer to a hash function. If NULL, rustsecp256k1_v0_4_1_ecdh_hash_function_sha256 is used * (in which case, 32 bytes will be written to output) * data: arbitrary data pointer that is passed through to hashfp */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ecdh( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ecdh( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output, - const rustsecp256k1_v0_4_0_pubkey *pubkey, + const rustsecp256k1_v0_4_1_pubkey *pubkey, const unsigned char *seckey, - rustsecp256k1_v0_4_0_ecdh_hash_function hashfp, + rustsecp256k1_v0_4_1_ecdh_hash_function hashfp, void *data ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1_extrakeys.h b/secp256k1-sys/depend/secp256k1/include/secp256k1_extrakeys.h index b28cb2f..8e74bab 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1_extrakeys.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1_extrakeys.h @@ -15,13 +15,13 @@ extern "C" { * The exact representation of data inside is implementation defined and not * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. 
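Per the parameter list above, passing hashfp == NULL selects the SHA256-of-compressed-point default and writes exactly 32 bytes, so a fixed-size buffer is enough. A sketch:

```c
#include <secp256k1_ecdh.h>

int shared_secret(const rustsecp256k1_v0_4_1_context *ctx,
                  unsigned char output[32],
                  const rustsecp256k1_v0_4_1_pubkey *their_pubkey,
                  const unsigned char my_seckey[32]) {
    /* NULL hashfp selects rustsecp256k1_v0_4_1_ecdh_hash_function_default,
     * i.e. SHA256 of the compressed shared point (32 bytes written). */
    return rustsecp256k1_v0_4_1_ecdh(ctx, output, their_pubkey, my_seckey, NULL, NULL);
}
```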
- * If you need to convert to a format suitable for storage, transmission, or - * comparison, use rustsecp256k1_v0_4_0_xonly_pubkey_serialize and - * rustsecp256k1_v0_4_0_xonly_pubkey_parse. + * If you need to convert to a format suitable for storage, transmission, use + * use rustsecp256k1_v0_4_1_xonly_pubkey_serialize and rustsecp256k1_v0_4_1_xonly_pubkey_parse. To + * compare keys, use rustsecp256k1_v0_4_1_xonly_pubkey_cmp. */ typedef struct { unsigned char data[64]; -} rustsecp256k1_v0_4_0_xonly_pubkey; +} rustsecp256k1_v0_4_1_xonly_pubkey; /** Opaque data structure that holds a keypair consisting of a secret and a * public key. @@ -32,7 +32,7 @@ typedef struct { */ typedef struct { unsigned char data[96]; -} rustsecp256k1_v0_4_0_keypair; +} rustsecp256k1_v0_4_1_keypair; /** Parse a 32-byte sequence into a xonly_pubkey object. * @@ -45,9 +45,9 @@ typedef struct { * (cannot be NULL). * In: input32: pointer to a serialized xonly_pubkey (cannot be NULL) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_xonly_pubkey_parse( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_xonly_pubkey* pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_xonly_pubkey_parse( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_xonly_pubkey* pubkey, const unsigned char *input32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -58,16 +58,31 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_xonly_pubkey * Args: ctx: a secp256k1 context object (cannot be NULL). * Out: output32: a pointer to a 32-byte array to place the serialized key in * (cannot be NULL). - * In: pubkey: a pointer to a rustsecp256k1_v0_4_0_xonly_pubkey containing an + * In: pubkey: a pointer to a rustsecp256k1_v0_4_1_xonly_pubkey containing an * initialized public key (cannot be NULL). */ -SECP256K1_API int rustsecp256k1_v0_4_0_xonly_pubkey_serialize( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API int rustsecp256k1_v0_4_1_xonly_pubkey_serialize( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output32, - const rustsecp256k1_v0_4_0_xonly_pubkey* pubkey + const rustsecp256k1_v0_4_1_xonly_pubkey* pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Converts a rustsecp256k1_v0_4_0_pubkey into a rustsecp256k1_v0_4_0_xonly_pubkey. +/** Compare two x-only public keys using lexicographic order + * + * Returns: <0 if the first public key is less than the second + * >0 if the first public key is greater than the second + * 0 if the two public keys are equal + * Args: ctx: a secp256k1 context object. + * In: pubkey1: first public key to compare + * pubkey2: second public key to compare + */ +SECP256K1_API int rustsecp256k1_v0_4_1_xonly_pubkey_cmp( + const rustsecp256k1_v0_4_1_context* ctx, + const rustsecp256k1_v0_4_1_xonly_pubkey* pk1, + const rustsecp256k1_v0_4_1_xonly_pubkey* pk2 +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); + +/** Converts a rustsecp256k1_v0_4_1_pubkey into a rustsecp256k1_v0_4_1_xonly_pubkey. * * Returns: 1 if the public key was successfully converted * 0 otherwise @@ -80,11 +95,11 @@ SECP256K1_API int rustsecp256k1_v0_4_0_xonly_pubkey_serialize( * set to 0 otherwise. 
(can be NULL) * In: pubkey: pointer to a public key that is converted (cannot be NULL) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_xonly_pubkey *xonly_pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_xonly_pubkey *xonly_pubkey, int *pk_parity, - const rustsecp256k1_v0_4_0_pubkey *pubkey + const rustsecp256k1_v0_4_1_pubkey *pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4); /** Tweak an x-only public key by adding the generator multiplied with tweak32 @@ -92,7 +107,7 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_xonly_pubkey * * Note that the resulting point can not in general be represented by an x-only * pubkey because it may have an odd Y coordinate. Instead, the output_pubkey - * is a normal rustsecp256k1_v0_4_0_pubkey. + * is a normal rustsecp256k1_v0_4_1_pubkey. * * Returns: 0 if the arguments are invalid or the resulting public key would be * invalid (only when the tweak is the negation of the corresponding @@ -106,24 +121,24 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_xonly_pubkey * In: internal_pubkey: pointer to an x-only pubkey to apply the tweak to. * (cannot be NULL). * tweak32: pointer to a 32-byte tweak. If the tweak is invalid - * according to rustsecp256k1_v0_4_0_ec_seckey_verify, this function + * according to rustsecp256k1_v0_4_1_ec_seckey_verify, this function * returns 0. For uniformly random 32-byte arrays the * chance of being invalid is negligible (around 1 in * 2^128) (cannot be NULL). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_pubkey *output_pubkey, - const rustsecp256k1_v0_4_0_xonly_pubkey *internal_pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_pubkey *output_pubkey, + const rustsecp256k1_v0_4_1_xonly_pubkey *internal_pubkey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); /** Checks that a tweaked pubkey is the result of calling - * rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add with internal_pubkey and tweak32. + * rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add with internal_pubkey and tweak32. * * The tweaked pubkey is represented by its 32-byte x-only serialization and * its pk_parity, which can both be obtained by converting the result of - * tweak_add to a rustsecp256k1_v0_4_0_xonly_pubkey. + * tweak_add to a rustsecp256k1_v0_4_1_xonly_pubkey. * * Note that this alone does _not_ verify that the tweaked pubkey is a * commitment. If the tweak is not chosen in a specific way, the tweaked pubkey @@ -137,17 +152,17 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_xonly_pubkey * tweaked_pk_parity: the parity of the tweaked pubkey (whose serialization * is passed in as tweaked_pubkey32). This must match the * pk_parity value that is returned when calling - * rustsecp256k1_v0_4_0_xonly_pubkey with the tweaked pubkey, or + * rustsecp256k1_v0_4_1_xonly_pubkey with the tweaked pubkey, or * this function will fail. 
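tweak_add and tweak_add_check above are two halves of one protocol: the signer produces the tweaked key, and a verifier confirms the relationship from just the 32-byte serialization and parity. A sketch of the round trip, assuming a context with the capabilities these calls need in this version:

```c
#include <secp256k1_extrakeys.h>

int tweak_and_check(const rustsecp256k1_v0_4_1_context *ctx,
                    const rustsecp256k1_v0_4_1_xonly_pubkey *internal,
                    const unsigned char tweak32[32]) {
    rustsecp256k1_v0_4_1_pubkey out;         /* tweaked key: a normal pubkey */
    rustsecp256k1_v0_4_1_xonly_pubkey out_x; /* its x-only form */
    unsigned char out32[32];
    int parity;
    if (!rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(ctx, &out, internal, tweak32)) return 0;
    if (!rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(ctx, &out_x, &parity, &out)) return 0;
    if (!rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, out32, &out_x)) return 0;
    /* A verifier holding only (out32, parity, internal, tweak32) can now
     * confirm the tweak relationship: */
    return rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(ctx, out32, parity,
                                                             internal, tweak32);
}
```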
* internal_pubkey: pointer to an x-only public key object to apply the * tweak to (cannot be NULL) * tweak32: pointer to a 32-byte tweak (cannot be NULL) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check( + const rustsecp256k1_v0_4_1_context* ctx, const unsigned char *tweaked_pubkey32, int tweaked_pk_parity, - const rustsecp256k1_v0_4_0_xonly_pubkey *internal_pubkey, + const rustsecp256k1_v0_4_1_xonly_pubkey *internal_pubkey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5); @@ -159,12 +174,25 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_xonly_pubkey * Out: keypair: pointer to the created keypair (cannot be NULL) * In: seckey: pointer to a 32-byte secret key (cannot be NULL) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_keypair_create( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_keypair *keypair, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_keypair_create( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_keypair *keypair, const unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); +/** Get the secret key from a keypair. + * + * Returns: 0 if the arguments are invalid. 1 otherwise. + * Args: ctx: pointer to a context object (cannot be NULL) + * Out: seckey: pointer to a 32-byte buffer for the secret key (cannot be NULL) + * In: keypair: pointer to a keypair (cannot be NULL) + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_keypair_sec( + const rustsecp256k1_v0_4_1_context* ctx, + unsigned char *seckey, + const rustsecp256k1_v0_4_1_keypair *keypair +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); + /** Get the public key from a keypair. * * Returns: 0 if the arguments are invalid. 1 otherwise. @@ -174,16 +202,16 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_keypair_crea * (cannot be NULL) * In: keypair: pointer to a keypair (cannot be NULL) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_keypair_pub( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_pubkey *pubkey, - const rustsecp256k1_v0_4_0_keypair *keypair +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_keypair_pub( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_pubkey *pubkey, + const rustsecp256k1_v0_4_1_keypair *keypair ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Get the x-only public key from a keypair. * - * This is the same as calling rustsecp256k1_v0_4_0_keypair_pub and then - * rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey. + * This is the same as calling rustsecp256k1_v0_4_1_keypair_pub and then + * rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey. * * Returns: 0 if the arguments are invalid. 1 otherwise. * Args: ctx: pointer to a context object (cannot be NULL) @@ -192,22 +220,22 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_keypair_pub( * xonly_pubkey. If not, it's set to an invalid value (cannot be * NULL). * pk_parity: pointer to an integer that will be set to the pk_parity - * argument of rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey (can be NULL). 
+ * argument of rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey (can be NULL). * In: keypair: pointer to a keypair (cannot be NULL) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_keypair_xonly_pub( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_xonly_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_keypair_xonly_pub( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_xonly_pubkey *pubkey, int *pk_parity, - const rustsecp256k1_v0_4_0_keypair *keypair + const rustsecp256k1_v0_4_1_keypair *keypair ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4); /** Tweak a keypair by adding tweak32 to the secret key and updating the public * key accordingly. * - * Calling this function and then rustsecp256k1_v0_4_0_keypair_pub results in the same - * public key as calling rustsecp256k1_v0_4_0_keypair_xonly_pub and then - * rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add. + * Calling this function and then rustsecp256k1_v0_4_1_keypair_pub results in the same + * public key as calling rustsecp256k1_v0_4_1_keypair_xonly_pub and then + * rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add. * * Returns: 0 if the arguments are invalid or the resulting keypair would be * invalid (only when the tweak is the negation of the keypair's @@ -219,13 +247,13 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_keypair_xonl * an invalid value if this function returns 0 (cannot be * NULL). * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according - * to rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0. For + * to rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128) (cannot be NULL). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_keypair_xonly_tweak_add( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_keypair *keypair, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_keypair_xonly_tweak_add( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_keypair *keypair, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1_preallocated.h b/secp256k1-sys/depend/secp256k1/include/secp256k1_preallocated.h index 121fd7d..4fc054c 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1_preallocated.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1_preallocated.h @@ -16,8 +16,8 @@ extern "C" { * objects created by functions in secp256k1.h, i.e., they can be passed to any * API function that expects a context object (see secp256k1.h for details). The * only exception is that context objects created by functions in this module - * must be destroyed using rustsecp256k1_v0_4_0_context_preallocated_destroy (in this - * module) instead of rustsecp256k1_v0_4_0_context_destroy (in secp256k1.h). + * must be destroyed using rustsecp256k1_v0_4_1_context_preallocated_destroy (in this + * module) instead of rustsecp256k1_v0_4_1_context_destroy (in secp256k1.h). * * It is guaranteed that functions in this module will not call malloc or its * friends realloc, calloc, and free. @@ -27,24 +27,24 @@ extern "C" { * caller-provided memory. * * The purpose of this function is to determine how much memory must be provided - * to rustsecp256k1_v0_4_0_context_preallocated_create. 
+ * to rustsecp256k1_v0_4_1_context_preallocated_create. * * Returns: the required size of the caller-provided memory block * In: flags: which parts of the context to initialize. */ -SECP256K1_API size_t rustsecp256k1_v0_4_0_context_preallocated_size( +SECP256K1_API size_t rustsecp256k1_v0_4_1_context_preallocated_size( unsigned int flags ) SECP256K1_WARN_UNUSED_RESULT; /** Create a secp256k1 context object in caller-provided memory. * * The caller must provide a pointer to a rewritable contiguous block of memory - * of size at least rustsecp256k1_v0_4_0_context_preallocated_size(flags) bytes, suitably + * of size at least rustsecp256k1_v0_4_1_context_preallocated_size(flags) bytes, suitably * aligned to hold an object of any type. * * The block of memory is exclusively owned by the created context object during * the lifetime of this context object, which begins with the call to this - * function and ends when a call to rustsecp256k1_v0_4_0_context_preallocated_destroy + * function and ends when a call to rustsecp256k1_v0_4_1_context_preallocated_destroy * (which destroys the context object again) returns. During the lifetime of the * context object, the caller is obligated not to access this block of memory, * i.e., the caller may not read or write the memory, e.g., by copying the memory @@ -54,14 +54,14 @@ SECP256K1_API size_t rustsecp256k1_v0_4_0_context_preallocated_size( * * Returns: a newly created context object. * In: prealloc: a pointer to a rewritable contiguous block of memory of - * size at least rustsecp256k1_v0_4_0_context_preallocated_size(flags) + * size at least rustsecp256k1_v0_4_1_context_preallocated_size(flags) * bytes, as detailed above (cannot be NULL) * flags: which parts of the context to initialize. * - * See also rustsecp256k1_v0_4_0_context_randomize (in secp256k1.h) - * and rustsecp256k1_v0_4_0_context_preallocated_destroy. + * See also rustsecp256k1_v0_4_1_context_randomize (in secp256k1.h) + * and rustsecp256k1_v0_4_1_context_preallocated_destroy. */ -SECP256K1_API rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_preallocated_create( +SECP256K1_API rustsecp256k1_v0_4_1_context* rustsecp256k1_v0_4_1_context_preallocated_create( void* prealloc, unsigned int flags ) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT; @@ -72,28 +72,28 @@ SECP256K1_API rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_preallo * Returns: the required size of the caller-provided memory block. * In: ctx: an existing context to copy (cannot be NULL) */ -SECP256K1_API size_t rustsecp256k1_v0_4_0_context_preallocated_clone_size( - const rustsecp256k1_v0_4_0_context* ctx +SECP256K1_API size_t rustsecp256k1_v0_4_1_context_preallocated_clone_size( + const rustsecp256k1_v0_4_1_context* ctx ) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT; /** Copy a secp256k1 context object into caller-provided memory. * * The caller must provide a pointer to a rewritable contiguous block of memory - * of size at least rustsecp256k1_v0_4_0_context_preallocated_size(flags) bytes, suitably + * of size at least rustsecp256k1_v0_4_1_context_preallocated_size(flags) bytes, suitably * aligned to hold an object of any type. * * The block of memory is exclusively owned by the created context object during * the lifetime of this context object, see the description of - * rustsecp256k1_v0_4_0_context_preallocated_create for details. + * rustsecp256k1_v0_4_1_context_preallocated_create for details. * * Returns: a newly created context object. 
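The preallocated flow documented above: query the size for the chosen flags, supply a suitably aligned caller-owned block, and later destroy through the preallocated variant before reclaiming the memory. This sketch still obtains the block with malloc; the guarantee above is only that the library itself allocates nothing:

```c
#include <stdlib.h>
#include <secp256k1_preallocated.h>

rustsecp256k1_v0_4_1_context *make_prealloc_context(void **block_out) {
    size_t size = rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_CONTEXT_SIGN);
    void *block = malloc(size); /* malloc returns suitably aligned memory */
    if (block == NULL) return NULL;
    *block_out = block; /* caller frees this, but only after calling
                           rustsecp256k1_v0_4_1_context_preallocated_destroy */
    return rustsecp256k1_v0_4_1_context_preallocated_create(block, SECP256K1_CONTEXT_SIGN);
}
```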
* Args: ctx: an existing context to copy (cannot be NULL) * In: prealloc: a pointer to a rewritable contiguous block of memory of - * size at least rustsecp256k1_v0_4_0_context_preallocated_size(flags) + * size at least rustsecp256k1_v0_4_1_context_preallocated_size(flags) * bytes, as detailed above (cannot be NULL) */ -SECP256K1_API rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_preallocated_clone( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API rustsecp256k1_v0_4_1_context* rustsecp256k1_v0_4_1_context_preallocated_clone( + const rustsecp256k1_v0_4_1_context* ctx, void* prealloc ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_WARN_UNUSED_RESULT; @@ -103,22 +103,22 @@ SECP256K1_API rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_preallo * The context pointer may not be used afterwards. * * The context to destroy must have been created using - * rustsecp256k1_v0_4_0_context_preallocated_create or rustsecp256k1_v0_4_0_context_preallocated_clone. - * If the context has instead been created using rustsecp256k1_v0_4_0_context_create or - * rustsecp256k1_v0_4_0_context_clone, the behaviour is undefined. In that case, - * rustsecp256k1_v0_4_0_context_destroy must be used instead. + * rustsecp256k1_v0_4_1_context_preallocated_create or rustsecp256k1_v0_4_1_context_preallocated_clone. + * If the context has instead been created using rustsecp256k1_v0_4_1_context_create or + * rustsecp256k1_v0_4_1_context_clone, the behaviour is undefined. In that case, + * rustsecp256k1_v0_4_1_context_destroy must be used instead. * * If required, it is the responsibility of the caller to deallocate the block * of memory properly after this function returns, e.g., by calling free on the - * preallocated pointer given to rustsecp256k1_v0_4_0_context_preallocated_create or - * rustsecp256k1_v0_4_0_context_preallocated_clone. + * preallocated pointer given to rustsecp256k1_v0_4_1_context_preallocated_create or + * rustsecp256k1_v0_4_1_context_preallocated_clone. * * Args: ctx: an existing context to destroy, constructed using - * rustsecp256k1_v0_4_0_context_preallocated_create or - * rustsecp256k1_v0_4_0_context_preallocated_clone (cannot be NULL) + * rustsecp256k1_v0_4_1_context_preallocated_create or + * rustsecp256k1_v0_4_1_context_preallocated_clone (cannot be NULL) */ -SECP256K1_API void rustsecp256k1_v0_4_0_context_preallocated_destroy( - rustsecp256k1_v0_4_0_context* ctx +SECP256K1_API void rustsecp256k1_v0_4_1_context_preallocated_destroy( + rustsecp256k1_v0_4_1_context* ctx ); #ifdef __cplusplus diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h b/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h index 65839d3..df1f110 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h @@ -14,8 +14,8 @@ extern "C" { * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 65 bytes in size, and can be safely copied/moved. * If you need to convert to a format suitable for storage or transmission, use - * the rustsecp256k1_v0_4_0_ecdsa_signature_serialize_* and - * rustsecp256k1_v0_4_0_ecdsa_signature_parse_* functions. + * the rustsecp256k1_v0_4_1_ecdsa_signature_serialize_* and + * rustsecp256k1_v0_4_1_ecdsa_signature_parse_* functions. 
* * Furthermore, it is guaranteed that identical signatures (including their * recoverability) will have identical representation, so they can be @@ -23,7 +23,7 @@ extern "C" { */ typedef struct { unsigned char data[65]; -} rustsecp256k1_v0_4_0_ecdsa_recoverable_signature; +} rustsecp256k1_v0_4_1_ecdsa_recoverable_signature; /** Parse a compact ECDSA signature (64 bytes + recovery id). * @@ -33,9 +33,9 @@ typedef struct { * In: input64: a pointer to a 64-byte compact signature * recid: the recovery id (0, 1, 2 or 3) */ -SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_ecdsa_recoverable_signature* sig, +SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -46,10 +46,10 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact * Out: sig: a pointer to a normal signature (cannot be NULL). * In: sigin: a pointer to a recoverable signature (cannot be NULL). */ -SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_ecdsa_signature* sig, - const rustsecp256k1_v0_4_0_ecdsa_recoverable_signature* sigin +SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_ecdsa_signature* sig, + const rustsecp256k1_v0_4_1_ecdsa_recoverable_signature* sigin ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Serialize an ECDSA signature in compact format (64 bytes + recovery id). @@ -60,11 +60,11 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert( * recid: a pointer to an integer to hold the recovery id (can be NULL). * In: sig: a pointer to an initialized signature object (cannot be NULL) */ -SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_compact( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output64, int *recid, - const rustsecp256k1_v0_4_0_ecdsa_recoverable_signature* sig + const rustsecp256k1_v0_4_1_ecdsa_recoverable_signature* sig ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); /** Create a recoverable ECDSA signature. @@ -75,15 +75,15 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_com * Out: sig: pointer to an array where the signature will be placed (cannot be NULL) * In: msghash32: the 32-byte message hash being signed (cannot be NULL) * seckey: pointer to a 32-byte secret key (cannot be NULL) - * noncefp: pointer to a nonce generation function. If NULL, rustsecp256k1_v0_4_0_nonce_function_default is used + * noncefp: pointer to a nonce generation function. 
If NULL, rustsecp256k1_v0_4_1_nonce_function_default is used * ndata: pointer to arbitrary data used by the nonce generation function (can be NULL) */ -SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_sign_recoverable( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_ecdsa_recoverable_signature *sig, +SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_sign_recoverable( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_ecdsa_recoverable_signature *sig, const unsigned char *msghash32, const unsigned char *seckey, - rustsecp256k1_v0_4_0_nonce_function noncefp, + rustsecp256k1_v0_4_1_nonce_function noncefp, const void *ndata ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); @@ -96,10 +96,10 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_sign_recoverable( * In: sig: pointer to initialized signature that supports pubkey recovery (cannot be NULL) * msghash32: the 32-byte message hash assumed to be signed (cannot be NULL) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ecdsa_recover( - const rustsecp256k1_v0_4_0_context* ctx, - rustsecp256k1_v0_4_0_pubkey *pubkey, - const rustsecp256k1_v0_4_0_ecdsa_recoverable_signature *sig, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ecdsa_recover( + const rustsecp256k1_v0_4_1_context* ctx, + rustsecp256k1_v0_4_1_pubkey *pubkey, + const rustsecp256k1_v0_4_1_ecdsa_recoverable_signature *sig, const unsigned char *msghash32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1_schnorrsig.h b/secp256k1-sys/depend/secp256k1/include/secp256k1_schnorrsig.h index db1b5eb..9150b2c 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1_schnorrsig.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1_schnorrsig.h @@ -15,7 +15,7 @@ extern "C" { /** A pointer to a function to deterministically generate a nonce. * - * Same as rustsecp256k1_v0_4_0_nonce function with the exception of accepting an + * Same as rustsecp256k1_v0_4_1_nonce function with the exception of accepting an * additional pubkey argument and not requiring an attempt argument. The pubkey * argument can protect signature schemes with key-prefixed challenge hash * inputs against reusing the nonce when signing with the wrong precomputed @@ -35,7 +35,7 @@ extern "C" { * Except for test cases, this function should compute some cryptographic hash of * the message, the key, the pubkey, the algorithm description, and data. */ -typedef int (*rustsecp256k1_v0_4_0_nonce_function_hardened)( +typedef int (*rustsecp256k1_v0_4_1_nonce_function_hardened)( unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, @@ -56,16 +56,16 @@ typedef int (*rustsecp256k1_v0_4_0_nonce_function_hardened)( * bytes. Therefore, to create BIP-340 compliant signatures, algo16 must be set * to "BIP0340/nonce\0\0\0" */ -SECP256K1_API extern const rustsecp256k1_v0_4_0_nonce_function_hardened rustsecp256k1_v0_4_0_nonce_function_bip340; +SECP256K1_API extern const rustsecp256k1_v0_4_1_nonce_function_hardened rustsecp256k1_v0_4_1_nonce_function_bip340; /** Create a Schnorr signature. * * Does _not_ strictly follow BIP-340 because it does not verify the resulting - * signature. Instead, you can manually use rustsecp256k1_v0_4_0_schnorrsig_verify and + * signature. Instead, you can manually use rustsecp256k1_v0_4_1_schnorrsig_verify and * abort if it fails. 
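/* [Illustrative aside, not part of the upstream patch.] A sketch of the
 * recoverable-ECDSA round trip documented in secp256k1_recovery.h above:
 * sign, serialize to 64 bytes plus a recovery id, parse back, and recover
 * the signing public key. The seckey and msghash32 arguments are
 * placeholders and must be a valid secret key and a 32-byte message hash. */
#include "secp256k1.h"
#include "secp256k1_recovery.h"

static int recoverable_round_trip(const unsigned char *seckey, const unsigned char *msghash32) {
    rustsecp256k1_v0_4_1_context *ctx =
        rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
    rustsecp256k1_v0_4_1_ecdsa_recoverable_signature sig;
    rustsecp256k1_v0_4_1_pubkey pubkey;
    unsigned char sig64[64];
    int recid;
    int ok = 1;

    /* NULL noncefp selects the default (RFC 6979) nonce function. */
    ok &= rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(ctx, &sig, msghash32, seckey, NULL, NULL);
    ok &= rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact(ctx, sig64, &recid, &sig);
    ok &= rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &sig, sig64, recid);
    ok &= rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &pubkey, &sig, msghash32);

    rustsecp256k1_v0_4_1_context_destroy(ctx);
    return ok;
}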
* * Otherwise BIP-340 compliant if the noncefp argument is NULL or - * rustsecp256k1_v0_4_0_nonce_function_bip340 and the ndata argument is 32-byte auxiliary + * rustsecp256k1_v0_4_1_nonce_function_bip340 and the ndata argument is 32-byte auxiliary * randomness. * * Returns 1 on success, 0 on failure. @@ -73,18 +73,18 @@ SECP256K1_API extern const rustsecp256k1_v0_4_0_nonce_function_hardened rustsecp * Out: sig64: pointer to a 64-byte array to store the serialized signature (cannot be NULL) * In: msg32: the 32-byte message being signed (cannot be NULL) * keypair: pointer to an initialized keypair (cannot be NULL) - * noncefp: pointer to a nonce generation function. If NULL, rustsecp256k1_v0_4_0_nonce_function_bip340 is used + * noncefp: pointer to a nonce generation function. If NULL, rustsecp256k1_v0_4_1_nonce_function_bip340 is used * ndata: pointer to arbitrary data used by the nonce generation * function (can be NULL). If it is non-NULL and - * rustsecp256k1_v0_4_0_nonce_function_bip340 is used, then ndata must be a + * rustsecp256k1_v0_4_1_nonce_function_bip340 is used, then ndata must be a * pointer to 32-byte auxiliary randomness as per BIP-340. */ -SECP256K1_API int rustsecp256k1_v0_4_0_schnorrsig_sign( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API int rustsecp256k1_v0_4_1_schnorrsig_sign( + const rustsecp256k1_v0_4_1_context* ctx, unsigned char *sig64, const unsigned char *msg32, - const rustsecp256k1_v0_4_0_keypair *keypair, - rustsecp256k1_v0_4_0_nonce_function_hardened noncefp, + const rustsecp256k1_v0_4_1_keypair *keypair, + rustsecp256k1_v0_4_1_nonce_function_hardened noncefp, void *ndata ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); @@ -97,11 +97,11 @@ SECP256K1_API int rustsecp256k1_v0_4_0_schnorrsig_sign( * msg32: the 32-byte message being verified (cannot be NULL) * pubkey: pointer to an x-only public key to verify with (cannot be NULL) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_schnorrsig_verify( - const rustsecp256k1_v0_4_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_schnorrsig_verify( + const rustsecp256k1_v0_4_1_context* ctx, const unsigned char *sig64, const unsigned char *msg32, - const rustsecp256k1_v0_4_0_xonly_pubkey *pubkey + const rustsecp256k1_v0_4_1_xonly_pubkey *pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); #ifdef __cplusplus diff --git a/secp256k1-sys/depend/secp256k1/obj/.gitignore b/secp256k1-sys/depend/secp256k1/obj/.gitignore deleted file mode 100644 index e69de29..0000000 diff --git a/secp256k1-sys/depend/secp256k1/sage/gen_exhaustive_groups.sage b/secp256k1-sys/depend/secp256k1/sage/gen_exhaustive_groups.sage index 9e1bc17..7ad9621 100644 --- a/secp256k1-sys/depend/secp256k1/sage/gen_exhaustive_groups.sage +++ b/secp256k1-sys/depend/secp256k1/sage/gen_exhaustive_groups.sage @@ -1,4 +1,4 @@ -load("rustsecp256k1_v0_4_0_params.sage") +load("rustsecp256k1_v0_4_1_params.sage") orders_done = set() results = {} @@ -95,13 +95,13 @@ for f in sorted(results.keys()): G = results[f]["G"] print("# %s EXHAUSTIVE_TEST_ORDER == %i" % ("if" if first else "elif", f)) first = False - print("static const rustsecp256k1_v0_4_0_ge rustsecp256k1_v0_4_0_ge_const_g = SECP256K1_GE_CONST(") + print("static const rustsecp256k1_v0_4_1_ge rustsecp256k1_v0_4_1_ge_const_g = SECP256K1_GE_CONST(") print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[0]) >> (32 * (7 - i))) & 0xffffffff 
for i in range(4))) print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[0]) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8))) print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[1]) >> (32 * (7 - i))) & 0xffffffff for i in range(4))) print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x" % tuple((int(G[1]) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8))) print(");") - print("static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_fe_const_b = SECP256K1_FE_CONST(") + print("static const rustsecp256k1_v0_4_1_fe rustsecp256k1_v0_4_1_fe_const_b = SECP256K1_FE_CONST(") print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(b) >> (32 * (7 - i))) & 0xffffffff for i in range(4))) print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x" % tuple((int(b) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8))) print(");") diff --git a/secp256k1-sys/depend/secp256k1/sage/gen_split_lambda_constants.sage b/secp256k1-sys/depend/secp256k1/sage/gen_split_lambda_constants.sage index 99d0592..dc78ddc 100644 --- a/secp256k1-sys/depend/secp256k1/sage/gen_split_lambda_constants.sage +++ b/secp256k1-sys/depend/secp256k1/sage/gen_split_lambda_constants.sage @@ -1,9 +1,9 @@ -""" Generates the constants used in rustsecp256k1_v0_4_0_scalar_split_lambda. +""" Generates the constants used in rustsecp256k1_v0_4_1_scalar_split_lambda. -See the comments for rustsecp256k1_v0_4_0_scalar_split_lambda in src/scalar_impl.h for detailed explanations. +See the comments for rustsecp256k1_v0_4_1_scalar_split_lambda in src/scalar_impl.h for detailed explanations. """ -load("rustsecp256k1_v0_4_0_params.sage") +load("rustsecp256k1_v0_4_1_params.sage") def inf_norm(v): """Returns the infinity norm of a vector.""" @@ -24,17 +24,17 @@ def gauss_reduction(i1, i2): v2[1] -= m*v1[1] def find_split_constants_gauss(): - """Find constants for rustsecp256k1_v0_4_0_scalar_split_lamdba using gauss reduction.""" + """Find constants for rustsecp256k1_v0_4_1_scalar_split_lamdba using gauss reduction.""" (v11, v12), (v21, v22) = gauss_reduction([0, N], [1, int(LAMBDA)]) - # We use related vectors in rustsecp256k1_v0_4_0_scalar_split_lambda. + # We use related vectors in rustsecp256k1_v0_4_1_scalar_split_lambda. A1, B1 = -v21, -v11 A2, B2 = v22, -v21 return A1, B1, A2, B2 def find_split_constants_explicit_tof(): - """Find constants for rustsecp256k1_v0_4_0_scalar_split_lamdba using the trace of Frobenius. + """Find constants for rustsecp256k1_v0_4_1_scalar_split_lamdba using the trace of Frobenius. See Benjamin Smith: "Easy scalar decompositions for efficient scalar multiplication on elliptic curves and genus 2 Jacobians" (https://eprint.iacr.org/2013/672), Example 2 @@ -51,7 +51,7 @@ def find_split_constants_explicit_tof(): A2 = Integer((t + c)/2 - 1) B2 = Integer(1 - (t - c)/2) - # We use a negated b values in rustsecp256k1_v0_4_0_scalar_split_lambda. + # We use a negated b values in rustsecp256k1_v0_4_1_scalar_split_lambda. 
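# [Illustrative aside, not part of the upstream patch.] What these constants
# buy, sketched from the docstring above and from scalar_lambda_split below:
# the split rewrites a scalar k as k = k1 + lambda*k2 (mod N), with k1 and k2
# roughly 128 bits each, so a multiplication by k can exploit the curve's
# lambda endomorphism. The rounded products
#     c1 = rnddiv2((k * G1) >> 383)   # approximately round(k*G1 / 2^384)
#     c2 = rnddiv2((k * G2) >> 383)   # approximately round(k*G2 / 2^384)
# are combined with the negated B1, B2 to form k2, after which
# k1 = (k - lambda*k2) mod N recovers the other half.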
B1, B2 = -B1, -B2 return A1, B1, A2, B2 @@ -90,7 +90,7 @@ def rnddiv2(v): return v >> 1 def scalar_lambda_split(k): - """Equivalent to rustsecp256k1_v0_4_0_scalar_lambda_split().""" + """Equivalent to rustsecp256k1_v0_4_1_scalar_lambda_split().""" c1 = rnddiv2((k * G1) >> 383) c2 = rnddiv2((k * G2) >> 383) c1 = (c1 * -B1) % N diff --git a/secp256k1-sys/depend/secp256k1/sage/prove_group_implementations.sage b/secp256k1-sys/depend/secp256k1/sage/prove_group_implementations.sage index a408bf1..88b5865 100644 --- a/secp256k1-sys/depend/secp256k1/sage/prove_group_implementations.sage +++ b/secp256k1-sys/depend/secp256k1/sage/prove_group_implementations.sage @@ -5,8 +5,8 @@ import sys load("group_prover.sage") load("weierstrass_prover.sage") -def formula_rustsecp256k1_v0_4_0_gej_double_var(a): - """libsecp256k1's rustsecp256k1_v0_4_0_gej_double_var, used by various addition functions""" +def formula_rustsecp256k1_v0_4_1_gej_double_var(a): + """libsecp256k1's rustsecp256k1_v0_4_1_gej_double_var, used by various addition functions""" rz = a.Z * a.Y rz = rz * 2 t1 = a.X^2 @@ -29,8 +29,8 @@ def formula_rustsecp256k1_v0_4_0_gej_double_var(a): ry = ry + t2 return jacobianpoint(rx, ry, rz) -def formula_rustsecp256k1_v0_4_0_gej_add_var(branch, a, b): - """libsecp256k1's rustsecp256k1_v0_4_0_gej_add_var""" +def formula_rustsecp256k1_v0_4_1_gej_add_var(branch, a, b): + """libsecp256k1's rustsecp256k1_v0_4_1_gej_add_var""" if branch == 0: return (constraints(), constraints(nonzero={a.Infinity : 'a_infinite'}), b) if branch == 1: @@ -48,7 +48,7 @@ def formula_rustsecp256k1_v0_4_0_gej_add_var(branch, a, b): i = -s1 i = i + s2 if branch == 2: - r = formula_rustsecp256k1_v0_4_0_gej_double_var(a) + r = formula_rustsecp256k1_v0_4_1_gej_double_var(a) return (constraints(), constraints(zero={h : 'h=0', i : 'i=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}), r) if branch == 3: return (constraints(), constraints(zero={h : 'h=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={i : 'i!=0'}), point_at_infinity()) @@ -71,8 +71,8 @@ def formula_rustsecp256k1_v0_4_0_gej_add_var(branch, a, b): ry = ry + h3 return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz)) -def formula_rustsecp256k1_v0_4_0_gej_add_ge_var(branch, a, b): - """libsecp256k1's rustsecp256k1_v0_4_0_gej_add_ge_var, which assume bz==1""" +def formula_rustsecp256k1_v0_4_1_gej_add_ge_var(branch, a, b): + """libsecp256k1's rustsecp256k1_v0_4_1_gej_add_ge_var, which assume bz==1""" if branch == 0: return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(nonzero={a.Infinity : 'a_infinite'}), b) if branch == 1: @@ -88,7 +88,7 @@ def formula_rustsecp256k1_v0_4_0_gej_add_ge_var(branch, a, b): i = -s1 i = i + s2 if (branch == 2): - r = formula_rustsecp256k1_v0_4_0_gej_double_var(a) + r = formula_rustsecp256k1_v0_4_1_gej_double_var(a) return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0', i : 'i=0'}), r) if (branch == 3): return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity()) @@ -110,8 +110,8 @@ def formula_rustsecp256k1_v0_4_0_gej_add_ge_var(branch, a, b): ry = ry + h3 return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz)) -def 
formula_rustsecp256k1_v0_4_0_gej_add_zinv_var(branch, a, b): - """libsecp256k1's rustsecp256k1_v0_4_0_gej_add_zinv_var""" +def formula_rustsecp256k1_v0_4_1_gej_add_zinv_var(branch, a, b): + """libsecp256k1's rustsecp256k1_v0_4_1_gej_add_zinv_var""" bzinv = b.Z^(-1) if branch == 0: return (constraints(), constraints(nonzero={b.Infinity : 'b_infinite'}), a) @@ -134,7 +134,7 @@ def formula_rustsecp256k1_v0_4_0_gej_add_zinv_var(branch, a, b): i = -s1 i = i + s2 if branch == 2: - r = formula_rustsecp256k1_v0_4_0_gej_double_var(a) + r = formula_rustsecp256k1_v0_4_1_gej_double_var(a) return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0', i : 'i=0'}), r) if branch == 3: return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity()) @@ -157,8 +157,8 @@ def formula_rustsecp256k1_v0_4_0_gej_add_zinv_var(branch, a, b): ry = ry + h3 return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz)) -def formula_rustsecp256k1_v0_4_0_gej_add_ge(branch, a, b): - """libsecp256k1's rustsecp256k1_v0_4_0_gej_add_ge""" +def formula_rustsecp256k1_v0_4_1_gej_add_ge(branch, a, b): + """libsecp256k1's rustsecp256k1_v0_4_1_gej_add_ge""" zeroes = {} nonzeroes = {} a_infinity = False @@ -229,8 +229,8 @@ def formula_rustsecp256k1_v0_4_0_gej_add_ge(branch, a, b): return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zeroes, nonzero=nonzeroes), point_at_infinity()) return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zeroes, nonzero=nonzeroes), jacobianpoint(rx, ry, rz)) -def formula_rustsecp256k1_v0_4_0_gej_add_ge_old(branch, a, b): - """libsecp256k1's old rustsecp256k1_v0_4_0_gej_add_ge, which fails when ay+by=0 but ax!=bx""" +def formula_rustsecp256k1_v0_4_1_gej_add_ge_old(branch, a, b): + """libsecp256k1's old rustsecp256k1_v0_4_1_gej_add_ge, which fails when ay+by=0 but ax!=bx""" a_infinity = (branch & 1) != 0 zero = {} nonzero = {} @@ -292,15 +292,15 @@ def formula_rustsecp256k1_v0_4_0_gej_add_ge_old(branch, a, b): return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zero, nonzero=nonzero), jacobianpoint(rx, ry, rz)) if __name__ == "__main__": - check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_4_0_gej_add_var) - check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_4_0_gej_add_ge_var) - check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_4_0_gej_add_zinv_var) - check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_ge", 0, 7, 16, formula_rustsecp256k1_v0_4_0_gej_add_ge) - check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_4_0_gej_add_ge_old) + check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_4_1_gej_add_var) + check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_4_1_gej_add_ge_var) + check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_4_1_gej_add_zinv_var) + check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_ge", 0, 7, 16, formula_rustsecp256k1_v0_4_1_gej_add_ge) + 
check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_4_1_gej_add_ge_old) if len(sys.argv) >= 2 and sys.argv[1] == "--exhaustive": - check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_4_0_gej_add_var, 43) - check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_4_0_gej_add_ge_var, 43) - check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_4_0_gej_add_zinv_var, 43) - check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_ge", 0, 7, 16, formula_rustsecp256k1_v0_4_0_gej_add_ge, 43) - check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_4_0_gej_add_ge_old, 43) + check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_4_1_gej_add_var, 43) + check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_4_1_gej_add_ge_var, 43) + check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_4_1_gej_add_zinv_var, 43) + check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_ge", 0, 7, 16, formula_rustsecp256k1_v0_4_1_gej_add_ge, 43) + check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_4_1_gej_add_ge_old, 43) diff --git a/secp256k1-sys/depend/secp256k1/src/asm/field_10x26_arm.s b/secp256k1-sys/depend/secp256k1/src/asm/field_10x26_arm.s index ace090c..a706bec 100644 --- a/secp256k1-sys/depend/secp256k1/src/asm/field_10x26_arm.s +++ b/secp256k1-sys/depend/secp256k1/src/asm/field_10x26_arm.s @@ -27,8 +27,8 @@ Note: .set field_not_M, 0xfc000000 @ ~M = ~0x3ffffff .align 2 - .global rustsecp256k1_v0_4_0_fe_mul_inner - .type rustsecp256k1_v0_4_0_fe_mul_inner, %function + .global rustsecp256k1_v0_4_1_fe_mul_inner + .type rustsecp256k1_v0_4_1_fe_mul_inner, %function @ Arguments: @ r0 r Restrict: can overlap with a, not with b @ r1 a @@ -36,7 +36,7 @@ Note: @ Stack (total 4+10*4 = 44) @ sp + #0 saved 'r' pointer @ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9 -rustsecp256k1_v0_4_0_fe_mul_inner: +rustsecp256k1_v0_4_1_fe_mul_inner: stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14} sub sp, sp, #48 @ frame=44 + alignment str r0, [sp, #0] @ save result address, we need it only at the end @@ -511,18 +511,18 @@ rustsecp256k1_v0_4_0_fe_mul_inner: add sp, sp, #48 ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc} - .size rustsecp256k1_v0_4_0_fe_mul_inner, .-rustsecp256k1_v0_4_0_fe_mul_inner + .size rustsecp256k1_v0_4_1_fe_mul_inner, .-rustsecp256k1_v0_4_1_fe_mul_inner .align 2 - .global rustsecp256k1_v0_4_0_fe_sqr_inner - .type rustsecp256k1_v0_4_0_fe_sqr_inner, %function + .global rustsecp256k1_v0_4_1_fe_sqr_inner + .type rustsecp256k1_v0_4_1_fe_sqr_inner, %function @ Arguments: @ r0 r Can overlap with a @ r1 a @ Stack (total 4+10*4 = 44) @ sp + #0 saved 'r' pointer @ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9 -rustsecp256k1_v0_4_0_fe_sqr_inner: +rustsecp256k1_v0_4_1_fe_sqr_inner: stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14} sub sp, sp, #48 @ frame=44 + alignment str r0, [sp, #0] @ save result address, we need it only at the end @@ -909,5 +909,5 @@ rustsecp256k1_v0_4_0_fe_sqr_inner: add sp, sp, #48 ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc} - .size 
rustsecp256k1_v0_4_0_fe_sqr_inner, .-rustsecp256k1_v0_4_0_fe_sqr_inner + .size rustsecp256k1_v0_4_1_fe_sqr_inner, .-rustsecp256k1_v0_4_1_fe_sqr_inner diff --git a/secp256k1-sys/depend/secp256k1/src/assumptions.h b/secp256k1-sys/depend/secp256k1/src/assumptions.h index 3273641..3d2da24 100644 --- a/secp256k1-sys/depend/secp256k1/src/assumptions.h +++ b/secp256k1-sys/depend/secp256k1/src/assumptions.h @@ -16,7 +16,7 @@ reduce the odds of experiencing an unwelcome surprise. */ -struct rustsecp256k1_v0_4_0_assumption_checker { +struct rustsecp256k1_v0_4_1_assumption_checker { /* This uses a trick to implement a static assertion in C89: a type with an array of negative size is not allowed. */ int dummy_array[( diff --git a/secp256k1-sys/depend/secp256k1/src/basic-config.h b/secp256k1-sys/depend/secp256k1/src/basic-config.h index bb6b582..6f7693c 100644 --- a/secp256k1-sys/depend/secp256k1/src/basic-config.h +++ b/secp256k1-sys/depend/secp256k1/src/basic-config.h @@ -9,25 +9,8 @@ #ifdef USE_BASIC_CONFIG -#undef USE_ASM_X86_64 -#undef USE_ECMULT_STATIC_PRECOMPUTATION -#undef USE_EXTERNAL_ASM -#undef USE_EXTERNAL_DEFAULT_CALLBACKS -#undef USE_FIELD_INV_BUILTIN -#undef USE_FIELD_INV_NUM -#undef USE_NUM_GMP -#undef USE_NUM_NONE -#undef USE_SCALAR_INV_BUILTIN -#undef USE_SCALAR_INV_NUM -#undef USE_FORCE_WIDEMUL_INT64 -#undef USE_FORCE_WIDEMUL_INT128 -#undef ECMULT_WINDOW_SIZE - -#define USE_NUM_NONE 1 -#define USE_FIELD_INV_BUILTIN 1 -#define USE_SCALAR_INV_BUILTIN 1 -#define USE_WIDEMUL_64 1 #define ECMULT_WINDOW_SIZE 15 +#define ECMULT_GEN_PREC_BITS 4 #endif /* USE_BASIC_CONFIG */ diff --git a/secp256k1-sys/depend/secp256k1/src/bench_ecdh.c b/secp256k1-sys/depend/secp256k1/src/bench_ecdh.c index 615c0d4..7c9d4e4 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench_ecdh.c +++ b/secp256k1-sys/depend/secp256k1/src/bench_ecdh.c @@ -6,14 +6,14 @@ #include -#include "include/secp256k1.h" -#include "include/secp256k1_ecdh.h" +#include "../include/secp256k1.h" +#include "../include/secp256k1_ecdh.h" #include "util.h" #include "bench.h" typedef struct { - rustsecp256k1_v0_4_0_context *ctx; - rustsecp256k1_v0_4_0_pubkey point; + rustsecp256k1_v0_4_1_context *ctx; + rustsecp256k1_v0_4_1_pubkey point; unsigned char scalar[32]; } bench_ecdh_data; @@ -31,7 +31,7 @@ static void bench_ecdh_setup(void* arg) { for (i = 0; i < 32; i++) { data->scalar[i] = i + 1; } - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_parse(data->ctx, &data->point, point, sizeof(point)) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(data->ctx, &data->point, point, sizeof(point)) == 1); } static void bench_ecdh(void* arg, int iters) { @@ -40,7 +40,7 @@ static void bench_ecdh(void* arg, int iters) { bench_ecdh_data *data = (bench_ecdh_data*)arg; for (i = 0; i < iters; i++) { - CHECK(rustsecp256k1_v0_4_0_ecdh(data->ctx, res, &data->point, data->scalar, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdh(data->ctx, res, &data->point, data->scalar, NULL, NULL) == 1); } } @@ -50,10 +50,10 @@ int main(void) { int iters = get_iters(20000); /* create a context with no capabilities */ - data.ctx = rustsecp256k1_v0_4_0_context_create(SECP256K1_FLAGS_TYPE_CONTEXT); + data.ctx = rustsecp256k1_v0_4_1_context_create(SECP256K1_FLAGS_TYPE_CONTEXT); run_benchmark("ecdh", bench_ecdh, bench_ecdh_setup, NULL, &data, 10, iters); - rustsecp256k1_v0_4_0_context_destroy(data.ctx); + rustsecp256k1_v0_4_1_context_destroy(data.ctx); return 0; } diff --git a/secp256k1-sys/depend/secp256k1/src/bench_ecmult.c b/secp256k1-sys/depend/secp256k1/src/bench_ecmult.c index 
884d8f4..153b8b0 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench_ecmult.c +++ b/secp256k1-sys/depend/secp256k1/src/bench_ecmult.c @@ -5,48 +5,192 @@ ***********************************************************************/ #include -#include "include/secp256k1.h" +#include "secp256k1.c" +#include "../include/secp256k1.h" #include "util.h" #include "hash_impl.h" -#include "num_impl.h" #include "field_impl.h" #include "group_impl.h" #include "scalar_impl.h" #include "ecmult_impl.h" #include "bench.h" -#include "secp256k1.c" #define POINTS 32768 +void help(char **argv) { + printf("Benchmark EC multiplication algorithms\n"); + printf("\n"); + printf("Usage: %s \n", argv[0]); + printf("The output shows the number of multiplied and summed points right after the\n"); + printf("function name. The letter 'g' indicates that one of the points is the generator.\n"); + printf("The benchmarks are divided by the number of points.\n"); + printf("\n"); + printf("default (ecmult_multi): picks pippenger_wnaf or strauss_wnaf depending on the\n"); + printf(" batch size\n"); + printf("pippenger_wnaf: for all batch sizes\n"); + printf("strauss_wnaf: for all batch sizes\n"); + printf("simple: multiply and sum each point individually\n"); +} + typedef struct { /* Setup once in advance */ - rustsecp256k1_v0_4_0_context* ctx; - rustsecp256k1_v0_4_0_scratch_space* scratch; - rustsecp256k1_v0_4_0_scalar* scalars; - rustsecp256k1_v0_4_0_ge* pubkeys; - rustsecp256k1_v0_4_0_scalar* seckeys; - rustsecp256k1_v0_4_0_gej* expected_output; - rustsecp256k1_v0_4_0_ecmult_multi_func ecmult_multi; + rustsecp256k1_v0_4_1_context* ctx; + rustsecp256k1_v0_4_1_scratch_space* scratch; + rustsecp256k1_v0_4_1_scalar* scalars; + rustsecp256k1_v0_4_1_ge* pubkeys; + rustsecp256k1_v0_4_1_gej* pubkeys_gej; + rustsecp256k1_v0_4_1_scalar* seckeys; + rustsecp256k1_v0_4_1_gej* expected_output; + rustsecp256k1_v0_4_1_ecmult_multi_func ecmult_multi; - /* Changes per test */ + /* Changes per benchmark */ size_t count; int includes_g; - /* Changes per test iteration */ + /* Changes per benchmark iteration, used to pick different scalars and pubkeys + * in each run. */ size_t offset1; size_t offset2; - /* Test output. */ - rustsecp256k1_v0_4_0_gej* output; + /* Benchmark output. */ + rustsecp256k1_v0_4_1_gej* output; } bench_data; -static int bench_callback(rustsecp256k1_v0_4_0_scalar* sc, rustsecp256k1_v0_4_0_ge* ge, size_t idx, void* arg) { +/* Hashes x into [0, POINTS) twice and store the result in offset1 and offset2. 
*/ +static void hash_into_offset(bench_data* data, size_t x) { + data->offset1 = (x * 0x537b7f6f + 0x8f66a481) % POINTS; + data->offset2 = (x * 0x7f6f537b + 0x6a1a8f49) % POINTS; +} + +/* Check correctness of the benchmark by computing + * sum(outputs) ?= (sum(scalars_gen) + sum(seckeys)*sum(scalars))*G */ +static void bench_ecmult_teardown_helper(bench_data* data, size_t* seckey_offset, size_t* scalar_offset, size_t* scalar_gen_offset, int iters) { + int i; + rustsecp256k1_v0_4_1_gej sum_output, tmp; + rustsecp256k1_v0_4_1_scalar sum_scalars; + + rustsecp256k1_v0_4_1_gej_set_infinity(&sum_output); + rustsecp256k1_v0_4_1_scalar_clear(&sum_scalars); + for (i = 0; i < iters; ++i) { + rustsecp256k1_v0_4_1_gej_add_var(&sum_output, &sum_output, &data->output[i], NULL); + if (scalar_gen_offset != NULL) { + rustsecp256k1_v0_4_1_scalar_add(&sum_scalars, &sum_scalars, &data->scalars[(*scalar_gen_offset+i) % POINTS]); + } + if (seckey_offset != NULL) { + rustsecp256k1_v0_4_1_scalar s = data->seckeys[(*seckey_offset+i) % POINTS]; + rustsecp256k1_v0_4_1_scalar_mul(&s, &s, &data->scalars[(*scalar_offset+i) % POINTS]); + rustsecp256k1_v0_4_1_scalar_add(&sum_scalars, &sum_scalars, &s); + } + } + rustsecp256k1_v0_4_1_ecmult_gen(&data->ctx->ecmult_gen_ctx, &tmp, &sum_scalars); + rustsecp256k1_v0_4_1_gej_neg(&tmp, &tmp); + rustsecp256k1_v0_4_1_gej_add_var(&tmp, &tmp, &sum_output, NULL); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&tmp)); +} + +static void bench_ecmult_setup(void* arg) { + bench_data* data = (bench_data*)arg; + /* Re-randomize offset to ensure that we're using different scalars and + * group elements in each run. */ + hash_into_offset(data, data->offset1); +} + +static void bench_ecmult_gen(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + int i; + + for (i = 0; i < iters; ++i) { + rustsecp256k1_v0_4_1_ecmult_gen(&data->ctx->ecmult_gen_ctx, &data->output[i], &data->scalars[(data->offset1+i) % POINTS]); + } +} + +static void bench_ecmult_gen_teardown(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + bench_ecmult_teardown_helper(data, NULL, NULL, &data->offset1, iters); +} + +static void bench_ecmult_const(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + int i; + + for (i = 0; i < iters; ++i) { + rustsecp256k1_v0_4_1_ecmult_const(&data->output[i], &data->pubkeys[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], 256); + } +} + +static void bench_ecmult_const_teardown(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + bench_ecmult_teardown_helper(data, &data->offset1, &data->offset2, NULL, iters); +} + +static void bench_ecmult_1(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + int i; + + for (i = 0; i < iters; ++i) { + rustsecp256k1_v0_4_1_ecmult(&data->ctx->ecmult_ctx, &data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], NULL); + } +} + +static void bench_ecmult_1_teardown(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + bench_ecmult_teardown_helper(data, &data->offset1, &data->offset2, NULL, iters); +} + +static void bench_ecmult_1g(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + rustsecp256k1_v0_4_1_scalar zero; + int i; + + rustsecp256k1_v0_4_1_scalar_set_int(&zero, 0); + for (i = 0; i < iters; ++i) { + rustsecp256k1_v0_4_1_ecmult(&data->ctx->ecmult_ctx, &data->output[i], NULL, &zero, &data->scalars[(data->offset1+i) % POINTS]); + } +} + +static void bench_ecmult_1g_teardown(void* arg, int iters) { + 
bench_data* data = (bench_data*)arg; + bench_ecmult_teardown_helper(data, NULL, NULL, &data->offset1, iters); +} + +static void bench_ecmult_2g(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + int i; + + for (i = 0; i < iters/2; ++i) { + rustsecp256k1_v0_4_1_ecmult(&data->ctx->ecmult_ctx, &data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], &data->scalars[(data->offset1+i) % POINTS]); + } +} + +static void bench_ecmult_2g_teardown(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + bench_ecmult_teardown_helper(data, &data->offset1, &data->offset2, &data->offset1, iters/2); +} + +static void run_ecmult_bench(bench_data* data, int iters) { + char str[32]; + sprintf(str, "ecmult_gen"); + run_benchmark(str, bench_ecmult_gen, bench_ecmult_setup, bench_ecmult_gen_teardown, data, 10, iters); + sprintf(str, "ecmult_const"); + run_benchmark(str, bench_ecmult_const, bench_ecmult_setup, bench_ecmult_const_teardown, data, 10, iters); + /* ecmult with non generator point */ + sprintf(str, "ecmult 1"); + run_benchmark(str, bench_ecmult_1, bench_ecmult_setup, bench_ecmult_1_teardown, data, 10, iters); + /* ecmult with generator point */ + sprintf(str, "ecmult 1g"); + run_benchmark(str, bench_ecmult_1g, bench_ecmult_setup, bench_ecmult_1g_teardown, data, 10, iters); + /* ecmult with generator and non-generator point. The reported time is per point. */ + sprintf(str, "ecmult 2g"); + run_benchmark(str, bench_ecmult_2g, bench_ecmult_setup, bench_ecmult_2g_teardown, data, 10, 2*iters); +} + +static int bench_ecmult_multi_callback(rustsecp256k1_v0_4_1_scalar* sc, rustsecp256k1_v0_4_1_ge* ge, size_t idx, void* arg) { bench_data* data = (bench_data*)arg; if (data->includes_g) ++idx; if (idx == 0) { *sc = data->scalars[data->offset1]; - *ge = rustsecp256k1_v0_4_0_ge_const_g; + *ge = rustsecp256k1_v0_4_1_ge_const_g; } else { *sc = data->scalars[(data->offset1 + idx) % POINTS]; *ge = data->pubkeys[(data->offset2 + idx - 1) % POINTS]; @@ -54,7 +198,7 @@ static int bench_callback(rustsecp256k1_v0_4_0_scalar* sc, rustsecp256k1_v0_4_0_ return 1; } -static void bench_ecmult(void* arg, int iters) { +static void bench_ecmult_multi(void* arg, int iters) { bench_data* data = (bench_data*)arg; int includes_g = data->includes_g; @@ -63,49 +207,48 @@ static void bench_ecmult(void* arg, int iters) { iters = iters / data->count; for (iter = 0; iter < iters; ++iter) { - data->ecmult_multi(&data->ctx->error_callback, &data->ctx->ecmult_ctx, data->scratch, &data->output[iter], data->includes_g ? &data->scalars[data->offset1] : NULL, bench_callback, arg, count - includes_g); + data->ecmult_multi(&data->ctx->error_callback, &data->ctx->ecmult_ctx, data->scratch, &data->output[iter], data->includes_g ? 
&data->scalars[data->offset1] : NULL, bench_ecmult_multi_callback, arg, count - includes_g); data->offset1 = (data->offset1 + count) % POINTS; data->offset2 = (data->offset2 + count - 1) % POINTS; } } -static void bench_ecmult_setup(void* arg) { +static void bench_ecmult_multi_setup(void* arg) { bench_data* data = (bench_data*)arg; - data->offset1 = (data->count * 0x537b7f6f + 0x8f66a481) % POINTS; - data->offset2 = (data->count * 0x7f6f537b + 0x6a1a8f49) % POINTS; + hash_into_offset(data, data->count); } -static void bench_ecmult_teardown(void* arg, int iters) { +static void bench_ecmult_multi_teardown(void* arg, int iters) { bench_data* data = (bench_data*)arg; int iter; iters = iters / data->count; /* Verify the results in teardown, to avoid doing comparisons while benchmarking. */ for (iter = 0; iter < iters; ++iter) { - rustsecp256k1_v0_4_0_gej tmp; - rustsecp256k1_v0_4_0_gej_add_var(&tmp, &data->output[iter], &data->expected_output[iter], NULL); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&tmp)); + rustsecp256k1_v0_4_1_gej tmp; + rustsecp256k1_v0_4_1_gej_add_var(&tmp, &data->output[iter], &data->expected_output[iter], NULL); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&tmp)); } } -static void generate_scalar(uint32_t num, rustsecp256k1_v0_4_0_scalar* scalar) { - rustsecp256k1_v0_4_0_sha256 sha256; - unsigned char c[11] = {'e', 'c', 'm', 'u', 'l', 't', 0, 0, 0, 0}; +static void generate_scalar(uint32_t num, rustsecp256k1_v0_4_1_scalar* scalar) { + rustsecp256k1_v0_4_1_sha256 sha256; + unsigned char c[10] = {'e', 'c', 'm', 'u', 'l', 't', 0, 0, 0, 0}; unsigned char buf[32]; int overflow = 0; c[6] = num; c[7] = num >> 8; c[8] = num >> 16; c[9] = num >> 24; - rustsecp256k1_v0_4_0_sha256_initialize(&sha256); - rustsecp256k1_v0_4_0_sha256_write(&sha256, c, sizeof(c)); - rustsecp256k1_v0_4_0_sha256_finalize(&sha256, buf); - rustsecp256k1_v0_4_0_scalar_set_b32(scalar, buf, &overflow); + rustsecp256k1_v0_4_1_sha256_initialize(&sha256); + rustsecp256k1_v0_4_1_sha256_write(&sha256, c, sizeof(c)); + rustsecp256k1_v0_4_1_sha256_finalize(&sha256, buf); + rustsecp256k1_v0_4_1_scalar_set_b32(scalar, buf, &overflow); CHECK(!overflow); } -static void run_test(bench_data* data, size_t count, int includes_g, int num_iters) { +static void run_ecmult_multi_bench(bench_data* data, size_t count, int includes_g, int num_iters) { char str[32]; - static const rustsecp256k1_v0_4_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + static const rustsecp256k1_v0_4_1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); size_t iters = 1 + num_iters / count; size_t iter; @@ -113,80 +256,89 @@ static void run_test(bench_data* data, size_t count, int includes_g, int num_ite data->includes_g = includes_g; /* Compute (the negation of) the expected results directly. 
*/ - data->offset1 = (data->count * 0x537b7f6f + 0x8f66a481) % POINTS; - data->offset2 = (data->count * 0x7f6f537b + 0x6a1a8f49) % POINTS; + hash_into_offset(data, data->count); for (iter = 0; iter < iters; ++iter) { - rustsecp256k1_v0_4_0_scalar tmp; - rustsecp256k1_v0_4_0_scalar total = data->scalars[(data->offset1++) % POINTS]; + rustsecp256k1_v0_4_1_scalar tmp; + rustsecp256k1_v0_4_1_scalar total = data->scalars[(data->offset1++) % POINTS]; size_t i = 0; for (i = 0; i + 1 < count; ++i) { - rustsecp256k1_v0_4_0_scalar_mul(&tmp, &data->seckeys[(data->offset2++) % POINTS], &data->scalars[(data->offset1++) % POINTS]); - rustsecp256k1_v0_4_0_scalar_add(&total, &total, &tmp); + rustsecp256k1_v0_4_1_scalar_mul(&tmp, &data->seckeys[(data->offset2++) % POINTS], &data->scalars[(data->offset1++) % POINTS]); + rustsecp256k1_v0_4_1_scalar_add(&total, &total, &tmp); } - rustsecp256k1_v0_4_0_scalar_negate(&total, &total); - rustsecp256k1_v0_4_0_ecmult(&data->ctx->ecmult_ctx, &data->expected_output[iter], NULL, &zero, &total); + rustsecp256k1_v0_4_1_scalar_negate(&total, &total); + rustsecp256k1_v0_4_1_ecmult(&data->ctx->ecmult_ctx, &data->expected_output[iter], NULL, &zero, &total); } /* Run the benchmark. */ - sprintf(str, includes_g ? "ecmult_%ig" : "ecmult_%i", (int)count); - run_benchmark(str, bench_ecmult, bench_ecmult_setup, bench_ecmult_teardown, data, 10, count * iters); + sprintf(str, includes_g ? "ecmult_multi %ig" : "ecmult_multi %i", (int)count); + run_benchmark(str, bench_ecmult_multi, bench_ecmult_multi_setup, bench_ecmult_multi_teardown, data, 10, count * iters); } int main(int argc, char **argv) { bench_data data; int i, p; - rustsecp256k1_v0_4_0_gej* pubkeys_gej; size_t scratch_size; int iters = get_iters(10000); - data.ctx = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - scratch_size = rustsecp256k1_v0_4_0_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16; - data.scratch = rustsecp256k1_v0_4_0_scratch_space_create(data.ctx, scratch_size); - data.ecmult_multi = rustsecp256k1_v0_4_0_ecmult_multi_var; + data.ecmult_multi = rustsecp256k1_v0_4_1_ecmult_multi_var; if (argc > 1) { - if(have_flag(argc, argv, "pippenger_wnaf")) { + if(have_flag(argc, argv, "-h") + || have_flag(argc, argv, "--help") + || have_flag(argc, argv, "help")) { + help(argv); + return 1; + } else if(have_flag(argc, argv, "pippenger_wnaf")) { printf("Using pippenger_wnaf:\n"); - data.ecmult_multi = rustsecp256k1_v0_4_0_ecmult_pippenger_batch_single; + data.ecmult_multi = rustsecp256k1_v0_4_1_ecmult_pippenger_batch_single; } else if(have_flag(argc, argv, "strauss_wnaf")) { printf("Using strauss_wnaf:\n"); - data.ecmult_multi = rustsecp256k1_v0_4_0_ecmult_strauss_batch_single; + data.ecmult_multi = rustsecp256k1_v0_4_1_ecmult_strauss_batch_single; } else if(have_flag(argc, argv, "simple")) { printf("Using simple algorithm:\n"); - data.ecmult_multi = rustsecp256k1_v0_4_0_ecmult_multi_var; - rustsecp256k1_v0_4_0_scratch_space_destroy(data.ctx, data.scratch); - data.scratch = NULL; } else { - fprintf(stderr, "%s: unrecognized argument '%s'.\n", argv[0], argv[1]); - fprintf(stderr, "Use 'pippenger_wnaf', 'strauss_wnaf', 'simple' or no argument to benchmark a combined algorithm.\n"); + fprintf(stderr, "%s: unrecognized argument '%s'.\n\n", argv[0], argv[1]); + help(argv); return 1; } } + data.ctx = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + scratch_size = rustsecp256k1_v0_4_1_strauss_scratch_size(POINTS) + 
STRAUSS_SCRATCH_OBJECTS*16; + if (!have_flag(argc, argv, "simple")) { + data.scratch = rustsecp256k1_v0_4_1_scratch_space_create(data.ctx, scratch_size); + } else { + data.scratch = NULL; + } + /* Allocate stuff */ - data.scalars = malloc(sizeof(rustsecp256k1_v0_4_0_scalar) * POINTS); - data.seckeys = malloc(sizeof(rustsecp256k1_v0_4_0_scalar) * POINTS); - data.pubkeys = malloc(sizeof(rustsecp256k1_v0_4_0_ge) * POINTS); - data.expected_output = malloc(sizeof(rustsecp256k1_v0_4_0_gej) * (iters + 1)); - data.output = malloc(sizeof(rustsecp256k1_v0_4_0_gej) * (iters + 1)); + data.scalars = malloc(sizeof(rustsecp256k1_v0_4_1_scalar) * POINTS); + data.seckeys = malloc(sizeof(rustsecp256k1_v0_4_1_scalar) * POINTS); + data.pubkeys = malloc(sizeof(rustsecp256k1_v0_4_1_ge) * POINTS); + data.pubkeys_gej = malloc(sizeof(rustsecp256k1_v0_4_1_gej) * POINTS); + data.expected_output = malloc(sizeof(rustsecp256k1_v0_4_1_gej) * (iters + 1)); + data.output = malloc(sizeof(rustsecp256k1_v0_4_1_gej) * (iters + 1)); /* Generate a set of scalars, and private/public keypairs. */ - pubkeys_gej = malloc(sizeof(rustsecp256k1_v0_4_0_gej) * POINTS); - rustsecp256k1_v0_4_0_gej_set_ge(&pubkeys_gej[0], &rustsecp256k1_v0_4_0_ge_const_g); - rustsecp256k1_v0_4_0_scalar_set_int(&data.seckeys[0], 1); + rustsecp256k1_v0_4_1_gej_set_ge(&data.pubkeys_gej[0], &rustsecp256k1_v0_4_1_ge_const_g); + rustsecp256k1_v0_4_1_scalar_set_int(&data.seckeys[0], 1); for (i = 0; i < POINTS; ++i) { generate_scalar(i, &data.scalars[i]); if (i) { - rustsecp256k1_v0_4_0_gej_double_var(&pubkeys_gej[i], &pubkeys_gej[i - 1], NULL); - rustsecp256k1_v0_4_0_scalar_add(&data.seckeys[i], &data.seckeys[i - 1], &data.seckeys[i - 1]); + rustsecp256k1_v0_4_1_gej_double_var(&data.pubkeys_gej[i], &data.pubkeys_gej[i - 1], NULL); + rustsecp256k1_v0_4_1_scalar_add(&data.seckeys[i], &data.seckeys[i - 1], &data.seckeys[i - 1]); } } - rustsecp256k1_v0_4_0_ge_set_all_gej_var(data.pubkeys, pubkeys_gej, POINTS); - free(pubkeys_gej); + rustsecp256k1_v0_4_1_ge_set_all_gej_var(data.pubkeys, data.pubkeys_gej, POINTS); + + + /* Initialize offset1 and offset2 */ + hash_into_offset(&data, 0); + run_ecmult_bench(&data, iters); for (i = 1; i <= 8; ++i) { - run_test(&data, i, 1, iters); + run_ecmult_multi_bench(&data, i, 1, iters); } /* This is disabled with low count of iterations because the loop runs 77 times even with iters=1 @@ -195,17 +347,18 @@ int main(int argc, char **argv) { if (iters > 2) { for (p = 0; p <= 11; ++p) { for (i = 9; i <= 16; ++i) { - run_test(&data, i << p, 1, iters); + run_ecmult_multi_bench(&data, i << p, 1, iters); } } } if (data.scratch != NULL) { - rustsecp256k1_v0_4_0_scratch_space_destroy(data.ctx, data.scratch); + rustsecp256k1_v0_4_1_scratch_space_destroy(data.ctx, data.scratch); } - rustsecp256k1_v0_4_0_context_destroy(data.ctx); + rustsecp256k1_v0_4_1_context_destroy(data.ctx); free(data.scalars); free(data.pubkeys); + free(data.pubkeys_gej); free(data.seckeys); free(data.output); free(data.expected_output); diff --git a/secp256k1-sys/depend/secp256k1/src/bench_internal.c b/secp256k1-sys/depend/secp256k1/src/bench_internal.c index e2c7244..9b0a758 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench_internal.c +++ b/secp256k1-sys/depend/secp256k1/src/bench_internal.c @@ -5,25 +5,24 @@ ***********************************************************************/ #include -#include "include/secp256k1.h" +#include "secp256k1.c" +#include "../include/secp256k1.h" #include "assumptions.h" #include "util.h" #include "hash_impl.h" -#include "num_impl.h" 
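/* [Illustrative aside, not part of the upstream patch.] The scratch-space
 * lifecycle that bench_ecmult relies on above, shown in isolation. The
 * 4096-byte size is an arbitrary placeholder; bench_ecmult instead sizes
 * the scratch from rustsecp256k1_v0_4_1_strauss_scratch_size. */
#include "secp256k1.h"

static void scratch_space_demo(void) {
    rustsecp256k1_v0_4_1_context *ctx =
        rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_NONE);
    rustsecp256k1_v0_4_1_scratch_space *scratch =
        rustsecp256k1_v0_4_1_scratch_space_create(ctx, 4096);
    /* ... hand scratch to a multi-point multiplication ... */
    rustsecp256k1_v0_4_1_scratch_space_destroy(ctx, scratch);
    rustsecp256k1_v0_4_1_context_destroy(ctx);
}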
#include "field_impl.h" #include "group_impl.h" #include "scalar_impl.h" #include "ecmult_const_impl.h" #include "ecmult_impl.h" #include "bench.h" -#include "secp256k1.c" typedef struct { - rustsecp256k1_v0_4_0_scalar scalar[2]; - rustsecp256k1_v0_4_0_fe fe[4]; - rustsecp256k1_v0_4_0_ge ge[2]; - rustsecp256k1_v0_4_0_gej gej[2]; + rustsecp256k1_v0_4_1_scalar scalar[2]; + rustsecp256k1_v0_4_1_fe fe[4]; + rustsecp256k1_v0_4_1_ge ge[2]; + rustsecp256k1_v0_4_1_gej gej[2]; unsigned char data[64]; int wnaf[256]; } bench_inv; @@ -64,18 +63,18 @@ void bench_setup(void* arg) { } }; - rustsecp256k1_v0_4_0_scalar_set_b32(&data->scalar[0], init[0], NULL); - rustsecp256k1_v0_4_0_scalar_set_b32(&data->scalar[1], init[1], NULL); - rustsecp256k1_v0_4_0_fe_set_b32(&data->fe[0], init[0]); - rustsecp256k1_v0_4_0_fe_set_b32(&data->fe[1], init[1]); - rustsecp256k1_v0_4_0_fe_set_b32(&data->fe[2], init[2]); - rustsecp256k1_v0_4_0_fe_set_b32(&data->fe[3], init[3]); - CHECK(rustsecp256k1_v0_4_0_ge_set_xo_var(&data->ge[0], &data->fe[0], 0)); - CHECK(rustsecp256k1_v0_4_0_ge_set_xo_var(&data->ge[1], &data->fe[1], 1)); - rustsecp256k1_v0_4_0_gej_set_ge(&data->gej[0], &data->ge[0]); - rustsecp256k1_v0_4_0_gej_rescale(&data->gej[0], &data->fe[2]); - rustsecp256k1_v0_4_0_gej_set_ge(&data->gej[1], &data->ge[1]); - rustsecp256k1_v0_4_0_gej_rescale(&data->gej[1], &data->fe[3]); + rustsecp256k1_v0_4_1_scalar_set_b32(&data->scalar[0], init[0], NULL); + rustsecp256k1_v0_4_1_scalar_set_b32(&data->scalar[1], init[1], NULL); + rustsecp256k1_v0_4_1_fe_set_b32(&data->fe[0], init[0]); + rustsecp256k1_v0_4_1_fe_set_b32(&data->fe[1], init[1]); + rustsecp256k1_v0_4_1_fe_set_b32(&data->fe[2], init[2]); + rustsecp256k1_v0_4_1_fe_set_b32(&data->fe[3], init[3]); + CHECK(rustsecp256k1_v0_4_1_ge_set_xo_var(&data->ge[0], &data->fe[0], 0)); + CHECK(rustsecp256k1_v0_4_1_ge_set_xo_var(&data->ge[1], &data->fe[1], 1)); + rustsecp256k1_v0_4_1_gej_set_ge(&data->gej[0], &data->ge[0]); + rustsecp256k1_v0_4_1_gej_rescale(&data->gej[0], &data->fe[2]); + rustsecp256k1_v0_4_1_gej_set_ge(&data->gej[1], &data->ge[1]); + rustsecp256k1_v0_4_1_gej_rescale(&data->gej[1], &data->fe[3]); memcpy(data->data, init[0], 32); memcpy(data->data + 32, init[1], 32); } @@ -85,7 +84,7 @@ void bench_scalar_add(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - j += rustsecp256k1_v0_4_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); + j += rustsecp256k1_v0_4_1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } CHECK(j <= iters); } @@ -95,16 +94,7 @@ void bench_scalar_negate(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_scalar_negate(&data->scalar[0], &data->scalar[0]); - } -} - -void bench_scalar_sqr(void* arg, int iters) { - int i; - bench_inv *data = (bench_inv*)arg; - - for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_scalar_sqr(&data->scalar[0], &data->scalar[0]); + rustsecp256k1_v0_4_1_scalar_negate(&data->scalar[0], &data->scalar[0]); } } @@ -113,7 +103,7 @@ void bench_scalar_mul(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_scalar_mul(&data->scalar[0], &data->scalar[0], &data->scalar[1]); + rustsecp256k1_v0_4_1_scalar_mul(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } } @@ -122,8 +112,8 @@ void bench_scalar_split(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - 
rustsecp256k1_v0_4_0_scalar_split_lambda(&data->scalar[0], &data->scalar[1], &data->scalar[0]); - j += rustsecp256k1_v0_4_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); + rustsecp256k1_v0_4_1_scalar_split_lambda(&data->scalar[0], &data->scalar[1], &data->scalar[0]); + j += rustsecp256k1_v0_4_1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } CHECK(j <= iters); } @@ -133,8 +123,8 @@ void bench_scalar_inverse(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_scalar_inverse(&data->scalar[0], &data->scalar[0]); - j += rustsecp256k1_v0_4_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); + rustsecp256k1_v0_4_1_scalar_inverse(&data->scalar[0], &data->scalar[0]); + j += rustsecp256k1_v0_4_1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } CHECK(j <= iters); } @@ -144,8 +134,8 @@ void bench_scalar_inverse_var(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_scalar_inverse_var(&data->scalar[0], &data->scalar[0]); - j += rustsecp256k1_v0_4_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); + rustsecp256k1_v0_4_1_scalar_inverse_var(&data->scalar[0], &data->scalar[0]); + j += rustsecp256k1_v0_4_1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } CHECK(j <= iters); } @@ -155,7 +145,7 @@ void bench_field_normalize(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_fe_normalize(&data->fe[0]); + rustsecp256k1_v0_4_1_fe_normalize(&data->fe[0]); } } @@ -164,7 +154,7 @@ void bench_field_normalize_weak(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_fe_normalize_weak(&data->fe[0]); + rustsecp256k1_v0_4_1_fe_normalize_weak(&data->fe[0]); } } @@ -173,7 +163,7 @@ void bench_field_mul(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_fe_mul(&data->fe[0], &data->fe[0], &data->fe[1]); + rustsecp256k1_v0_4_1_fe_mul(&data->fe[0], &data->fe[0], &data->fe[1]); } } @@ -182,7 +172,7 @@ void bench_field_sqr(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_fe_sqr(&data->fe[0], &data->fe[0]); + rustsecp256k1_v0_4_1_fe_sqr(&data->fe[0], &data->fe[0]); } } @@ -191,8 +181,8 @@ void bench_field_inverse(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_fe_inv(&data->fe[0], &data->fe[0]); - rustsecp256k1_v0_4_0_fe_add(&data->fe[0], &data->fe[1]); + rustsecp256k1_v0_4_1_fe_inv(&data->fe[0], &data->fe[0]); + rustsecp256k1_v0_4_1_fe_add(&data->fe[0], &data->fe[1]); } } @@ -201,20 +191,20 @@ void bench_field_inverse_var(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_fe_inv_var(&data->fe[0], &data->fe[0]); - rustsecp256k1_v0_4_0_fe_add(&data->fe[0], &data->fe[1]); + rustsecp256k1_v0_4_1_fe_inv_var(&data->fe[0], &data->fe[0]); + rustsecp256k1_v0_4_1_fe_add(&data->fe[0], &data->fe[1]); } } void bench_field_sqrt(void* arg, int iters) { int i, j = 0; bench_inv *data = (bench_inv*)arg; - rustsecp256k1_v0_4_0_fe t; + rustsecp256k1_v0_4_1_fe t; for (i = 0; i < iters; i++) { t = data->fe[0]; - j += rustsecp256k1_v0_4_0_fe_sqrt(&data->fe[0], &t); - rustsecp256k1_v0_4_0_fe_add(&data->fe[0], &data->fe[1]); + j += 
rustsecp256k1_v0_4_1_fe_sqrt(&data->fe[0], &t); + rustsecp256k1_v0_4_1_fe_add(&data->fe[0], &data->fe[1]); } CHECK(j <= iters); } @@ -224,7 +214,7 @@ void bench_group_double_var(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_gej_double_var(&data->gej[0], &data->gej[0], NULL); + rustsecp256k1_v0_4_1_gej_double_var(&data->gej[0], &data->gej[0], NULL); } } @@ -233,7 +223,7 @@ void bench_group_add_var(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_gej_add_var(&data->gej[0], &data->gej[0], &data->gej[1], NULL); + rustsecp256k1_v0_4_1_gej_add_var(&data->gej[0], &data->gej[0], &data->gej[1], NULL); } } @@ -242,7 +232,7 @@ void bench_group_add_affine(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_gej_add_ge(&data->gej[0], &data->gej[0], &data->ge[1]); + rustsecp256k1_v0_4_1_gej_add_ge(&data->gej[0], &data->gej[0], &data->ge[1]); } } @@ -251,45 +241,27 @@ void bench_group_add_affine_var(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_gej_add_ge_var(&data->gej[0], &data->gej[0], &data->ge[1], NULL); + rustsecp256k1_v0_4_1_gej_add_ge_var(&data->gej[0], &data->gej[0], &data->ge[1], NULL); } } -void bench_group_jacobi_var(void* arg, int iters) { - int i, j = 0; - bench_inv *data = (bench_inv*)arg; - - for (i = 0; i < iters; i++) { - j += rustsecp256k1_v0_4_0_gej_has_quad_y_var(&data->gej[0]); - /* Vary the Y and Z coordinates of the input (the X coordinate doesn't matter to - rustsecp256k1_v0_4_0_gej_has_quad_y_var). Note that the resulting coordinates will - generally not correspond to a point on the curve, but this is not a problem - for the code being benchmarked here. Adding and normalizing have less - overhead than EC operations (which could guarantee the point remains on the - curve). */ - rustsecp256k1_v0_4_0_fe_add(&data->gej[0].y, &data->fe[1]); - rustsecp256k1_v0_4_0_fe_add(&data->gej[0].z, &data->fe[2]); - rustsecp256k1_v0_4_0_fe_normalize_var(&data->gej[0].y); - rustsecp256k1_v0_4_0_fe_normalize_var(&data->gej[0].z); - } - CHECK(j <= iters); -} - void bench_group_to_affine_var(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; ++i) { - rustsecp256k1_v0_4_0_ge_set_gej_var(&data->ge[1], &data->gej[0]); + rustsecp256k1_v0_4_1_ge_set_gej_var(&data->ge[1], &data->gej[0]); /* Use the output affine X/Y coordinates to vary the input X/Y/Z coordinates. - Similar to bench_group_jacobi_var, this approach does not result in - coordinates of points on the curve. */ - rustsecp256k1_v0_4_0_fe_add(&data->gej[0].x, &data->ge[1].y); - rustsecp256k1_v0_4_0_fe_add(&data->gej[0].y, &data->fe[2]); - rustsecp256k1_v0_4_0_fe_add(&data->gej[0].z, &data->ge[1].x); - rustsecp256k1_v0_4_0_fe_normalize_var(&data->gej[0].x); - rustsecp256k1_v0_4_0_fe_normalize_var(&data->gej[0].y); - rustsecp256k1_v0_4_0_fe_normalize_var(&data->gej[0].z); + Note that the resulting coordinates will generally not correspond to a point + on the curve, but this is not a problem for the code being benchmarked here. + Adding and normalizing have less overhead than EC operations (which could + guarantee the point remains on the curve). 
*/ + rustsecp256k1_v0_4_1_fe_add(&data->gej[0].x, &data->ge[1].y); + rustsecp256k1_v0_4_1_fe_add(&data->gej[0].y, &data->fe[2]); + rustsecp256k1_v0_4_1_fe_add(&data->gej[0].z, &data->ge[1].x); + rustsecp256k1_v0_4_1_fe_normalize_var(&data->gej[0].x); + rustsecp256k1_v0_4_1_fe_normalize_var(&data->gej[0].y); + rustsecp256k1_v0_4_1_fe_normalize_var(&data->gej[0].z); } } @@ -298,8 +270,8 @@ void bench_ecmult_wnaf(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - bits += rustsecp256k1_v0_4_0_ecmult_wnaf(data->wnaf, 256, &data->scalar[0], WINDOW_A); - overflow += rustsecp256k1_v0_4_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); + bits += rustsecp256k1_v0_4_1_ecmult_wnaf(data->wnaf, 256, &data->scalar[0], WINDOW_A); + overflow += rustsecp256k1_v0_4_1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } CHECK(overflow >= 0); CHECK(bits <= 256*iters); @@ -310,8 +282,8 @@ void bench_wnaf_const(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - bits += rustsecp256k1_v0_4_0_wnaf_const(data->wnaf, &data->scalar[0], WINDOW_A, 256); - overflow += rustsecp256k1_v0_4_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); + bits += rustsecp256k1_v0_4_1_wnaf_const(data->wnaf, &data->scalar[0], WINDOW_A, 256); + overflow += rustsecp256k1_v0_4_1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } CHECK(overflow >= 0); CHECK(bits <= 256*iters); @@ -321,35 +293,35 @@ void bench_wnaf_const(void* arg, int iters) { void bench_sha256(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; - rustsecp256k1_v0_4_0_sha256 sha; + rustsecp256k1_v0_4_1_sha256 sha; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_sha256_initialize(&sha); - rustsecp256k1_v0_4_0_sha256_write(&sha, data->data, 32); - rustsecp256k1_v0_4_0_sha256_finalize(&sha, data->data); + rustsecp256k1_v0_4_1_sha256_initialize(&sha); + rustsecp256k1_v0_4_1_sha256_write(&sha, data->data, 32); + rustsecp256k1_v0_4_1_sha256_finalize(&sha, data->data); } } void bench_hmac_sha256(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; - rustsecp256k1_v0_4_0_hmac_sha256 hmac; + rustsecp256k1_v0_4_1_hmac_sha256 hmac; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_hmac_sha256_initialize(&hmac, data->data, 32); - rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, data->data, 32); - rustsecp256k1_v0_4_0_hmac_sha256_finalize(&hmac, data->data); + rustsecp256k1_v0_4_1_hmac_sha256_initialize(&hmac, data->data, 32); + rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, data->data, 32); + rustsecp256k1_v0_4_1_hmac_sha256_finalize(&hmac, data->data); } } void bench_rfc6979_hmac_sha256(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 rng; + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 rng; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_initialize(&rng, data->data, 64); - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(&rng, data->data, 32); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_initialize(&rng, data->data, 64); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(&rng, data->data, 32); } } @@ -357,7 +329,7 @@ void bench_context_verify(void* arg, int iters) { int i; (void)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_context_destroy(rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_VERIFY)); + rustsecp256k1_v0_4_1_context_destroy(rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_VERIFY)); } } @@ -365,39 
+337,20 @@ void bench_context_sign(void* arg, int iters) { int i; (void)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_context_destroy(rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN)); + rustsecp256k1_v0_4_1_context_destroy(rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN)); } } -#ifndef USE_NUM_NONE -void bench_num_jacobi(void* arg, int iters) { - int i, j = 0; - bench_inv *data = (bench_inv*)arg; - rustsecp256k1_v0_4_0_num nx, na, norder; - - rustsecp256k1_v0_4_0_scalar_get_num(&nx, &data->scalar[0]); - rustsecp256k1_v0_4_0_scalar_order_get_num(&norder); - rustsecp256k1_v0_4_0_scalar_get_num(&na, &data->scalar[1]); - - for (i = 0; i < iters; i++) { - j += rustsecp256k1_v0_4_0_num_jacobi(&nx, &norder); - rustsecp256k1_v0_4_0_num_add(&nx, &nx, &na); - } - CHECK(j <= iters); -} -#endif - int main(int argc, char **argv) { bench_inv data; int iters = get_iters(20000); if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "add")) run_benchmark("scalar_add", bench_scalar_add, bench_setup, NULL, &data, 10, iters*100); if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "negate")) run_benchmark("scalar_negate", bench_scalar_negate, bench_setup, NULL, &data, 10, iters*100); - if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "sqr")) run_benchmark("scalar_sqr", bench_scalar_sqr, bench_setup, NULL, &data, 10, iters*10); if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "mul")) run_benchmark("scalar_mul", bench_scalar_mul, bench_setup, NULL, &data, 10, iters*10); if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "split")) run_benchmark("scalar_split", bench_scalar_split, bench_setup, NULL, &data, 10, iters); - if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse", bench_scalar_inverse, bench_setup, NULL, &data, 10, 2000); - if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse_var", bench_scalar_inverse_var, bench_setup, NULL, &data, 10, 2000); + if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse", bench_scalar_inverse, bench_setup, NULL, &data, 10, iters); + if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse_var", bench_scalar_inverse_var, bench_setup, NULL, &data, 10, iters); if (have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize", bench_field_normalize, bench_setup, NULL, &data, 10, iters*100); if (have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize_weak", bench_field_normalize_weak, bench_setup, NULL, &data, 10, iters*100); @@ -411,7 +364,6 @@ int main(int argc, char **argv) { if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_var", bench_group_add_var, bench_setup, NULL, &data, 10, iters*10); if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine", bench_group_add_affine, bench_setup, NULL, &data, 10, iters*10); if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine_var", bench_group_add_affine_var, bench_setup, NULL, &data, 10, iters*10); - if (have_flag(argc, argv, "group") || have_flag(argc, argv, "jacobi")) run_benchmark("group_jacobi_var", bench_group_jacobi_var, bench_setup, NULL, &data, 10, iters); if (have_flag(argc, argv, "group") || have_flag(argc, argv, "to_affine")) 
run_benchmark("group_to_affine_var", bench_group_to_affine_var, bench_setup, NULL, &data, 10, iters); if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("wnaf_const", bench_wnaf_const, bench_setup, NULL, &data, 10, iters); @@ -424,8 +376,5 @@ int main(int argc, char **argv) { if (have_flag(argc, argv, "context") || have_flag(argc, argv, "verify")) run_benchmark("context_verify", bench_context_verify, bench_setup, NULL, &data, 10, 1 + iters/1000); if (have_flag(argc, argv, "context") || have_flag(argc, argv, "sign")) run_benchmark("context_sign", bench_context_sign, bench_setup, NULL, &data, 10, 1 + iters/100); -#ifndef USE_NUM_NONE - if (have_flag(argc, argv, "num") || have_flag(argc, argv, "jacobi")) run_benchmark("num_jacobi", bench_num_jacobi, bench_setup, NULL, &data, 10, iters*10); -#endif return 0; } diff --git a/secp256k1-sys/depend/secp256k1/src/bench_recover.c b/secp256k1-sys/depend/secp256k1/src/bench_recover.c index 143b9be..b0e5c87 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench_recover.c +++ b/secp256k1-sys/depend/secp256k1/src/bench_recover.c @@ -4,13 +4,13 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -#include "include/secp256k1.h" -#include "include/secp256k1_recovery.h" +#include "../include/secp256k1.h" +#include "../include/secp256k1_recovery.h" #include "util.h" #include "bench.h" typedef struct { - rustsecp256k1_v0_4_0_context *ctx; + rustsecp256k1_v0_4_1_context *ctx; unsigned char msg[32]; unsigned char sig[64]; } bench_recover_data; @@ -18,16 +18,16 @@ typedef struct { void bench_recover(void* arg, int iters) { int i; bench_recover_data *data = (bench_recover_data*)arg; - rustsecp256k1_v0_4_0_pubkey pubkey; + rustsecp256k1_v0_4_1_pubkey pubkey; unsigned char pubkeyc[33]; for (i = 0; i < iters; i++) { int j; size_t pubkeylen = 33; - rustsecp256k1_v0_4_0_ecdsa_recoverable_signature sig; - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(data->ctx, &sig, data->sig, i % 2)); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(data->ctx, &pubkey, &sig, data->msg)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(data->ctx, pubkeyc, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED)); + rustsecp256k1_v0_4_1_ecdsa_recoverable_signature sig; + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(data->ctx, &sig, data->sig, i % 2)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(data->ctx, &pubkey, &sig, data->msg)); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(data->ctx, pubkeyc, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED)); for (j = 0; j < 32; j++) { data->sig[j + 32] = data->msg[j]; /* Move former message to S. */ data->msg[j] = data->sig[j]; /* Move former R to message. 
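(Aside, not part of the patch: the `i % 2` above exercises both parities of the recovery id. As a toy sketch of what recid encodes — hypothetical name, mirroring the `(overflow << 1) | fe_is_odd(&r.y)` expression that appears in ecdsa_impl.h later in this patch:

    /* recid bit 0: parity of R.y; bit 1: whether R.x overflowed the group
     * order, which the signing code notes is cryptographically unreachable. */
    int recid_toy(int y_is_odd, int x_overflowed) {
        return (x_overflowed << 1) | y_is_odd;    /* values 0..3 */
    }

)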
*/ @@ -53,10 +53,10 @@ int main(void) { int iters = get_iters(20000); - data.ctx = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_VERIFY); + data.ctx = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_VERIFY); run_benchmark("ecdsa_recover", bench_recover, bench_recover_setup, NULL, &data, 10, iters); - rustsecp256k1_v0_4_0_context_destroy(data.ctx); + rustsecp256k1_v0_4_1_context_destroy(data.ctx); return 0; } diff --git a/secp256k1-sys/depend/secp256k1/src/bench_schnorrsig.c b/secp256k1-sys/depend/secp256k1/src/bench_schnorrsig.c index 02fd02e..daa54ac 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench_schnorrsig.c +++ b/secp256k1-sys/depend/secp256k1/src/bench_schnorrsig.c @@ -8,16 +8,16 @@ #include -#include "include/secp256k1.h" -#include "include/secp256k1_schnorrsig.h" +#include "../include/secp256k1.h" +#include "../include/secp256k1_schnorrsig.h" #include "util.h" #include "bench.h" typedef struct { - rustsecp256k1_v0_4_0_context *ctx; + rustsecp256k1_v0_4_1_context *ctx; int n; - const rustsecp256k1_v0_4_0_keypair **keypairs; + const rustsecp256k1_v0_4_1_keypair **keypairs; const unsigned char **pk; const unsigned char **sigs; const unsigned char **msgs; @@ -32,7 +32,7 @@ void bench_schnorrsig_sign(void* arg, int iters) { for (i = 0; i < iters; i++) { msg[0] = i; msg[1] = i >> 8; - CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(data->ctx, sig, msg, data->keypairs[i], NULL, NULL)); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(data->ctx, sig, msg, data->keypairs[i], NULL, NULL)); } } @@ -41,9 +41,9 @@ void bench_schnorrsig_verify(void* arg, int iters) { int i; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_xonly_pubkey pk; - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(data->ctx, &pk, data->pk[i]) == 1); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(data->ctx, data->sigs[i], data->msgs[i], &pk)); + rustsecp256k1_v0_4_1_xonly_pubkey pk; + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(data->ctx, &pk, data->pk[i]) == 1); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(data->ctx, data->sigs[i], data->msgs[i], &pk)); } } @@ -52,8 +52,8 @@ int main(void) { bench_schnorrsig_data data; int iters = get_iters(10000); - data.ctx = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_VERIFY | SECP256K1_CONTEXT_SIGN); - data.keypairs = (const rustsecp256k1_v0_4_0_keypair **)malloc(iters * sizeof(rustsecp256k1_v0_4_0_keypair *)); + data.ctx = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_VERIFY | SECP256K1_CONTEXT_SIGN); + data.keypairs = (const rustsecp256k1_v0_4_1_keypair **)malloc(iters * sizeof(rustsecp256k1_v0_4_1_keypair *)); data.pk = (const unsigned char **)malloc(iters * sizeof(unsigned char *)); data.msgs = (const unsigned char **)malloc(iters * sizeof(unsigned char *)); data.sigs = (const unsigned char **)malloc(iters * sizeof(unsigned char *)); @@ -62,9 +62,9 @@ int main(void) { unsigned char sk[32]; unsigned char *msg = (unsigned char *)malloc(32); unsigned char *sig = (unsigned char *)malloc(64); - rustsecp256k1_v0_4_0_keypair *keypair = (rustsecp256k1_v0_4_0_keypair *)malloc(sizeof(*keypair)); + rustsecp256k1_v0_4_1_keypair *keypair = (rustsecp256k1_v0_4_1_keypair *)malloc(sizeof(*keypair)); unsigned char *pk_char = (unsigned char *)malloc(32); - rustsecp256k1_v0_4_0_xonly_pubkey pk; + rustsecp256k1_v0_4_1_xonly_pubkey pk; msg[0] = sk[0] = i; msg[1] = sk[1] = i >> 8; msg[2] = sk[2] = i >> 16; @@ -77,10 +77,10 @@ int main(void) { data.msgs[i] = msg; data.sigs[i] = sig; - CHECK(rustsecp256k1_v0_4_0_keypair_create(data.ctx, keypair, sk)); - 
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(data.ctx, sig, msg, keypair, NULL, NULL)); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(data.ctx, &pk, NULL, keypair)); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(data.ctx, pk_char, &pk) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_create(data.ctx, keypair, sk)); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(data.ctx, sig, msg, keypair, NULL, NULL)); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(data.ctx, &pk, NULL, keypair)); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(data.ctx, pk_char, &pk) == 1); } run_benchmark("schnorrsig_sign", bench_schnorrsig_sign, NULL, NULL, (void *) &data, 10, iters); @@ -97,6 +97,6 @@ int main(void) { free(data.msgs); free(data.sigs); - rustsecp256k1_v0_4_0_context_destroy(data.ctx); + rustsecp256k1_v0_4_1_context_destroy(data.ctx); return 0; } diff --git a/secp256k1-sys/depend/secp256k1/src/bench_sign.c b/secp256k1-sys/depend/secp256k1/src/bench_sign.c index daab28b..ef80deb 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench_sign.c +++ b/secp256k1-sys/depend/secp256k1/src/bench_sign.c @@ -4,12 +4,12 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -#include "include/secp256k1.h" +#include "../include/secp256k1.h" #include "util.h" #include "bench.h" typedef struct { - rustsecp256k1_v0_4_0_context* ctx; + rustsecp256k1_v0_4_1_context* ctx; unsigned char msg[32]; unsigned char key[32]; } bench_sign_data; @@ -34,9 +34,9 @@ static void bench_sign_run(void* arg, int iters) { for (i = 0; i < iters; i++) { size_t siglen = 74; int j; - rustsecp256k1_v0_4_0_ecdsa_signature signature; - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(data->ctx, &signature, data->msg, data->key, NULL, NULL)); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(data->ctx, sig, &siglen, &signature)); + rustsecp256k1_v0_4_1_ecdsa_signature signature; + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(data->ctx, &signature, data->msg, data->key, NULL, NULL)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der(data->ctx, sig, &siglen, &signature)); for (j = 0; j < 32; j++) { data->msg[j] = sig[j]; data->key[j] = sig[j + 32]; @@ -49,10 +49,10 @@ int main(void) { int iters = get_iters(20000); - data.ctx = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN); + data.ctx = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN); run_benchmark("ecdsa_sign", bench_sign_run, bench_sign_setup, NULL, &data, 10, iters); - rustsecp256k1_v0_4_0_context_destroy(data.ctx); + rustsecp256k1_v0_4_1_context_destroy(data.ctx); return 0; } diff --git a/secp256k1-sys/depend/secp256k1/src/bench_verify.c b/secp256k1-sys/depend/secp256k1/src/bench_verify.c index d708d2e..33dd2c1 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench_verify.c +++ b/secp256k1-sys/depend/secp256k1/src/bench_verify.c @@ -7,7 +7,7 @@ #include #include -#include "include/secp256k1.h" +#include "../include/secp256k1.h" #include "util.h" #include "bench.h" @@ -19,7 +19,7 @@ typedef struct { - rustsecp256k1_v0_4_0_context *ctx; + rustsecp256k1_v0_4_1_context *ctx; unsigned char msg[32]; unsigned char key[32]; unsigned char sig[72]; @@ -36,14 +36,14 @@ static void bench_verify(void* arg, int iters) { bench_verify_data* data = (bench_verify_data*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_4_0_pubkey pubkey; - rustsecp256k1_v0_4_0_ecdsa_signature sig; + rustsecp256k1_v0_4_1_pubkey pubkey; + rustsecp256k1_v0_4_1_ecdsa_signature sig; data->sig[data->siglen - 1] ^= (i & 
0xFF); data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF); data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_parse(data->ctx, &pubkey, data->pubkey, data->pubkeylen) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(data->ctx, &sig, data->sig, data->siglen) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(data->ctx, &sig, data->msg, &pubkey) == (i == 0)); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(data->ctx, &pubkey, data->pubkey, data->pubkeylen) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(data->ctx, &sig, data->sig, data->siglen) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(data->ctx, &sig, data->msg, &pubkey) == (i == 0)); data->sig[data->siglen - 1] ^= (i & 0xFF); data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF); data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF); @@ -82,13 +82,13 @@ static void bench_verify_openssl(void* arg, int iters) { int main(void) { int i; - rustsecp256k1_v0_4_0_pubkey pubkey; - rustsecp256k1_v0_4_0_ecdsa_signature sig; + rustsecp256k1_v0_4_1_pubkey pubkey; + rustsecp256k1_v0_4_1_ecdsa_signature sig; bench_verify_data data; int iters = get_iters(20000); - data.ctx = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + data.ctx = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); for (i = 0; i < 32; i++) { data.msg[i] = 1 + i; @@ -97,11 +97,11 @@ int main(void) { data.key[i] = 33 + i; } data.siglen = 72; - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(data.ctx, &sig, data.msg, data.key, NULL, NULL)); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(data.ctx, &pubkey, data.key)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(data.ctx, &sig, data.msg, data.key, NULL, NULL)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig)); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(data.ctx, &pubkey, data.key)); data.pubkeylen = 33; - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED) == 1); run_benchmark("ecdsa_verify", bench_verify, NULL, NULL, &data, 10, iters); #ifdef ENABLE_OPENSSL_TESTS @@ -110,6 +110,6 @@ int main(void) { EC_GROUP_free(data.ec_group); #endif - rustsecp256k1_v0_4_0_context_destroy(data.ctx); + rustsecp256k1_v0_4_1_context_destroy(data.ctx); return 0; } diff --git a/secp256k1-sys/depend/secp256k1/src/ecdsa.h b/secp256k1-sys/depend/secp256k1/src/ecdsa.h index c070704..407ec64 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecdsa.h +++ b/secp256k1-sys/depend/secp256k1/src/ecdsa.h @@ -13,9 +13,9 @@ #include "group.h" #include "ecmult.h" -static int rustsecp256k1_v0_4_0_ecdsa_sig_parse(rustsecp256k1_v0_4_0_scalar *r, rustsecp256k1_v0_4_0_scalar *s, const unsigned char *sig, size_t size); -static int rustsecp256k1_v0_4_0_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *s); -static int rustsecp256k1_v0_4_0_ecdsa_sig_verify(const rustsecp256k1_v0_4_0_ecmult_context *ctx, const rustsecp256k1_v0_4_0_scalar* r, const rustsecp256k1_v0_4_0_scalar* s, const rustsecp256k1_v0_4_0_ge *pubkey, const rustsecp256k1_v0_4_0_scalar *message); -static int rustsecp256k1_v0_4_0_ecdsa_sig_sign(const 
rustsecp256k1_v0_4_0_ecmult_gen_context *ctx, rustsecp256k1_v0_4_0_scalar* r, rustsecp256k1_v0_4_0_scalar* s, const rustsecp256k1_v0_4_0_scalar *seckey, const rustsecp256k1_v0_4_0_scalar *message, const rustsecp256k1_v0_4_0_scalar *nonce, int *recid); +static int rustsecp256k1_v0_4_1_ecdsa_sig_parse(rustsecp256k1_v0_4_1_scalar *r, rustsecp256k1_v0_4_1_scalar *s, const unsigned char *sig, size_t size); +static int rustsecp256k1_v0_4_1_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *s); +static int rustsecp256k1_v0_4_1_ecdsa_sig_verify(const rustsecp256k1_v0_4_1_ecmult_context *ctx, const rustsecp256k1_v0_4_1_scalar* r, const rustsecp256k1_v0_4_1_scalar* s, const rustsecp256k1_v0_4_1_ge *pubkey, const rustsecp256k1_v0_4_1_scalar *message); +static int rustsecp256k1_v0_4_1_ecdsa_sig_sign(const rustsecp256k1_v0_4_1_ecmult_gen_context *ctx, rustsecp256k1_v0_4_1_scalar* r, rustsecp256k1_v0_4_1_scalar* s, const rustsecp256k1_v0_4_1_scalar *seckey, const rustsecp256k1_v0_4_1_scalar *message, const rustsecp256k1_v0_4_1_scalar *nonce, int *recid); #endif /* SECP256K1_ECDSA_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecdsa_impl.h b/secp256k1-sys/depend/secp256k1/src/ecdsa_impl.h index 5489eff..fe6f9e2 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecdsa_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/ecdsa_impl.h @@ -28,7 +28,7 @@ * sage: '%x' % (EllipticCurve ([F (a), F (b)]).order()) * 'fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141' */ -static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_ecdsa_const_order_as_fe = SECP256K1_FE_CONST( +static const rustsecp256k1_v0_4_1_fe rustsecp256k1_v0_4_1_ecdsa_const_order_as_fe = SECP256K1_FE_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xBAAEDCE6UL, 0xAF48A03BUL, 0xBFD25E8CUL, 0xD0364141UL ); @@ -42,11 +42,11 @@ static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_ecdsa_const_order_as_f * sage: '%x' % (p - EllipticCurve ([F (a), F (b)]).order()) * '14551231950b75fc4402da1722fc9baee' */ -static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_ecdsa_const_p_minus_order = SECP256K1_FE_CONST( +static const rustsecp256k1_v0_4_1_fe rustsecp256k1_v0_4_1_ecdsa_const_p_minus_order = SECP256K1_FE_CONST( 0, 0, 0, 1, 0x45512319UL, 0x50B75FC4UL, 0x402DA172UL, 0x2FC9BAEEUL ); -static int rustsecp256k1_v0_4_0_der_read_len(size_t *len, const unsigned char **sigp, const unsigned char *sigend) { +static int rustsecp256k1_v0_4_1_der_read_len(size_t *len, const unsigned char **sigp, const unsigned char *sigend) { size_t lenleft; unsigned char b1; VERIFY_CHECK(len != NULL); @@ -99,7 +99,7 @@ static int rustsecp256k1_v0_4_0_der_read_len(size_t *len, const unsigned char ** return 1; } -static int rustsecp256k1_v0_4_0_der_parse_integer(rustsecp256k1_v0_4_0_scalar *r, const unsigned char **sig, const unsigned char *sigend) { +static int rustsecp256k1_v0_4_1_der_parse_integer(rustsecp256k1_v0_4_1_scalar *r, const unsigned char **sig, const unsigned char *sigend) { int overflow = 0; unsigned char ra[32] = {0}; size_t rlen; @@ -109,7 +109,7 @@ static int rustsecp256k1_v0_4_0_der_parse_integer(rustsecp256k1_v0_4_0_scalar *r return 0; } (*sig)++; - if (rustsecp256k1_v0_4_0_der_read_len(&rlen, sig, sigend) == 0) { + if (rustsecp256k1_v0_4_1_der_read_len(&rlen, sig, sigend) == 0) { return 0; } if (rlen == 0 || *sig + rlen > sigend) { @@ -141,23 +141,23 @@ static int rustsecp256k1_v0_4_0_der_parse_integer(rustsecp256k1_v0_4_0_scalar *r } if (!overflow) { 
memcpy(ra + 32 - rlen, *sig, rlen); - rustsecp256k1_v0_4_0_scalar_set_b32(r, ra, &overflow); + rustsecp256k1_v0_4_1_scalar_set_b32(r, ra, &overflow); } if (overflow) { - rustsecp256k1_v0_4_0_scalar_set_int(r, 0); + rustsecp256k1_v0_4_1_scalar_set_int(r, 0); } (*sig) += rlen; return 1; } -static int rustsecp256k1_v0_4_0_ecdsa_sig_parse(rustsecp256k1_v0_4_0_scalar *rr, rustsecp256k1_v0_4_0_scalar *rs, const unsigned char *sig, size_t size) { +static int rustsecp256k1_v0_4_1_ecdsa_sig_parse(rustsecp256k1_v0_4_1_scalar *rr, rustsecp256k1_v0_4_1_scalar *rs, const unsigned char *sig, size_t size) { const unsigned char *sigend = sig + size; size_t rlen; if (sig == sigend || *(sig++) != 0x30) { /* The encoding doesn't start with a constructed sequence (X.690-0207 8.9.1). */ return 0; } - if (rustsecp256k1_v0_4_0_der_read_len(&rlen, &sig, sigend) == 0) { + if (rustsecp256k1_v0_4_1_der_read_len(&rlen, &sig, sigend) == 0) { return 0; } if (rlen != (size_t)(sigend - sig)) { @@ -165,10 +165,10 @@ static int rustsecp256k1_v0_4_0_ecdsa_sig_parse(rustsecp256k1_v0_4_0_scalar *rr, return 0; } - if (!rustsecp256k1_v0_4_0_der_parse_integer(rr, &sig, sigend)) { + if (!rustsecp256k1_v0_4_1_der_parse_integer(rr, &sig, sigend)) { return 0; } - if (!rustsecp256k1_v0_4_0_der_parse_integer(rs, &sig, sigend)) { + if (!rustsecp256k1_v0_4_1_der_parse_integer(rs, &sig, sigend)) { return 0; } @@ -180,12 +180,12 @@ static int rustsecp256k1_v0_4_0_ecdsa_sig_parse(rustsecp256k1_v0_4_0_scalar *rr, return 1; } -static int rustsecp256k1_v0_4_0_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_4_0_scalar* ar, const rustsecp256k1_v0_4_0_scalar* as) { +static int rustsecp256k1_v0_4_1_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_4_1_scalar* ar, const rustsecp256k1_v0_4_1_scalar* as) { unsigned char r[33] = {0}, s[33] = {0}; unsigned char *rp = r, *sp = s; size_t lenR = 33, lenS = 33; - rustsecp256k1_v0_4_0_scalar_get_b32(&r[1], ar); - rustsecp256k1_v0_4_0_scalar_get_b32(&s[1], as); + rustsecp256k1_v0_4_1_scalar_get_b32(&r[1], ar); + rustsecp256k1_v0_4_1_scalar_get_b32(&s[1], as); while (lenR > 1 && rp[0] == 0 && rp[1] < 0x80) { lenR--; rp++; } while (lenS > 1 && sp[0] == 0 && sp[1] < 0x80) { lenS--; sp++; } if (*size < 6+lenS+lenR) { @@ -204,42 +204,42 @@ static int rustsecp256k1_v0_4_0_ecdsa_sig_serialize(unsigned char *sig, size_t * return 1; } -static int rustsecp256k1_v0_4_0_ecdsa_sig_verify(const rustsecp256k1_v0_4_0_ecmult_context *ctx, const rustsecp256k1_v0_4_0_scalar *sigr, const rustsecp256k1_v0_4_0_scalar *sigs, const rustsecp256k1_v0_4_0_ge *pubkey, const rustsecp256k1_v0_4_0_scalar *message) { +static int rustsecp256k1_v0_4_1_ecdsa_sig_verify(const rustsecp256k1_v0_4_1_ecmult_context *ctx, const rustsecp256k1_v0_4_1_scalar *sigr, const rustsecp256k1_v0_4_1_scalar *sigs, const rustsecp256k1_v0_4_1_ge *pubkey, const rustsecp256k1_v0_4_1_scalar *message) { unsigned char c[32]; - rustsecp256k1_v0_4_0_scalar sn, u1, u2; + rustsecp256k1_v0_4_1_scalar sn, u1, u2; #if !defined(EXHAUSTIVE_TEST_ORDER) - rustsecp256k1_v0_4_0_fe xr; + rustsecp256k1_v0_4_1_fe xr; #endif - rustsecp256k1_v0_4_0_gej pubkeyj; - rustsecp256k1_v0_4_0_gej pr; + rustsecp256k1_v0_4_1_gej pubkeyj; + rustsecp256k1_v0_4_1_gej pr; - if (rustsecp256k1_v0_4_0_scalar_is_zero(sigr) || rustsecp256k1_v0_4_0_scalar_is_zero(sigs)) { + if (rustsecp256k1_v0_4_1_scalar_is_zero(sigr) || rustsecp256k1_v0_4_1_scalar_is_zero(sigs)) { return 0; } - rustsecp256k1_v0_4_0_scalar_inverse_var(&sn, sigs); - 
rustsecp256k1_v0_4_0_scalar_mul(&u1, &sn, message); - rustsecp256k1_v0_4_0_scalar_mul(&u2, &sn, sigr); - rustsecp256k1_v0_4_0_gej_set_ge(&pubkeyj, pubkey); - rustsecp256k1_v0_4_0_ecmult(ctx, &pr, &pubkeyj, &u2, &u1); - if (rustsecp256k1_v0_4_0_gej_is_infinity(&pr)) { + rustsecp256k1_v0_4_1_scalar_inverse_var(&sn, sigs); + rustsecp256k1_v0_4_1_scalar_mul(&u1, &sn, message); + rustsecp256k1_v0_4_1_scalar_mul(&u2, &sn, sigr); + rustsecp256k1_v0_4_1_gej_set_ge(&pubkeyj, pubkey); + rustsecp256k1_v0_4_1_ecmult(ctx, &pr, &pubkeyj, &u2, &u1); + if (rustsecp256k1_v0_4_1_gej_is_infinity(&pr)) { return 0; } #if defined(EXHAUSTIVE_TEST_ORDER) { - rustsecp256k1_v0_4_0_scalar computed_r; - rustsecp256k1_v0_4_0_ge pr_ge; - rustsecp256k1_v0_4_0_ge_set_gej(&pr_ge, &pr); - rustsecp256k1_v0_4_0_fe_normalize(&pr_ge.x); + rustsecp256k1_v0_4_1_scalar computed_r; + rustsecp256k1_v0_4_1_ge pr_ge; + rustsecp256k1_v0_4_1_ge_set_gej(&pr_ge, &pr); + rustsecp256k1_v0_4_1_fe_normalize(&pr_ge.x); - rustsecp256k1_v0_4_0_fe_get_b32(c, &pr_ge.x); - rustsecp256k1_v0_4_0_scalar_set_b32(&computed_r, c, NULL); - return rustsecp256k1_v0_4_0_scalar_eq(sigr, &computed_r); + rustsecp256k1_v0_4_1_fe_get_b32(c, &pr_ge.x); + rustsecp256k1_v0_4_1_scalar_set_b32(&computed_r, c, NULL); + return rustsecp256k1_v0_4_1_scalar_eq(sigr, &computed_r); } #else - rustsecp256k1_v0_4_0_scalar_get_b32(c, sigr); - rustsecp256k1_v0_4_0_fe_set_b32(&xr, c); + rustsecp256k1_v0_4_1_scalar_get_b32(c, sigr); + rustsecp256k1_v0_4_1_fe_set_b32(&xr, c); /** We now have the recomputed R point in pr, and its claimed x coordinate (modulo n) * in xr. Naively, we would extract the x coordinate from pr (requiring a inversion modulo p), @@ -255,18 +255,18 @@ static int rustsecp256k1_v0_4_0_ecdsa_sig_verify(const rustsecp256k1_v0_4_0_ecmu * <=> (xr * pr.z^2 mod p == pr.x) || (xr + n < p && (xr + n) * pr.z^2 mod p == pr.x) * * Thus, we can avoid the inversion, but we have to check both cases separately. - * rustsecp256k1_v0_4_0_gej_eq_x implements the (xr * pr.z^2 mod p == pr.x) test. + * rustsecp256k1_v0_4_1_gej_eq_x implements the (xr * pr.z^2 mod p == pr.x) test. */ - if (rustsecp256k1_v0_4_0_gej_eq_x_var(&xr, &pr)) { + if (rustsecp256k1_v0_4_1_gej_eq_x_var(&xr, &pr)) { /* xr * pr.z^2 mod p == pr.x, so the signature is valid. */ return 1; } - if (rustsecp256k1_v0_4_0_fe_cmp_var(&xr, &rustsecp256k1_v0_4_0_ecdsa_const_p_minus_order) >= 0) { + if (rustsecp256k1_v0_4_1_fe_cmp_var(&xr, &rustsecp256k1_v0_4_1_ecdsa_const_p_minus_order) >= 0) { /* xr + n >= p, so we can skip testing the second case. */ return 0; } - rustsecp256k1_v0_4_0_fe_add(&xr, &rustsecp256k1_v0_4_0_ecdsa_const_order_as_fe); - if (rustsecp256k1_v0_4_0_gej_eq_x_var(&xr, &pr)) { + rustsecp256k1_v0_4_1_fe_add(&xr, &rustsecp256k1_v0_4_1_ecdsa_const_order_as_fe); + if (rustsecp256k1_v0_4_1_gej_eq_x_var(&xr, &pr)) { /* (xr + n) * pr.z^2 mod p == pr.x, so the signature is valid. 
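(Aside, not part of the patch: the comment above hinges on x == X/Z^2 (mod p) being equivalent to x*Z^2 == X (mod p). A toy-integer check of that equivalence, with a small prime standing in for the field modulus:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        const uint64_t p = 97;               /* toy prime in place of the field modulus */
        uint64_t X = 71, Z = 13;             /* toy Jacobian X and Z */
        uint64_t z2 = Z * Z % p;
        uint64_t inv = 1, b = z2, e = p - 2; /* z2^(p-2) == z2^-1 by Fermat */
        uint64_t x_affine;
        while (e) {
            if (e & 1) inv = inv * b % p;
            b = b * b % p;
            e >>= 1;
        }
        x_affine = X * inv % p;              /* the naive route: one modular inversion */
        assert(x_affine * z2 % p == X % p);  /* the inversion-free test agrees */
        return 0;
    }

)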
*/ return 1; } @@ -274,42 +274,42 @@ static int rustsecp256k1_v0_4_0_ecdsa_sig_verify(const rustsecp256k1_v0_4_0_ecmu #endif } -static int rustsecp256k1_v0_4_0_ecdsa_sig_sign(const rustsecp256k1_v0_4_0_ecmult_gen_context *ctx, rustsecp256k1_v0_4_0_scalar *sigr, rustsecp256k1_v0_4_0_scalar *sigs, const rustsecp256k1_v0_4_0_scalar *seckey, const rustsecp256k1_v0_4_0_scalar *message, const rustsecp256k1_v0_4_0_scalar *nonce, int *recid) { +static int rustsecp256k1_v0_4_1_ecdsa_sig_sign(const rustsecp256k1_v0_4_1_ecmult_gen_context *ctx, rustsecp256k1_v0_4_1_scalar *sigr, rustsecp256k1_v0_4_1_scalar *sigs, const rustsecp256k1_v0_4_1_scalar *seckey, const rustsecp256k1_v0_4_1_scalar *message, const rustsecp256k1_v0_4_1_scalar *nonce, int *recid) { unsigned char b[32]; - rustsecp256k1_v0_4_0_gej rp; - rustsecp256k1_v0_4_0_ge r; - rustsecp256k1_v0_4_0_scalar n; + rustsecp256k1_v0_4_1_gej rp; + rustsecp256k1_v0_4_1_ge r; + rustsecp256k1_v0_4_1_scalar n; int overflow = 0; int high; - rustsecp256k1_v0_4_0_ecmult_gen(ctx, &rp, nonce); - rustsecp256k1_v0_4_0_ge_set_gej(&r, &rp); - rustsecp256k1_v0_4_0_fe_normalize(&r.x); - rustsecp256k1_v0_4_0_fe_normalize(&r.y); - rustsecp256k1_v0_4_0_fe_get_b32(b, &r.x); - rustsecp256k1_v0_4_0_scalar_set_b32(sigr, b, &overflow); + rustsecp256k1_v0_4_1_ecmult_gen(ctx, &rp, nonce); + rustsecp256k1_v0_4_1_ge_set_gej(&r, &rp); + rustsecp256k1_v0_4_1_fe_normalize(&r.x); + rustsecp256k1_v0_4_1_fe_normalize(&r.y); + rustsecp256k1_v0_4_1_fe_get_b32(b, &r.x); + rustsecp256k1_v0_4_1_scalar_set_b32(sigr, b, &overflow); if (recid) { /* The overflow condition is cryptographically unreachable as hitting it requires finding the discrete log * of some P where P.x >= order, and only 1 in about 2^127 points meet this criteria. */ - *recid = (overflow << 1) | rustsecp256k1_v0_4_0_fe_is_odd(&r.y); + *recid = (overflow << 1) | rustsecp256k1_v0_4_1_fe_is_odd(&r.y); } - rustsecp256k1_v0_4_0_scalar_mul(&n, sigr, seckey); - rustsecp256k1_v0_4_0_scalar_add(&n, &n, message); - rustsecp256k1_v0_4_0_scalar_inverse(sigs, nonce); - rustsecp256k1_v0_4_0_scalar_mul(sigs, sigs, &n); - rustsecp256k1_v0_4_0_scalar_clear(&n); - rustsecp256k1_v0_4_0_gej_clear(&rp); - rustsecp256k1_v0_4_0_ge_clear(&r); - high = rustsecp256k1_v0_4_0_scalar_is_high(sigs); - rustsecp256k1_v0_4_0_scalar_cond_negate(sigs, high); + rustsecp256k1_v0_4_1_scalar_mul(&n, sigr, seckey); + rustsecp256k1_v0_4_1_scalar_add(&n, &n, message); + rustsecp256k1_v0_4_1_scalar_inverse(sigs, nonce); + rustsecp256k1_v0_4_1_scalar_mul(sigs, sigs, &n); + rustsecp256k1_v0_4_1_scalar_clear(&n); + rustsecp256k1_v0_4_1_gej_clear(&rp); + rustsecp256k1_v0_4_1_ge_clear(&r); + high = rustsecp256k1_v0_4_1_scalar_is_high(sigs); + rustsecp256k1_v0_4_1_scalar_cond_negate(sigs, high); if (recid) { *recid ^= high; } /* P.x = order is on the curve, so technically sig->r could end up being zero, which would be an invalid signature. * This is cryptographically unreachable as hitting it requires finding the discrete log of P.x = N. 
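(Aside, not part of the patch: the `scalar_is_high` / `scalar_cond_negate` / `*recid ^= high` sequence above is the low-S normalization — (r, s) and (r, n - s) verify the same message, so the smaller s is kept and the recovery id flipped to match. A toy-integer sketch; the real code selects without branching:

    #include <assert.h>

    int main(void) {
        const unsigned n = 101;   /* toy group order */
        unsigned s = 77;          /* a "high" s: s > n/2 */
        int recid = 0;
        int high = s > n / 2;
        if (high) {
            s = n - s;            /* canonical low-S form */
            recid ^= 1;           /* negating s corresponds to negating R, so the parity bit flips */
        }
        assert(s == 24 && recid == 1);
        return 0;
    }

)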
*/ - return !rustsecp256k1_v0_4_0_scalar_is_zero(sigr) & !rustsecp256k1_v0_4_0_scalar_is_zero(sigs); + return !rustsecp256k1_v0_4_1_scalar_is_zero(sigr) & !rustsecp256k1_v0_4_1_scalar_is_zero(sigs); } #endif /* SECP256K1_ECDSA_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/eckey.h b/secp256k1-sys/depend/secp256k1/src/eckey.h index 1979808..b1224ff 100644 --- a/secp256k1-sys/depend/secp256k1/src/eckey.h +++ b/secp256k1-sys/depend/secp256k1/src/eckey.h @@ -14,12 +14,12 @@ #include "ecmult.h" #include "ecmult_gen.h" -static int rustsecp256k1_v0_4_0_eckey_pubkey_parse(rustsecp256k1_v0_4_0_ge *elem, const unsigned char *pub, size_t size); -static int rustsecp256k1_v0_4_0_eckey_pubkey_serialize(rustsecp256k1_v0_4_0_ge *elem, unsigned char *pub, size_t *size, int compressed); +static int rustsecp256k1_v0_4_1_eckey_pubkey_parse(rustsecp256k1_v0_4_1_ge *elem, const unsigned char *pub, size_t size); +static int rustsecp256k1_v0_4_1_eckey_pubkey_serialize(rustsecp256k1_v0_4_1_ge *elem, unsigned char *pub, size_t *size, int compressed); -static int rustsecp256k1_v0_4_0_eckey_privkey_tweak_add(rustsecp256k1_v0_4_0_scalar *key, const rustsecp256k1_v0_4_0_scalar *tweak); -static int rustsecp256k1_v0_4_0_eckey_pubkey_tweak_add(const rustsecp256k1_v0_4_0_ecmult_context *ctx, rustsecp256k1_v0_4_0_ge *key, const rustsecp256k1_v0_4_0_scalar *tweak); -static int rustsecp256k1_v0_4_0_eckey_privkey_tweak_mul(rustsecp256k1_v0_4_0_scalar *key, const rustsecp256k1_v0_4_0_scalar *tweak); -static int rustsecp256k1_v0_4_0_eckey_pubkey_tweak_mul(const rustsecp256k1_v0_4_0_ecmult_context *ctx, rustsecp256k1_v0_4_0_ge *key, const rustsecp256k1_v0_4_0_scalar *tweak); +static int rustsecp256k1_v0_4_1_eckey_privkey_tweak_add(rustsecp256k1_v0_4_1_scalar *key, const rustsecp256k1_v0_4_1_scalar *tweak); +static int rustsecp256k1_v0_4_1_eckey_pubkey_tweak_add(const rustsecp256k1_v0_4_1_ecmult_context *ctx, rustsecp256k1_v0_4_1_ge *key, const rustsecp256k1_v0_4_1_scalar *tweak); +static int rustsecp256k1_v0_4_1_eckey_privkey_tweak_mul(rustsecp256k1_v0_4_1_scalar *key, const rustsecp256k1_v0_4_1_scalar *tweak); +static int rustsecp256k1_v0_4_1_eckey_pubkey_tweak_mul(const rustsecp256k1_v0_4_1_ecmult_context *ctx, rustsecp256k1_v0_4_1_ge *key, const rustsecp256k1_v0_4_1_scalar *tweak); #endif /* SECP256K1_ECKEY_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/eckey_impl.h b/secp256k1-sys/depend/secp256k1/src/eckey_impl.h index 4692c5d..b067e03 100644 --- a/secp256k1-sys/depend/secp256k1/src/eckey_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/eckey_impl.h @@ -14,82 +14,82 @@ #include "group.h" #include "ecmult_gen.h" -static int rustsecp256k1_v0_4_0_eckey_pubkey_parse(rustsecp256k1_v0_4_0_ge *elem, const unsigned char *pub, size_t size) { +static int rustsecp256k1_v0_4_1_eckey_pubkey_parse(rustsecp256k1_v0_4_1_ge *elem, const unsigned char *pub, size_t size) { if (size == 33 && (pub[0] == SECP256K1_TAG_PUBKEY_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_ODD)) { - rustsecp256k1_v0_4_0_fe x; - return rustsecp256k1_v0_4_0_fe_set_b32(&x, pub+1) && rustsecp256k1_v0_4_0_ge_set_xo_var(elem, &x, pub[0] == SECP256K1_TAG_PUBKEY_ODD); + rustsecp256k1_v0_4_1_fe x; + return rustsecp256k1_v0_4_1_fe_set_b32(&x, pub+1) && rustsecp256k1_v0_4_1_ge_set_xo_var(elem, &x, pub[0] == SECP256K1_TAG_PUBKEY_ODD); } else if (size == 65 && (pub[0] == SECP256K1_TAG_PUBKEY_UNCOMPRESSED || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) { - rustsecp256k1_v0_4_0_fe x, y; - if 
(!rustsecp256k1_v0_4_0_fe_set_b32(&x, pub+1) || !rustsecp256k1_v0_4_0_fe_set_b32(&y, pub+33)) { + rustsecp256k1_v0_4_1_fe x, y; + if (!rustsecp256k1_v0_4_1_fe_set_b32(&x, pub+1) || !rustsecp256k1_v0_4_1_fe_set_b32(&y, pub+33)) { return 0; } - rustsecp256k1_v0_4_0_ge_set_xy(elem, &x, &y); + rustsecp256k1_v0_4_1_ge_set_xy(elem, &x, &y); if ((pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD) && - rustsecp256k1_v0_4_0_fe_is_odd(&y) != (pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) { + rustsecp256k1_v0_4_1_fe_is_odd(&y) != (pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) { return 0; } - return rustsecp256k1_v0_4_0_ge_is_valid_var(elem); + return rustsecp256k1_v0_4_1_ge_is_valid_var(elem); } else { return 0; } } -static int rustsecp256k1_v0_4_0_eckey_pubkey_serialize(rustsecp256k1_v0_4_0_ge *elem, unsigned char *pub, size_t *size, int compressed) { - if (rustsecp256k1_v0_4_0_ge_is_infinity(elem)) { +static int rustsecp256k1_v0_4_1_eckey_pubkey_serialize(rustsecp256k1_v0_4_1_ge *elem, unsigned char *pub, size_t *size, int compressed) { + if (rustsecp256k1_v0_4_1_ge_is_infinity(elem)) { return 0; } - rustsecp256k1_v0_4_0_fe_normalize_var(&elem->x); - rustsecp256k1_v0_4_0_fe_normalize_var(&elem->y); - rustsecp256k1_v0_4_0_fe_get_b32(&pub[1], &elem->x); + rustsecp256k1_v0_4_1_fe_normalize_var(&elem->x); + rustsecp256k1_v0_4_1_fe_normalize_var(&elem->y); + rustsecp256k1_v0_4_1_fe_get_b32(&pub[1], &elem->x); if (compressed) { *size = 33; - pub[0] = rustsecp256k1_v0_4_0_fe_is_odd(&elem->y) ? SECP256K1_TAG_PUBKEY_ODD : SECP256K1_TAG_PUBKEY_EVEN; + pub[0] = rustsecp256k1_v0_4_1_fe_is_odd(&elem->y) ? SECP256K1_TAG_PUBKEY_ODD : SECP256K1_TAG_PUBKEY_EVEN; } else { *size = 65; pub[0] = SECP256K1_TAG_PUBKEY_UNCOMPRESSED; - rustsecp256k1_v0_4_0_fe_get_b32(&pub[33], &elem->y); + rustsecp256k1_v0_4_1_fe_get_b32(&pub[33], &elem->y); } return 1; } -static int rustsecp256k1_v0_4_0_eckey_privkey_tweak_add(rustsecp256k1_v0_4_0_scalar *key, const rustsecp256k1_v0_4_0_scalar *tweak) { - rustsecp256k1_v0_4_0_scalar_add(key, key, tweak); - return !rustsecp256k1_v0_4_0_scalar_is_zero(key); +static int rustsecp256k1_v0_4_1_eckey_privkey_tweak_add(rustsecp256k1_v0_4_1_scalar *key, const rustsecp256k1_v0_4_1_scalar *tweak) { + rustsecp256k1_v0_4_1_scalar_add(key, key, tweak); + return !rustsecp256k1_v0_4_1_scalar_is_zero(key); } -static int rustsecp256k1_v0_4_0_eckey_pubkey_tweak_add(const rustsecp256k1_v0_4_0_ecmult_context *ctx, rustsecp256k1_v0_4_0_ge *key, const rustsecp256k1_v0_4_0_scalar *tweak) { - rustsecp256k1_v0_4_0_gej pt; - rustsecp256k1_v0_4_0_scalar one; - rustsecp256k1_v0_4_0_gej_set_ge(&pt, key); - rustsecp256k1_v0_4_0_scalar_set_int(&one, 1); - rustsecp256k1_v0_4_0_ecmult(ctx, &pt, &pt, &one, tweak); +static int rustsecp256k1_v0_4_1_eckey_pubkey_tweak_add(const rustsecp256k1_v0_4_1_ecmult_context *ctx, rustsecp256k1_v0_4_1_ge *key, const rustsecp256k1_v0_4_1_scalar *tweak) { + rustsecp256k1_v0_4_1_gej pt; + rustsecp256k1_v0_4_1_scalar one; + rustsecp256k1_v0_4_1_gej_set_ge(&pt, key); + rustsecp256k1_v0_4_1_scalar_set_int(&one, 1); + rustsecp256k1_v0_4_1_ecmult(ctx, &pt, &pt, &one, tweak); - if (rustsecp256k1_v0_4_0_gej_is_infinity(&pt)) { + if (rustsecp256k1_v0_4_1_gej_is_infinity(&pt)) { return 0; } - rustsecp256k1_v0_4_0_ge_set_gej(key, &pt); + rustsecp256k1_v0_4_1_ge_set_gej(key, &pt); return 1; } -static int rustsecp256k1_v0_4_0_eckey_privkey_tweak_mul(rustsecp256k1_v0_4_0_scalar *key, const rustsecp256k1_v0_4_0_scalar *tweak) { +static int 
rustsecp256k1_v0_4_1_eckey_privkey_tweak_mul(rustsecp256k1_v0_4_1_scalar *key, const rustsecp256k1_v0_4_1_scalar *tweak) { int ret; - ret = !rustsecp256k1_v0_4_0_scalar_is_zero(tweak); + ret = !rustsecp256k1_v0_4_1_scalar_is_zero(tweak); - rustsecp256k1_v0_4_0_scalar_mul(key, key, tweak); + rustsecp256k1_v0_4_1_scalar_mul(key, key, tweak); return ret; } -static int rustsecp256k1_v0_4_0_eckey_pubkey_tweak_mul(const rustsecp256k1_v0_4_0_ecmult_context *ctx, rustsecp256k1_v0_4_0_ge *key, const rustsecp256k1_v0_4_0_scalar *tweak) { - rustsecp256k1_v0_4_0_scalar zero; - rustsecp256k1_v0_4_0_gej pt; - if (rustsecp256k1_v0_4_0_scalar_is_zero(tweak)) { +static int rustsecp256k1_v0_4_1_eckey_pubkey_tweak_mul(const rustsecp256k1_v0_4_1_ecmult_context *ctx, rustsecp256k1_v0_4_1_ge *key, const rustsecp256k1_v0_4_1_scalar *tweak) { + rustsecp256k1_v0_4_1_scalar zero; + rustsecp256k1_v0_4_1_gej pt; + if (rustsecp256k1_v0_4_1_scalar_is_zero(tweak)) { return 0; } - rustsecp256k1_v0_4_0_scalar_set_int(&zero, 0); - rustsecp256k1_v0_4_0_gej_set_ge(&pt, key); - rustsecp256k1_v0_4_0_ecmult(ctx, &pt, &pt, tweak, &zero); - rustsecp256k1_v0_4_0_ge_set_gej(key, &pt); + rustsecp256k1_v0_4_1_scalar_set_int(&zero, 0); + rustsecp256k1_v0_4_1_gej_set_ge(&pt, key); + rustsecp256k1_v0_4_1_ecmult(ctx, &pt, &pt, tweak, &zero); + rustsecp256k1_v0_4_1_ge_set_gej(key, &pt); return 1; } diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult.h b/secp256k1-sys/depend/secp256k1/src/ecmult.h index 622a0b2..688cf5b 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult.h @@ -7,28 +7,27 @@ #ifndef SECP256K1_ECMULT_H #define SECP256K1_ECMULT_H -#include "num.h" #include "group.h" #include "scalar.h" #include "scratch.h" typedef struct { /* For accelerating the computation of a*P + b*G: */ - rustsecp256k1_v0_4_0_ge_storage (*pre_g)[]; /* odd multiples of the generator */ - rustsecp256k1_v0_4_0_ge_storage (*pre_g_128)[]; /* odd multiples of 2^128*generator */ -} rustsecp256k1_v0_4_0_ecmult_context; + rustsecp256k1_v0_4_1_ge_storage (*pre_g)[]; /* odd multiples of the generator */ + rustsecp256k1_v0_4_1_ge_storage (*pre_g_128)[]; /* odd multiples of 2^128*generator */ +} rustsecp256k1_v0_4_1_ecmult_context; static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE; -static void rustsecp256k1_v0_4_0_ecmult_context_init(rustsecp256k1_v0_4_0_ecmult_context *ctx); -static void rustsecp256k1_v0_4_0_ecmult_context_build(rustsecp256k1_v0_4_0_ecmult_context *ctx, void **prealloc); -static void rustsecp256k1_v0_4_0_ecmult_context_finalize_memcpy(rustsecp256k1_v0_4_0_ecmult_context *dst, const rustsecp256k1_v0_4_0_ecmult_context *src); -static void rustsecp256k1_v0_4_0_ecmult_context_clear(rustsecp256k1_v0_4_0_ecmult_context *ctx); -static int rustsecp256k1_v0_4_0_ecmult_context_is_built(const rustsecp256k1_v0_4_0_ecmult_context *ctx); +static void rustsecp256k1_v0_4_1_ecmult_context_init(rustsecp256k1_v0_4_1_ecmult_context *ctx); +static void rustsecp256k1_v0_4_1_ecmult_context_build(rustsecp256k1_v0_4_1_ecmult_context *ctx, void **prealloc); +static void rustsecp256k1_v0_4_1_ecmult_context_finalize_memcpy(rustsecp256k1_v0_4_1_ecmult_context *dst, const rustsecp256k1_v0_4_1_ecmult_context *src); +static void rustsecp256k1_v0_4_1_ecmult_context_clear(rustsecp256k1_v0_4_1_ecmult_context *ctx); +static int rustsecp256k1_v0_4_1_ecmult_context_is_built(const rustsecp256k1_v0_4_1_ecmult_context *ctx); /** Double multiply: R = na*A + ng*G */ -static void rustsecp256k1_v0_4_0_ecmult(const 
rustsecp256k1_v0_4_0_ecmult_context *ctx, rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_scalar *na, const rustsecp256k1_v0_4_0_scalar *ng); +static void rustsecp256k1_v0_4_1_ecmult(const rustsecp256k1_v0_4_1_ecmult_context *ctx, rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_scalar *na, const rustsecp256k1_v0_4_1_scalar *ng); -typedef int (rustsecp256k1_v0_4_0_ecmult_multi_callback)(rustsecp256k1_v0_4_0_scalar *sc, rustsecp256k1_v0_4_0_ge *pt, size_t idx, void *data); +typedef int (rustsecp256k1_v0_4_1_ecmult_multi_callback)(rustsecp256k1_v0_4_1_scalar *sc, rustsecp256k1_v0_4_1_ge *pt, size_t idx, void *data); /** * Multi-multiply: R = inp_g_sc * G + sum_i ni * Ai. @@ -41,6 +40,6 @@ typedef int (rustsecp256k1_v0_4_0_ecmult_multi_callback)(rustsecp256k1_v0_4_0_sc * 0 if there is not enough scratch space for a single point or * callback returns 0 */ -static int rustsecp256k1_v0_4_0_ecmult_multi_var(const rustsecp256k1_v0_4_0_callback* error_callback, const rustsecp256k1_v0_4_0_ecmult_context *ctx, rustsecp256k1_v0_4_0_scratch *scratch, rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_scalar *inp_g_sc, rustsecp256k1_v0_4_0_ecmult_multi_callback cb, void *cbdata, size_t n); +static int rustsecp256k1_v0_4_1_ecmult_multi_var(const rustsecp256k1_v0_4_1_callback* error_callback, const rustsecp256k1_v0_4_1_ecmult_context *ctx, rustsecp256k1_v0_4_1_scratch *scratch, rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_scalar *inp_g_sc, rustsecp256k1_v0_4_1_ecmult_multi_callback cb, void *cbdata, size_t n); #endif /* SECP256K1_ECMULT_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_const.h b/secp256k1-sys/depend/secp256k1/src/ecmult_const.h index 600c31f..7d7611b 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_const.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_const.h @@ -15,6 +15,6 @@ * Here `bits` should be set to the maximum bitlength of the _absolute value_ of `q`, plus * one because we internally sometimes add 2 to the number during the WNAF conversion. */ -static void rustsecp256k1_v0_4_0_ecmult_const(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_ge *a, const rustsecp256k1_v0_4_0_scalar *q, int bits); +static void rustsecp256k1_v0_4_1_ecmult_const(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_ge *a, const rustsecp256k1_v0_4_1_scalar *q, int bits); #endif /* SECP256K1_ECMULT_CONST_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_const_impl.h b/secp256k1-sys/depend/secp256k1/src/ecmult_const_impl.h index e0596bb..155c5ea 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_const_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_const_impl.h @@ -19,12 +19,12 @@ int mask = (n) >> (sizeof(n) * CHAR_BIT - 1); \ int abs_n = ((n) + mask) ^ mask; \ int idx_n = abs_n >> 1; \ - rustsecp256k1_v0_4_0_fe neg_y; \ + rustsecp256k1_v0_4_1_fe neg_y; \ VERIFY_CHECK(((n) & 1) == 1); \ VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \ VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \ - VERIFY_SETUP(rustsecp256k1_v0_4_0_fe_clear(&(r)->x)); \ - VERIFY_SETUP(rustsecp256k1_v0_4_0_fe_clear(&(r)->y)); \ + VERIFY_SETUP(rustsecp256k1_v0_4_1_fe_clear(&(r)->x)); \ + VERIFY_SETUP(rustsecp256k1_v0_4_1_fe_clear(&(r)->y)); \ /* Unconditionally set r->x = (pre)[m].x. r->y = (pre)[m].y. because it's either the correct one \ * or will get replaced in the later iterations, this is needed to make sure `r` is initialized. 
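(Aside, not part of the patch: the cmov loop that follows reads every table entry and selects the wanted one with a constant-time move, so no memory address ever depends on the secret index. The same idea on plain integers:

    #include <stdint.h>

    uint32_t ct_select(const uint32_t *table, int n, int secret_idx) {
        uint32_t r = 0;
        int m;
        for (m = 0; m < n; m++) {
            /* mask is all-ones exactly when m == secret_idx */
            uint32_t mask = (uint32_t)0 - (uint32_t)(m == secret_idx);
            r |= table[m] & mask;
        }
        return r;
    }

)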
*/ \ (r)->x = (pre)[m].x; \ @@ -32,12 +32,12 @@ for (m = 1; m < ECMULT_TABLE_SIZE(w); m++) { \ /* This loop is used to avoid secret data in array indices. See * the comment in ecmult_gen_impl.h for rationale. */ \ - rustsecp256k1_v0_4_0_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \ - rustsecp256k1_v0_4_0_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \ + rustsecp256k1_v0_4_1_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \ + rustsecp256k1_v0_4_1_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \ } \ (r)->infinity = 0; \ - rustsecp256k1_v0_4_0_fe_negate(&neg_y, &(r)->y, 1); \ - rustsecp256k1_v0_4_0_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \ + rustsecp256k1_v0_4_1_fe_negate(&neg_y, &(r)->y, 1); \ + rustsecp256k1_v0_4_1_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \ } while(0) @@ -54,7 +54,7 @@ * * Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on pp. 335 */ -static int rustsecp256k1_v0_4_0_wnaf_const(int *wnaf, const rustsecp256k1_v0_4_0_scalar *scalar, int w, int size) { +static int rustsecp256k1_v0_4_1_wnaf_const(int *wnaf, const rustsecp256k1_v0_4_1_scalar *scalar, int w, int size) { int global_sign; int skew = 0; int word = 0; @@ -65,7 +65,7 @@ static int rustsecp256k1_v0_4_0_wnaf_const(int *wnaf, const rustsecp256k1_v0_4_0 int flip; int bit; - rustsecp256k1_v0_4_0_scalar s; + rustsecp256k1_v0_4_1_scalar s; int not_neg_one; VERIFY_CHECK(w > 0); @@ -83,32 +83,32 @@ static int rustsecp256k1_v0_4_0_wnaf_const(int *wnaf, const rustsecp256k1_v0_4_0 * particular, to ensure that the outputs from the endomorphism-split fit into * 128 bits). If we negate, the parity of our number flips, inverting which of * {1, 2} we want to add to the scalar when ensuring that it's odd. Further - * complicating things, -1 interacts badly with `rustsecp256k1_v0_4_0_scalar_cadd_bit` and + * complicating things, -1 interacts badly with `rustsecp256k1_v0_4_1_scalar_cadd_bit` and * we need to special-case it in this logic. */ - flip = rustsecp256k1_v0_4_0_scalar_is_high(scalar); + flip = rustsecp256k1_v0_4_1_scalar_is_high(scalar); /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */ - bit = flip ^ !rustsecp256k1_v0_4_0_scalar_is_even(scalar); + bit = flip ^ !rustsecp256k1_v0_4_1_scalar_is_even(scalar); /* We check for negative one, since adding 2 to it will cause an overflow */ - rustsecp256k1_v0_4_0_scalar_negate(&s, scalar); - not_neg_one = !rustsecp256k1_v0_4_0_scalar_is_one(&s); + rustsecp256k1_v0_4_1_scalar_negate(&s, scalar); + not_neg_one = !rustsecp256k1_v0_4_1_scalar_is_one(&s); s = *scalar; - rustsecp256k1_v0_4_0_scalar_cadd_bit(&s, bit, not_neg_one); + rustsecp256k1_v0_4_1_scalar_cadd_bit(&s, bit, not_neg_one); /* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects * that we added two to it and flipped it. In fact for -1 these operations are * identical. We only flipped, but since skewing is required (in the sense that * the skew must be 1 or 2, never zero) and flipping is not, we need to change * our flags to claim that we only skewed. 
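(Aside, not part of the patch: for intuition about the digits `wnaf_const` produces, here is the textbook variable-time width-w NAF — odd signed digits d_i with |d_i| < 2^(w-1) and sum(d_i * 2^i) == k. The constant-time variant above additionally forces the scalar odd, which is what the skew bookkeeping here compensates for:

    #include <assert.h>

    static int wnaf_toy(int *d, unsigned k, int w) {
        int len = 0;
        unsigned window = 1u << w, half = 1u << (w - 1);
        while (k) {
            int di = 0;
            if (k & 1) {
                di = (int)(k & (window - 1));            /* k mod 2^w, odd */
                if ((unsigned)di >= half) di -= (int)window;
                k -= (unsigned)di;                       /* unsigned wrap is exact for di < 0 */
            }
            d[len++] = di;
            k >>= 1;
        }
        return len;
    }

    int main(void) {
        int d[40], len, i;
        long long acc = 0, pw = 1;
        len = wnaf_toy(d, 123456u, 5);
        for (i = 0; i < len; i++) { acc += (long long)d[i] * pw; pw <<= 1; }
        assert(acc == 123456);                           /* the digits reconstruct k */
        return 0;
    }

)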
*/ - global_sign = rustsecp256k1_v0_4_0_scalar_cond_negate(&s, flip); + global_sign = rustsecp256k1_v0_4_1_scalar_cond_negate(&s, flip); global_sign *= not_neg_one * 2 - 1; skew = 1 << bit; /* 4 */ - u_last = rustsecp256k1_v0_4_0_scalar_shr_int(&s, w); + u_last = rustsecp256k1_v0_4_1_scalar_shr_int(&s, w); do { int even; /* 4.1 4.4 */ - u = rustsecp256k1_v0_4_0_scalar_shr_int(&s, w); + u = rustsecp256k1_v0_4_1_scalar_shr_int(&s, w); /* 4.2 */ even = ((u & 1) == 0); /* In contrast to the original algorithm, u_last is always > 0 and @@ -129,21 +129,21 @@ static int rustsecp256k1_v0_4_0_wnaf_const(int *wnaf, const rustsecp256k1_v0_4_0 } while (word * w < size); wnaf[word] = u * global_sign; - VERIFY_CHECK(rustsecp256k1_v0_4_0_scalar_is_zero(&s)); + VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_is_zero(&s)); VERIFY_CHECK(word == WNAF_SIZE_BITS(size, w)); return skew; } -static void rustsecp256k1_v0_4_0_ecmult_const(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_ge *a, const rustsecp256k1_v0_4_0_scalar *scalar, int size) { - rustsecp256k1_v0_4_0_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; - rustsecp256k1_v0_4_0_ge tmpa; - rustsecp256k1_v0_4_0_fe Z; +static void rustsecp256k1_v0_4_1_ecmult_const(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_ge *a, const rustsecp256k1_v0_4_1_scalar *scalar, int size) { + rustsecp256k1_v0_4_1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; + rustsecp256k1_v0_4_1_ge tmpa; + rustsecp256k1_v0_4_1_fe Z; int skew_1; - rustsecp256k1_v0_4_0_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; + rustsecp256k1_v0_4_1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)]; int skew_lam; - rustsecp256k1_v0_4_0_scalar q_1, q_lam; + rustsecp256k1_v0_4_1_scalar q_1, q_lam; int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)]; int i; @@ -153,12 +153,12 @@ static void rustsecp256k1_v0_4_0_ecmult_const(rustsecp256k1_v0_4_0_gej *r, const if (size > 128) { rsize = 128; /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */ - rustsecp256k1_v0_4_0_scalar_split_lambda(&q_1, &q_lam, scalar); - skew_1 = rustsecp256k1_v0_4_0_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128); - skew_lam = rustsecp256k1_v0_4_0_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128); + rustsecp256k1_v0_4_1_scalar_split_lambda(&q_1, &q_lam, scalar); + skew_1 = rustsecp256k1_v0_4_1_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128); + skew_lam = rustsecp256k1_v0_4_1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128); } else { - skew_1 = rustsecp256k1_v0_4_0_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size); + skew_1 = rustsecp256k1_v0_4_1_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size); skew_lam = 0; } @@ -168,14 +168,14 @@ static void rustsecp256k1_v0_4_0_ecmult_const(rustsecp256k1_v0_4_0_gej *r, const * that the Z coordinate was 1, use affine addition formulae, and correct * the Z coordinate of the result once at the end. 
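(Aside, not part of the patch: a sketch of why the single correction `fe_mul(&r->z, &r->z, &Z)` below suffices. With a common denominator C for the whole table, the rescaling

    phi_C(x, y) = (C^2 * x, C^3 * y)

is a group isomorphism from E: y^2 = x^3 + 7 onto E': y^2 = x^3 + 7*C^6, since (C^3*y)^2 = C^6*(x^3 + 7) = (C^2*x)^3 + 7*C^6. The ladder can therefore run on E' treating the rescaled table entries as affine; if it ends at Jacobian (X3, Y3, Z3) there, then (X3, Y3, Z3*C) is the intended point on E, because X3/(Z3*C)^2 = (X3/Z3^2)/C^2 and Y3/(Z3*C)^3 = (Y3/Z3^3)/C^3 exactly undo phi_C.)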
*/ - rustsecp256k1_v0_4_0_gej_set_ge(r, a); - rustsecp256k1_v0_4_0_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r); + rustsecp256k1_v0_4_1_gej_set_ge(r, a); + rustsecp256k1_v0_4_1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r); for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { - rustsecp256k1_v0_4_0_fe_normalize_weak(&pre_a[i].y); + rustsecp256k1_v0_4_1_fe_normalize_weak(&pre_a[i].y); } if (size > 128) { for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { - rustsecp256k1_v0_4_0_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]); + rustsecp256k1_v0_4_1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]); } } @@ -186,67 +186,67 @@ static void rustsecp256k1_v0_4_0_ecmult_const(rustsecp256k1_v0_4_0_gej *r, const i = wnaf_1[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)]; VERIFY_CHECK(i != 0); ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A); - rustsecp256k1_v0_4_0_gej_set_ge(r, &tmpa); + rustsecp256k1_v0_4_1_gej_set_ge(r, &tmpa); if (size > 128) { i = wnaf_lam[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)]; VERIFY_CHECK(i != 0); ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A); - rustsecp256k1_v0_4_0_gej_add_ge(r, r, &tmpa); + rustsecp256k1_v0_4_1_gej_add_ge(r, r, &tmpa); } /* remaining loop iterations */ for (i = WNAF_SIZE_BITS(rsize, WINDOW_A - 1) - 1; i >= 0; i--) { int n; int j; for (j = 0; j < WINDOW_A - 1; ++j) { - rustsecp256k1_v0_4_0_gej_double(r, r); + rustsecp256k1_v0_4_1_gej_double(r, r); } n = wnaf_1[i]; ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A); VERIFY_CHECK(n != 0); - rustsecp256k1_v0_4_0_gej_add_ge(r, r, &tmpa); + rustsecp256k1_v0_4_1_gej_add_ge(r, r, &tmpa); if (size > 128) { n = wnaf_lam[i]; ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A); VERIFY_CHECK(n != 0); - rustsecp256k1_v0_4_0_gej_add_ge(r, r, &tmpa); + rustsecp256k1_v0_4_1_gej_add_ge(r, r, &tmpa); } } - rustsecp256k1_v0_4_0_fe_mul(&r->z, &r->z, &Z); + rustsecp256k1_v0_4_1_fe_mul(&r->z, &r->z, &Z); { /* Correct for wNAF skew */ - rustsecp256k1_v0_4_0_ge correction = *a; - rustsecp256k1_v0_4_0_ge_storage correction_1_stor; - rustsecp256k1_v0_4_0_ge_storage correction_lam_stor; - rustsecp256k1_v0_4_0_ge_storage a2_stor; - rustsecp256k1_v0_4_0_gej tmpj; - rustsecp256k1_v0_4_0_gej_set_ge(&tmpj, &correction); - rustsecp256k1_v0_4_0_gej_double_var(&tmpj, &tmpj, NULL); - rustsecp256k1_v0_4_0_ge_set_gej(&correction, &tmpj); - rustsecp256k1_v0_4_0_ge_to_storage(&correction_1_stor, a); + rustsecp256k1_v0_4_1_ge correction = *a; + rustsecp256k1_v0_4_1_ge_storage correction_1_stor; + rustsecp256k1_v0_4_1_ge_storage correction_lam_stor; + rustsecp256k1_v0_4_1_ge_storage a2_stor; + rustsecp256k1_v0_4_1_gej tmpj; + rustsecp256k1_v0_4_1_gej_set_ge(&tmpj, &correction); + rustsecp256k1_v0_4_1_gej_double_var(&tmpj, &tmpj, NULL); + rustsecp256k1_v0_4_1_ge_set_gej(&correction, &tmpj); + rustsecp256k1_v0_4_1_ge_to_storage(&correction_1_stor, a); if (size > 128) { - rustsecp256k1_v0_4_0_ge_to_storage(&correction_lam_stor, a); + rustsecp256k1_v0_4_1_ge_to_storage(&correction_lam_stor, a); } - rustsecp256k1_v0_4_0_ge_to_storage(&a2_stor, &correction); + rustsecp256k1_v0_4_1_ge_to_storage(&a2_stor, &correction); /* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */ - rustsecp256k1_v0_4_0_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2); + rustsecp256k1_v0_4_1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2); if (size > 128) { - rustsecp256k1_v0_4_0_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2); + rustsecp256k1_v0_4_1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 
2); } /* Apply the correction */ - rustsecp256k1_v0_4_0_ge_from_storage(&correction, &correction_1_stor); - rustsecp256k1_v0_4_0_ge_neg(&correction, &correction); - rustsecp256k1_v0_4_0_gej_add_ge(r, r, &correction); + rustsecp256k1_v0_4_1_ge_from_storage(&correction, &correction_1_stor); + rustsecp256k1_v0_4_1_ge_neg(&correction, &correction); + rustsecp256k1_v0_4_1_gej_add_ge(r, r, &correction); if (size > 128) { - rustsecp256k1_v0_4_0_ge_from_storage(&correction, &correction_lam_stor); - rustsecp256k1_v0_4_0_ge_neg(&correction, &correction); - rustsecp256k1_v0_4_0_ge_mul_lambda(&correction, &correction); - rustsecp256k1_v0_4_0_gej_add_ge(r, r, &correction); + rustsecp256k1_v0_4_1_ge_from_storage(&correction, &correction_lam_stor); + rustsecp256k1_v0_4_1_ge_neg(&correction, &correction); + rustsecp256k1_v0_4_1_ge_mul_lambda(&correction, &correction); + rustsecp256k1_v0_4_1_gej_add_ge(r, r, &correction); } } } diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_gen.h b/secp256k1-sys/depend/secp256k1/src/ecmult_gen.h index 036a0bd..d6bb68d 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_gen.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_gen.h @@ -30,21 +30,21 @@ typedef struct { * None of the resulting prec group elements have a known scalar, and neither do any of * the intermediate sums while computing a*G. */ - rustsecp256k1_v0_4_0_ge_storage (*prec)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G]; /* prec[j][i] = (PREC_G)^j * i * G + U_i */ - rustsecp256k1_v0_4_0_scalar blind; - rustsecp256k1_v0_4_0_gej initial; -} rustsecp256k1_v0_4_0_ecmult_gen_context; + rustsecp256k1_v0_4_1_ge_storage (*prec)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G]; /* prec[j][i] = (PREC_G)^j * i * G + U_i */ + rustsecp256k1_v0_4_1_scalar blind; + rustsecp256k1_v0_4_1_gej initial; +} rustsecp256k1_v0_4_1_ecmult_gen_context; static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE; -static void rustsecp256k1_v0_4_0_ecmult_gen_context_init(rustsecp256k1_v0_4_0_ecmult_gen_context* ctx); -static void rustsecp256k1_v0_4_0_ecmult_gen_context_build(rustsecp256k1_v0_4_0_ecmult_gen_context* ctx, void **prealloc); -static void rustsecp256k1_v0_4_0_ecmult_gen_context_finalize_memcpy(rustsecp256k1_v0_4_0_ecmult_gen_context *dst, const rustsecp256k1_v0_4_0_ecmult_gen_context* src); -static void rustsecp256k1_v0_4_0_ecmult_gen_context_clear(rustsecp256k1_v0_4_0_ecmult_gen_context* ctx); -static int rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(const rustsecp256k1_v0_4_0_ecmult_gen_context* ctx); +static void rustsecp256k1_v0_4_1_ecmult_gen_context_init(rustsecp256k1_v0_4_1_ecmult_gen_context* ctx); +static void rustsecp256k1_v0_4_1_ecmult_gen_context_build(rustsecp256k1_v0_4_1_ecmult_gen_context* ctx, void **prealloc); +static void rustsecp256k1_v0_4_1_ecmult_gen_context_finalize_memcpy(rustsecp256k1_v0_4_1_ecmult_gen_context *dst, const rustsecp256k1_v0_4_1_ecmult_gen_context* src); +static void rustsecp256k1_v0_4_1_ecmult_gen_context_clear(rustsecp256k1_v0_4_1_ecmult_gen_context* ctx); +static int rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(const rustsecp256k1_v0_4_1_ecmult_gen_context* ctx); /** Multiply with the generator: R = a*G */ -static void rustsecp256k1_v0_4_0_ecmult_gen(const rustsecp256k1_v0_4_0_ecmult_gen_context* ctx, rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_scalar *a); +static void rustsecp256k1_v0_4_1_ecmult_gen(const rustsecp256k1_v0_4_1_ecmult_gen_context* ctx, rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_scalar *a); -static void 
rustsecp256k1_v0_4_0_ecmult_gen_blind(rustsecp256k1_v0_4_0_ecmult_gen_context *ctx, const unsigned char *seed32); +static void rustsecp256k1_v0_4_1_ecmult_gen_blind(rustsecp256k1_v0_4_1_ecmult_gen_context *ctx, const unsigned char *seed32); #endif /* SECP256K1_ECMULT_GEN_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_gen_impl.h b/secp256k1-sys/depend/secp256k1/src/ecmult_gen_impl.h index 5e658a3..6da969e 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_gen_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_gen_impl.h @@ -17,20 +17,20 @@ #endif #ifndef USE_ECMULT_STATIC_PRECOMPUTATION - static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = ROUND_TO_ALIGN(sizeof(*((rustsecp256k1_v0_4_0_ecmult_gen_context*) NULL)->prec)); + static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = ROUND_TO_ALIGN(sizeof(*((rustsecp256k1_v0_4_1_ecmult_gen_context*) NULL)->prec)); #else static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = 0; #endif -static void rustsecp256k1_v0_4_0_ecmult_gen_context_init(rustsecp256k1_v0_4_0_ecmult_gen_context *ctx) { +static void rustsecp256k1_v0_4_1_ecmult_gen_context_init(rustsecp256k1_v0_4_1_ecmult_gen_context *ctx) { ctx->prec = NULL; } -static void rustsecp256k1_v0_4_0_ecmult_gen_context_build(rustsecp256k1_v0_4_0_ecmult_gen_context *ctx, void **prealloc) { +static void rustsecp256k1_v0_4_1_ecmult_gen_context_build(rustsecp256k1_v0_4_1_ecmult_gen_context *ctx, void **prealloc) { #ifndef USE_ECMULT_STATIC_PRECOMPUTATION - rustsecp256k1_v0_4_0_ge prec[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G]; - rustsecp256k1_v0_4_0_gej gj; - rustsecp256k1_v0_4_0_gej nums_gej; + rustsecp256k1_v0_4_1_ge prec[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G]; + rustsecp256k1_v0_4_1_gej gj; + rustsecp256k1_v0_4_1_gej nums_gej; int i, j; size_t const prealloc_size = SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE; void* const base = *prealloc; @@ -40,101 +40,101 @@ static void rustsecp256k1_v0_4_0_ecmult_gen_context_build(rustsecp256k1_v0_4_0_e return; } #ifndef USE_ECMULT_STATIC_PRECOMPUTATION - ctx->prec = (rustsecp256k1_v0_4_0_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])manual_alloc(prealloc, prealloc_size, base, prealloc_size); + ctx->prec = (rustsecp256k1_v0_4_1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])manual_alloc(prealloc, prealloc_size, base, prealloc_size); /* get the generator */ - rustsecp256k1_v0_4_0_gej_set_ge(&gj, &rustsecp256k1_v0_4_0_ge_const_g); + rustsecp256k1_v0_4_1_gej_set_ge(&gj, &rustsecp256k1_v0_4_1_ge_const_g); /* Construct a group element with no known corresponding scalar (nothing up my sleeve). */ { static const unsigned char nums_b32[33] = "The scalar for this x is unknown"; - rustsecp256k1_v0_4_0_fe nums_x; - rustsecp256k1_v0_4_0_ge nums_ge; + rustsecp256k1_v0_4_1_fe nums_x; + rustsecp256k1_v0_4_1_ge nums_ge; int r; - r = rustsecp256k1_v0_4_0_fe_set_b32(&nums_x, nums_b32); + r = rustsecp256k1_v0_4_1_fe_set_b32(&nums_x, nums_b32); (void)r; VERIFY_CHECK(r); - r = rustsecp256k1_v0_4_0_ge_set_xo_var(&nums_ge, &nums_x, 0); + r = rustsecp256k1_v0_4_1_ge_set_xo_var(&nums_ge, &nums_x, 0); (void)r; VERIFY_CHECK(r); - rustsecp256k1_v0_4_0_gej_set_ge(&nums_gej, &nums_ge); + rustsecp256k1_v0_4_1_gej_set_ge(&nums_gej, &nums_ge); /* Add G to make the bits in x uniformly distributed. */ - rustsecp256k1_v0_4_0_gej_add_ge_var(&nums_gej, &nums_gej, &rustsecp256k1_v0_4_0_ge_const_g, NULL); + rustsecp256k1_v0_4_1_gej_add_ge_var(&nums_gej, &nums_gej, &rustsecp256k1_v0_4_1_ge_const_g, NULL); } /* compute prec. 
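 * (Illustrative note, not in the upstream comment: prec[j][i] ends up as
 * i*(PREC_G)^j*G + U_j, where the row offset U_j is 2^j*nums for
 * j < ECMULT_GEN_PREC_N-1 and (1 - 2^(ECMULT_GEN_PREC_N-1))*nums for the last
 * row, so sum_j U_j = (1 + 2 + ... + 2^(N-2) + 1 - 2^(N-1))*nums = 0 and the
 * offsets cancel once rustsecp256k1_v0_4_1_ecmult_gen adds one entry per row.)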
*/ { - rustsecp256k1_v0_4_0_gej precj[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G]; /* Jacobian versions of prec. */ - rustsecp256k1_v0_4_0_gej gbase; - rustsecp256k1_v0_4_0_gej numsbase; + rustsecp256k1_v0_4_1_gej precj[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G]; /* Jacobian versions of prec. */ + rustsecp256k1_v0_4_1_gej gbase; + rustsecp256k1_v0_4_1_gej numsbase; gbase = gj; /* PREC_G^j * G */ numsbase = nums_gej; /* 2^j * nums. */ for (j = 0; j < ECMULT_GEN_PREC_N; j++) { /* Set precj[j*PREC_G .. j*PREC_G+(PREC_G-1)] to (numsbase, numsbase + gbase, ..., numsbase + (PREC_G-1)*gbase). */ precj[j*ECMULT_GEN_PREC_G] = numsbase; for (i = 1; i < ECMULT_GEN_PREC_G; i++) { - rustsecp256k1_v0_4_0_gej_add_var(&precj[j*ECMULT_GEN_PREC_G + i], &precj[j*ECMULT_GEN_PREC_G + i - 1], &gbase, NULL); + rustsecp256k1_v0_4_1_gej_add_var(&precj[j*ECMULT_GEN_PREC_G + i], &precj[j*ECMULT_GEN_PREC_G + i - 1], &gbase, NULL); } /* Multiply gbase by PREC_G. */ for (i = 0; i < ECMULT_GEN_PREC_B; i++) { - rustsecp256k1_v0_4_0_gej_double_var(&gbase, &gbase, NULL); + rustsecp256k1_v0_4_1_gej_double_var(&gbase, &gbase, NULL); } /* Multiply numbase by 2. */ - rustsecp256k1_v0_4_0_gej_double_var(&numsbase, &numsbase, NULL); + rustsecp256k1_v0_4_1_gej_double_var(&numsbase, &numsbase, NULL); if (j == ECMULT_GEN_PREC_N - 2) { /* In the last iteration, numsbase is (1 - 2^j) * nums instead. */ - rustsecp256k1_v0_4_0_gej_neg(&numsbase, &numsbase); - rustsecp256k1_v0_4_0_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL); + rustsecp256k1_v0_4_1_gej_neg(&numsbase, &numsbase); + rustsecp256k1_v0_4_1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL); } } - rustsecp256k1_v0_4_0_ge_set_all_gej_var(prec, precj, ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G); + rustsecp256k1_v0_4_1_ge_set_all_gej_var(prec, precj, ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G); } for (j = 0; j < ECMULT_GEN_PREC_N; j++) { for (i = 0; i < ECMULT_GEN_PREC_G; i++) { - rustsecp256k1_v0_4_0_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*ECMULT_GEN_PREC_G + i]); + rustsecp256k1_v0_4_1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*ECMULT_GEN_PREC_G + i]); } } #else (void)prealloc; - ctx->prec = (rustsecp256k1_v0_4_0_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])rustsecp256k1_v0_4_0_ecmult_static_context; + ctx->prec = (rustsecp256k1_v0_4_1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])rustsecp256k1_v0_4_1_ecmult_static_context; #endif - rustsecp256k1_v0_4_0_ecmult_gen_blind(ctx, NULL); + rustsecp256k1_v0_4_1_ecmult_gen_blind(ctx, NULL); } -static int rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(const rustsecp256k1_v0_4_0_ecmult_gen_context* ctx) { +static int rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(const rustsecp256k1_v0_4_1_ecmult_gen_context* ctx) { return ctx->prec != NULL; } -static void rustsecp256k1_v0_4_0_ecmult_gen_context_finalize_memcpy(rustsecp256k1_v0_4_0_ecmult_gen_context *dst, const rustsecp256k1_v0_4_0_ecmult_gen_context *src) { +static void rustsecp256k1_v0_4_1_ecmult_gen_context_finalize_memcpy(rustsecp256k1_v0_4_1_ecmult_gen_context *dst, const rustsecp256k1_v0_4_1_ecmult_gen_context *src) { #ifndef USE_ECMULT_STATIC_PRECOMPUTATION if (src->prec != NULL) { /* We cast to void* first to suppress a -Wcast-align warning. 
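 * (Illustrative note, not in the upstream comment: the context is memcpy'd as
 * one contiguous block, so the interior prec pointer is re-based by reusing
 * src's own offset, dst + ((char*)src->prec - (char*)src), instead of being
 * copied verbatim.)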
*/ - dst->prec = (rustsecp256k1_v0_4_0_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])(void*)((unsigned char*)dst + ((unsigned char*)src->prec - (unsigned char*)src)); + dst->prec = (rustsecp256k1_v0_4_1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])(void*)((unsigned char*)dst + ((unsigned char*)src->prec - (unsigned char*)src)); } #else (void)dst, (void)src; #endif } -static void rustsecp256k1_v0_4_0_ecmult_gen_context_clear(rustsecp256k1_v0_4_0_ecmult_gen_context *ctx) { - rustsecp256k1_v0_4_0_scalar_clear(&ctx->blind); - rustsecp256k1_v0_4_0_gej_clear(&ctx->initial); +static void rustsecp256k1_v0_4_1_ecmult_gen_context_clear(rustsecp256k1_v0_4_1_ecmult_gen_context *ctx) { + rustsecp256k1_v0_4_1_scalar_clear(&ctx->blind); + rustsecp256k1_v0_4_1_gej_clear(&ctx->initial); ctx->prec = NULL; } -static void rustsecp256k1_v0_4_0_ecmult_gen(const rustsecp256k1_v0_4_0_ecmult_gen_context *ctx, rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_scalar *gn) { - rustsecp256k1_v0_4_0_ge add; - rustsecp256k1_v0_4_0_ge_storage adds; - rustsecp256k1_v0_4_0_scalar gnb; +static void rustsecp256k1_v0_4_1_ecmult_gen(const rustsecp256k1_v0_4_1_ecmult_gen_context *ctx, rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_scalar *gn) { + rustsecp256k1_v0_4_1_ge add; + rustsecp256k1_v0_4_1_ge_storage adds; + rustsecp256k1_v0_4_1_scalar gnb; int bits; int i, j; memset(&adds, 0, sizeof(adds)); *r = ctx->initial; /* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. */ - rustsecp256k1_v0_4_0_scalar_add(&gnb, gn, &ctx->blind); + rustsecp256k1_v0_4_1_scalar_add(&gnb, gn, &ctx->blind); add.infinity = 0; for (j = 0; j < ECMULT_GEN_PREC_N; j++) { - bits = rustsecp256k1_v0_4_0_scalar_get_bits(&gnb, j * ECMULT_GEN_PREC_B, ECMULT_GEN_PREC_B); + bits = rustsecp256k1_v0_4_1_scalar_get_bits(&gnb, j * ECMULT_GEN_PREC_B, ECMULT_GEN_PREC_B); for (i = 0; i < ECMULT_GEN_PREC_G; i++) { /** This uses a conditional move to avoid any secret data in array indexes. * _Any_ use of secret indexes has been demonstrated to result in timing @@ -146,33 +146,33 @@ static void rustsecp256k1_v0_4_0_ecmult_gen(const rustsecp256k1_v0_4_0_ecmult_ge * by Dag Arne Osvik, Adi Shamir, and Eran Tromer * (https://www.tau.ac.il/~tromer/papers/cache.pdf) */ - rustsecp256k1_v0_4_0_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits); + rustsecp256k1_v0_4_1_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits); } - rustsecp256k1_v0_4_0_ge_from_storage(&add, &adds); - rustsecp256k1_v0_4_0_gej_add_ge(r, r, &add); + rustsecp256k1_v0_4_1_ge_from_storage(&add, &adds); + rustsecp256k1_v0_4_1_gej_add_ge(r, r, &add); } bits = 0; - rustsecp256k1_v0_4_0_ge_clear(&add); - rustsecp256k1_v0_4_0_scalar_clear(&gnb); + rustsecp256k1_v0_4_1_ge_clear(&add); + rustsecp256k1_v0_4_1_scalar_clear(&gnb); } -/* Setup blinding values for rustsecp256k1_v0_4_0_ecmult_gen. */ -static void rustsecp256k1_v0_4_0_ecmult_gen_blind(rustsecp256k1_v0_4_0_ecmult_gen_context *ctx, const unsigned char *seed32) { - rustsecp256k1_v0_4_0_scalar b; - rustsecp256k1_v0_4_0_gej gb; - rustsecp256k1_v0_4_0_fe s; +/* Setup blinding values for rustsecp256k1_v0_4_1_ecmult_gen. 
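 * (Illustrative note, not in the upstream comment: on return ctx->blind = -b
 * and ctx->initial = b*G for a freshly derived b, so a later call to
 * rustsecp256k1_v0_4_1_ecmult_gen computes r = b*G + (gn - b)*G = gn*G while
 * the bits driving the table selection come from gn - b rather than from gn
 * itself; the reset path (seed32 == NULL) corresponds to b = -1, i.e.
 * initial = -G and blind = 1.)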
*/ +static void rustsecp256k1_v0_4_1_ecmult_gen_blind(rustsecp256k1_v0_4_1_ecmult_gen_context *ctx, const unsigned char *seed32) { + rustsecp256k1_v0_4_1_scalar b; + rustsecp256k1_v0_4_1_gej gb; + rustsecp256k1_v0_4_1_fe s; unsigned char nonce32[32]; - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 rng; + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 rng; int overflow; unsigned char keydata[64] = {0}; if (seed32 == NULL) { /* When seed is NULL, reset the initial point and blinding value. */ - rustsecp256k1_v0_4_0_gej_set_ge(&ctx->initial, &rustsecp256k1_v0_4_0_ge_const_g); - rustsecp256k1_v0_4_0_gej_neg(&ctx->initial, &ctx->initial); - rustsecp256k1_v0_4_0_scalar_set_int(&ctx->blind, 1); + rustsecp256k1_v0_4_1_gej_set_ge(&ctx->initial, &rustsecp256k1_v0_4_1_ge_const_g); + rustsecp256k1_v0_4_1_gej_neg(&ctx->initial, &ctx->initial); + rustsecp256k1_v0_4_1_scalar_set_int(&ctx->blind, 1); } /* The prior blinding value (if not reset) is chained forward by including it in the hash. */ - rustsecp256k1_v0_4_0_scalar_get_b32(nonce32, &ctx->blind); + rustsecp256k1_v0_4_1_scalar_get_b32(nonce32, &ctx->blind); /** Using a CSPRNG allows a failure free interface, avoids needing large amounts of random data, * and guards against weak or adversarial seeds. This is a simpler and safer interface than * asking the caller for blinding values directly and expecting them to retry on failure. @@ -181,28 +181,28 @@ static void rustsecp256k1_v0_4_0_ecmult_gen_blind(rustsecp256k1_v0_4_0_ecmult_ge if (seed32 != NULL) { memcpy(keydata + 32, seed32, 32); } - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_initialize(&rng, keydata, seed32 ? 64 : 32); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_initialize(&rng, keydata, seed32 ? 64 : 32); memset(keydata, 0, sizeof(keydata)); /* Accept unobservably small non-uniformity. */ - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); - overflow = !rustsecp256k1_v0_4_0_fe_set_b32(&s, nonce32); - overflow |= rustsecp256k1_v0_4_0_fe_is_zero(&s); - rustsecp256k1_v0_4_0_fe_cmov(&s, &rustsecp256k1_v0_4_0_fe_one, overflow); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); + overflow = !rustsecp256k1_v0_4_1_fe_set_b32(&s, nonce32); + overflow |= rustsecp256k1_v0_4_1_fe_is_zero(&s); + rustsecp256k1_v0_4_1_fe_cmov(&s, &rustsecp256k1_v0_4_1_fe_one, overflow); /* Randomize the projection to defend against multiplier sidechannels. */ - rustsecp256k1_v0_4_0_gej_rescale(&ctx->initial, &s); - rustsecp256k1_v0_4_0_fe_clear(&s); - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); - rustsecp256k1_v0_4_0_scalar_set_b32(&b, nonce32, NULL); + rustsecp256k1_v0_4_1_gej_rescale(&ctx->initial, &s); + rustsecp256k1_v0_4_1_fe_clear(&s); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); + rustsecp256k1_v0_4_1_scalar_set_b32(&b, nonce32, NULL); /* A blinding value of 0 works, but would undermine the projection hardening. 
*/ - rustsecp256k1_v0_4_0_scalar_cmov(&b, &rustsecp256k1_v0_4_0_scalar_one, rustsecp256k1_v0_4_0_scalar_is_zero(&b)); - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_4_1_scalar_cmov(&b, &rustsecp256k1_v0_4_1_scalar_one, rustsecp256k1_v0_4_1_scalar_is_zero(&b)); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_finalize(&rng); memset(nonce32, 0, 32); - rustsecp256k1_v0_4_0_ecmult_gen(ctx, &gb, &b); - rustsecp256k1_v0_4_0_scalar_negate(&b, &b); + rustsecp256k1_v0_4_1_ecmult_gen(ctx, &gb, &b); + rustsecp256k1_v0_4_1_scalar_negate(&b, &b); ctx->blind = b; ctx->initial = gb; - rustsecp256k1_v0_4_0_scalar_clear(&b); - rustsecp256k1_v0_4_0_gej_clear(&gb); + rustsecp256k1_v0_4_1_scalar_clear(&b); + rustsecp256k1_v0_4_1_gej_clear(&gb); } #endif /* SECP256K1_ECMULT_GEN_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_impl.h b/secp256k1-sys/depend/secp256k1/src/ecmult_impl.h index 3cdbcd2..cc219e6 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_impl.h @@ -35,8 +35,8 @@ /** Larger values for ECMULT_WINDOW_SIZE result in possibly better * performance at the cost of an exponentially larger precomputed * table. The exact table size is - * (1 << (WINDOW_G - 2)) * sizeof(rustsecp256k1_v0_4_0_ge_storage) bytes, - * where sizeof(rustsecp256k1_v0_4_0_ge_storage) is typically 64 bytes but can + * (1 << (WINDOW_G - 2)) * sizeof(rustsecp256k1_v0_4_1_ge_storage) bytes, + * where sizeof(rustsecp256k1_v0_4_1_ge_storage) is typically 64 bytes but can * be larger due to platform-specific padding and alignment. * Two tables of this size are used (due to the endomorphism * optimization). @@ -82,14 +82,14 @@ * contain prej[0].z / a.z. The other zr[i] values = prej[i].z / prej[i-1].z. * Prej's Z values are undefined, except for the last value. */ -static void rustsecp256k1_v0_4_0_ecmult_odd_multiples_table(int n, rustsecp256k1_v0_4_0_gej *prej, rustsecp256k1_v0_4_0_fe *zr, const rustsecp256k1_v0_4_0_gej *a) { - rustsecp256k1_v0_4_0_gej d; - rustsecp256k1_v0_4_0_ge a_ge, d_ge; +static void rustsecp256k1_v0_4_1_ecmult_odd_multiples_table(int n, rustsecp256k1_v0_4_1_gej *prej, rustsecp256k1_v0_4_1_fe *zr, const rustsecp256k1_v0_4_1_gej *a) { + rustsecp256k1_v0_4_1_gej d; + rustsecp256k1_v0_4_1_ge a_ge, d_ge; int i; VERIFY_CHECK(!a->infinity); - rustsecp256k1_v0_4_0_gej_double_var(&d, a, NULL); + rustsecp256k1_v0_4_1_gej_double_var(&d, a, NULL); /* * Perform the additions on an isomorphism where 'd' is affine: drop the z coordinate @@ -99,7 +99,7 @@ static void rustsecp256k1_v0_4_0_ecmult_odd_multiples_table(int n, rustsecp256k1 d_ge.y = d.y; d_ge.infinity = 0; - rustsecp256k1_v0_4_0_ge_set_gej_zinv(&a_ge, a, &d.z); + rustsecp256k1_v0_4_1_ge_set_gej_zinv(&a_ge, a, &d.z); prej[0].x = a_ge.x; prej[0].y = a_ge.y; prej[0].z = a->z; @@ -107,24 +107,24 @@ static void rustsecp256k1_v0_4_0_ecmult_odd_multiples_table(int n, rustsecp256k1 zr[0] = d.z; for (i = 1; i < n; i++) { - rustsecp256k1_v0_4_0_gej_add_ge_var(&prej[i], &prej[i-1], &d_ge, &zr[i]); + rustsecp256k1_v0_4_1_gej_add_ge_var(&prej[i], &prej[i-1], &d_ge, &zr[i]); } /* * Each point in 'prej' has a z coordinate too small by a factor of 'd.z'. Only * the final point's z coordinate is actually used though, so just update that. */ - rustsecp256k1_v0_4_0_fe_mul(&prej[n-1].z, &prej[n-1].z, &d.z); + rustsecp256k1_v0_4_1_fe_mul(&prej[n-1].z, &prej[n-1].z, &d.z); } /** Fill a table 'pre' with precomputed odd multiples of a. 
* * There are two versions of this function: - * - rustsecp256k1_v0_4_0_ecmult_odd_multiples_table_globalz_windowa which brings its + * - rustsecp256k1_v0_4_1_ecmult_odd_multiples_table_globalz_windowa which brings its * resulting point set to a single constant Z denominator, stores the X and Y * coordinates as ge_storage points in pre, and stores the global Z in rz. * It only operates on tables sized for WINDOW_A wnaf multiples. - * - rustsecp256k1_v0_4_0_ecmult_odd_multiples_table_storage_var, which converts its + * - rustsecp256k1_v0_4_1_ecmult_odd_multiples_table_storage_var, which converts its * resulting point set to actually affine points, and stores those in pre. * It operates on tables of any size. * @@ -132,32 +132,32 @@ static void rustsecp256k1_v0_4_0_ecmult_odd_multiples_table(int n, rustsecp256k1 * and for G using the second (which requires an inverse, but it only needs to * happen once). */ -static void rustsecp256k1_v0_4_0_ecmult_odd_multiples_table_globalz_windowa(rustsecp256k1_v0_4_0_ge *pre, rustsecp256k1_v0_4_0_fe *globalz, const rustsecp256k1_v0_4_0_gej *a) { - rustsecp256k1_v0_4_0_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)]; - rustsecp256k1_v0_4_0_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)]; +static void rustsecp256k1_v0_4_1_ecmult_odd_multiples_table_globalz_windowa(rustsecp256k1_v0_4_1_ge *pre, rustsecp256k1_v0_4_1_fe *globalz, const rustsecp256k1_v0_4_1_gej *a) { + rustsecp256k1_v0_4_1_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)]; + rustsecp256k1_v0_4_1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)]; /* Compute the odd multiples in Jacobian form. */ - rustsecp256k1_v0_4_0_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), prej, zr, a); + rustsecp256k1_v0_4_1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), prej, zr, a); /* Bring them to the same Z denominator. */ - rustsecp256k1_v0_4_0_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A), pre, globalz, prej, zr); + rustsecp256k1_v0_4_1_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A), pre, globalz, prej, zr); } -static void rustsecp256k1_v0_4_0_ecmult_odd_multiples_table_storage_var(const int n, rustsecp256k1_v0_4_0_ge_storage *pre, const rustsecp256k1_v0_4_0_gej *a) { - rustsecp256k1_v0_4_0_gej d; - rustsecp256k1_v0_4_0_ge d_ge, p_ge; - rustsecp256k1_v0_4_0_gej pj; - rustsecp256k1_v0_4_0_fe zi; - rustsecp256k1_v0_4_0_fe zr; - rustsecp256k1_v0_4_0_fe dx_over_dz_squared; +static void rustsecp256k1_v0_4_1_ecmult_odd_multiples_table_storage_var(const int n, rustsecp256k1_v0_4_1_ge_storage *pre, const rustsecp256k1_v0_4_1_gej *a) { + rustsecp256k1_v0_4_1_gej d; + rustsecp256k1_v0_4_1_ge d_ge, p_ge; + rustsecp256k1_v0_4_1_gej pj; + rustsecp256k1_v0_4_1_fe zi; + rustsecp256k1_v0_4_1_fe zr; + rustsecp256k1_v0_4_1_fe dx_over_dz_squared; int i; VERIFY_CHECK(!a->infinity); - rustsecp256k1_v0_4_0_gej_double_var(&d, a, NULL); + rustsecp256k1_v0_4_1_gej_double_var(&d, a, NULL); /* First, we perform all the additions in an isomorphic curve obtained by multiplying * all `z` coordinates by 1/`d.z`. In these coordinates `d` is affine so we can use - * `rustsecp256k1_v0_4_0_gej_add_ge_var` to perform the additions. For each addition, we store + * `rustsecp256k1_v0_4_1_gej_add_ge_var` to perform the additions. For each addition, we store * the resulting y-coordinate and the z-ratio, since we only have enough memory to * store two field elements. These are sufficient to efficiently undo the isomorphism * and recompute all the `x`s. 
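/* --- Illustrative sketch, not part of the patch: the odd-multiples table and
 * the signed-digit lookup convention used by the ECMULT_TABLE_GET_GE macros
 * below, modelled with plain integers standing in for group elements. The
 * real code performs the additions on an isomorphism where 2*a is affine;
 * this model only captures the indexing. All names are local to the sketch. */
#include <assert.h>

int main(void) {
    const long A = 13;           /* stand-in for the input point a */
    long d = 2 * A;              /* the initial doubling */
    long pre[8];                 /* pre[i] models (2*i+1)*A */
    int i, n;
    long t;

    pre[0] = A;
    for (i = 1; i < 8; i++) {
        pre[i] = pre[i - 1] + d; /* one addition per further odd multiple */
    }
    assert(pre[7] == 15 * A);

    n = -5;                      /* an odd, possibly negative wnaf digit */
    t = (n > 0) ? pre[(n - 1) / 2] : -pre[(-n - 1) / 2];
    assert(t == n * A);          /* lookup yields n*A for odd |n| < 16 */
    return 0;
}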
@@ -166,34 +166,34 @@ static void rustsecp256k1_v0_4_0_ecmult_odd_multiples_table_storage_var(const in d_ge.y = d.y; d_ge.infinity = 0; - rustsecp256k1_v0_4_0_ge_set_gej_zinv(&p_ge, a, &d.z); + rustsecp256k1_v0_4_1_ge_set_gej_zinv(&p_ge, a, &d.z); pj.x = p_ge.x; pj.y = p_ge.y; pj.z = a->z; pj.infinity = 0; for (i = 0; i < (n - 1); i++) { - rustsecp256k1_v0_4_0_fe_normalize_var(&pj.y); - rustsecp256k1_v0_4_0_fe_to_storage(&pre[i].y, &pj.y); - rustsecp256k1_v0_4_0_gej_add_ge_var(&pj, &pj, &d_ge, &zr); - rustsecp256k1_v0_4_0_fe_normalize_var(&zr); - rustsecp256k1_v0_4_0_fe_to_storage(&pre[i].x, &zr); + rustsecp256k1_v0_4_1_fe_normalize_var(&pj.y); + rustsecp256k1_v0_4_1_fe_to_storage(&pre[i].y, &pj.y); + rustsecp256k1_v0_4_1_gej_add_ge_var(&pj, &pj, &d_ge, &zr); + rustsecp256k1_v0_4_1_fe_normalize_var(&zr); + rustsecp256k1_v0_4_1_fe_to_storage(&pre[i].x, &zr); } /* Invert d.z in the same batch, preserving pj.z so we can extract 1/d.z */ - rustsecp256k1_v0_4_0_fe_mul(&zi, &pj.z, &d.z); - rustsecp256k1_v0_4_0_fe_inv_var(&zi, &zi); + rustsecp256k1_v0_4_1_fe_mul(&zi, &pj.z, &d.z); + rustsecp256k1_v0_4_1_fe_inv_var(&zi, &zi); /* Directly set `pre[n - 1]` to `pj`, saving the inverted z-coordinate so * that we can combine it with the saved z-ratios to compute the other zs * without any more inversions. */ - rustsecp256k1_v0_4_0_ge_set_gej_zinv(&p_ge, &pj, &zi); - rustsecp256k1_v0_4_0_ge_to_storage(&pre[n - 1], &p_ge); + rustsecp256k1_v0_4_1_ge_set_gej_zinv(&p_ge, &pj, &zi); + rustsecp256k1_v0_4_1_ge_to_storage(&pre[n - 1], &p_ge); /* Compute the actual x-coordinate of D, which will be needed below. */ - rustsecp256k1_v0_4_0_fe_mul(&d.z, &zi, &pj.z); /* d.z = 1/d.z */ - rustsecp256k1_v0_4_0_fe_sqr(&dx_over_dz_squared, &d.z); - rustsecp256k1_v0_4_0_fe_mul(&dx_over_dz_squared, &dx_over_dz_squared, &d.x); + rustsecp256k1_v0_4_1_fe_mul(&d.z, &zi, &pj.z); /* d.z = 1/d.z */ + rustsecp256k1_v0_4_1_fe_sqr(&dx_over_dz_squared, &d.z); + rustsecp256k1_v0_4_1_fe_mul(&dx_over_dz_squared, &dx_over_dz_squared, &d.x); /* Going into the second loop, we have set `pre[n-1]` to its final affine * form, but still need to set `pre[i]` for `i` in 0 through `n-2`. We @@ -217,21 +217,21 @@ static void rustsecp256k1_v0_4_0_ecmult_odd_multiples_table_storage_var(const in */ i = n - 1; while (i > 0) { - rustsecp256k1_v0_4_0_fe zi2, zi3; - const rustsecp256k1_v0_4_0_fe *rzr; + rustsecp256k1_v0_4_1_fe zi2, zi3; + const rustsecp256k1_v0_4_1_fe *rzr; i--; - rustsecp256k1_v0_4_0_ge_from_storage(&p_ge, &pre[i]); + rustsecp256k1_v0_4_1_ge_from_storage(&p_ge, &pre[i]); /* For each remaining point, we extract the z-ratio from the stored * x-coordinate, compute its z^-1 from that, and compute the full * point from that. */ rzr = &p_ge.x; - rustsecp256k1_v0_4_0_fe_mul(&zi, &zi, rzr); - rustsecp256k1_v0_4_0_fe_sqr(&zi2, &zi); - rustsecp256k1_v0_4_0_fe_mul(&zi3, &zi2, &zi); + rustsecp256k1_v0_4_1_fe_mul(&zi, &zi, rzr); + rustsecp256k1_v0_4_1_fe_sqr(&zi2, &zi); + rustsecp256k1_v0_4_1_fe_mul(&zi3, &zi2, &zi); /* To compute the actual x-coordinate, we use the stored z ratio and - * y-coordinate, which we obtained from `rustsecp256k1_v0_4_0_gej_add_ge_var` + * y-coordinate, which we obtained from `rustsecp256k1_v0_4_1_gej_add_ge_var` * in the loop above, as well as the inverse of the square of its * z-coordinate. 
We store the latter in the `zi2` variable, which is * computed iteratively starting from the overall Z inverse then @@ -263,13 +263,13 @@ static void rustsecp256k1_v0_4_0_ecmult_odd_multiples_table_storage_var(const in * X = d_x / d_z^2 - rzr / z^2 * = dx_over_dz_squared - rzr * zi2 */ - rustsecp256k1_v0_4_0_fe_mul(&p_ge.x, rzr, &zi2); - rustsecp256k1_v0_4_0_fe_negate(&p_ge.x, &p_ge.x, 1); - rustsecp256k1_v0_4_0_fe_add(&p_ge.x, &dx_over_dz_squared); + rustsecp256k1_v0_4_1_fe_mul(&p_ge.x, rzr, &zi2); + rustsecp256k1_v0_4_1_fe_negate(&p_ge.x, &p_ge.x, 1); + rustsecp256k1_v0_4_1_fe_add(&p_ge.x, &dx_over_dz_squared); /* y is stored_y/z^3, as we expect */ - rustsecp256k1_v0_4_0_fe_mul(&p_ge.y, &p_ge.y, &zi3); + rustsecp256k1_v0_4_1_fe_mul(&p_ge.y, &p_ge.y, &zi3); /* Store */ - rustsecp256k1_v0_4_0_ge_to_storage(&pre[i], &p_ge); + rustsecp256k1_v0_4_1_ge_to_storage(&pre[i], &p_ge); } } @@ -283,7 +283,7 @@ static void rustsecp256k1_v0_4_0_ecmult_odd_multiples_table_storage_var(const in *(r) = (pre)[((n)-1)/2]; \ } else { \ *(r) = (pre)[(-(n)-1)/2]; \ - rustsecp256k1_v0_4_0_fe_negate(&((r)->y), &((r)->y), 1); \ + rustsecp256k1_v0_4_1_fe_negate(&((r)->y), &((r)->y), 1); \ } \ } while(0) @@ -292,25 +292,25 @@ static void rustsecp256k1_v0_4_0_ecmult_odd_multiples_table_storage_var(const in VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \ VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \ if ((n) > 0) { \ - rustsecp256k1_v0_4_0_ge_from_storage((r), &(pre)[((n)-1)/2]); \ + rustsecp256k1_v0_4_1_ge_from_storage((r), &(pre)[((n)-1)/2]); \ } else { \ - rustsecp256k1_v0_4_0_ge_from_storage((r), &(pre)[(-(n)-1)/2]); \ - rustsecp256k1_v0_4_0_fe_negate(&((r)->y), &((r)->y), 1); \ + rustsecp256k1_v0_4_1_ge_from_storage((r), &(pre)[(-(n)-1)/2]); \ + rustsecp256k1_v0_4_1_fe_negate(&((r)->y), &((r)->y), 1); \ } \ } while(0) static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE = - ROUND_TO_ALIGN(sizeof((*((rustsecp256k1_v0_4_0_ecmult_context*) NULL)->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)) - + ROUND_TO_ALIGN(sizeof((*((rustsecp256k1_v0_4_0_ecmult_context*) NULL)->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)) + ROUND_TO_ALIGN(sizeof((*((rustsecp256k1_v0_4_1_ecmult_context*) NULL)->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)) + + ROUND_TO_ALIGN(sizeof((*((rustsecp256k1_v0_4_1_ecmult_context*) NULL)->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)) ; -static void rustsecp256k1_v0_4_0_ecmult_context_init(rustsecp256k1_v0_4_0_ecmult_context *ctx) { +static void rustsecp256k1_v0_4_1_ecmult_context_init(rustsecp256k1_v0_4_1_ecmult_context *ctx) { ctx->pre_g = NULL; ctx->pre_g_128 = NULL; } -static void rustsecp256k1_v0_4_0_ecmult_context_build(rustsecp256k1_v0_4_0_ecmult_context *ctx, void **prealloc) { - rustsecp256k1_v0_4_0_gej gj; +static void rustsecp256k1_v0_4_1_ecmult_context_build(rustsecp256k1_v0_4_1_ecmult_context *ctx, void **prealloc) { + rustsecp256k1_v0_4_1_gej gj; void* const base = *prealloc; size_t const prealloc_size = SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE; @@ -319,52 +319,52 @@ static void rustsecp256k1_v0_4_0_ecmult_context_build(rustsecp256k1_v0_4_0_ecmul } /* get the generator */ - rustsecp256k1_v0_4_0_gej_set_ge(&gj, &rustsecp256k1_v0_4_0_ge_const_g); + rustsecp256k1_v0_4_1_gej_set_ge(&gj, &rustsecp256k1_v0_4_1_ge_const_g); { size_t size = sizeof((*ctx->pre_g)[0]) * ((size_t)ECMULT_TABLE_SIZE(WINDOW_G)); /* check for overflow */ VERIFY_CHECK(size / sizeof((*ctx->pre_g)[0]) == ((size_t)ECMULT_TABLE_SIZE(WINDOW_G))); - ctx->pre_g = (rustsecp256k1_v0_4_0_ge_storage (*)[])manual_alloc(prealloc, 
sizeof((*ctx->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size); + ctx->pre_g = (rustsecp256k1_v0_4_1_ge_storage (*)[])manual_alloc(prealloc, sizeof((*ctx->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size); } /* precompute the tables with odd multiples */ - rustsecp256k1_v0_4_0_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj); + rustsecp256k1_v0_4_1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj); { - rustsecp256k1_v0_4_0_gej g_128j; + rustsecp256k1_v0_4_1_gej g_128j; int i; size_t size = sizeof((*ctx->pre_g_128)[0]) * ((size_t) ECMULT_TABLE_SIZE(WINDOW_G)); /* check for overflow */ VERIFY_CHECK(size / sizeof((*ctx->pre_g_128)[0]) == ((size_t)ECMULT_TABLE_SIZE(WINDOW_G))); - ctx->pre_g_128 = (rustsecp256k1_v0_4_0_ge_storage (*)[])manual_alloc(prealloc, sizeof((*ctx->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size); + ctx->pre_g_128 = (rustsecp256k1_v0_4_1_ge_storage (*)[])manual_alloc(prealloc, sizeof((*ctx->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size); /* calculate 2^128*generator */ g_128j = gj; for (i = 0; i < 128; i++) { - rustsecp256k1_v0_4_0_gej_double_var(&g_128j, &g_128j, NULL); + rustsecp256k1_v0_4_1_gej_double_var(&g_128j, &g_128j, NULL); } - rustsecp256k1_v0_4_0_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j); + rustsecp256k1_v0_4_1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j); } } -static void rustsecp256k1_v0_4_0_ecmult_context_finalize_memcpy(rustsecp256k1_v0_4_0_ecmult_context *dst, const rustsecp256k1_v0_4_0_ecmult_context *src) { +static void rustsecp256k1_v0_4_1_ecmult_context_finalize_memcpy(rustsecp256k1_v0_4_1_ecmult_context *dst, const rustsecp256k1_v0_4_1_ecmult_context *src) { if (src->pre_g != NULL) { /* We cast to void* first to suppress a -Wcast-align warning. */ - dst->pre_g = (rustsecp256k1_v0_4_0_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g) - (unsigned char*)src)); + dst->pre_g = (rustsecp256k1_v0_4_1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g) - (unsigned char*)src)); } if (src->pre_g_128 != NULL) { - dst->pre_g_128 = (rustsecp256k1_v0_4_0_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g_128) - (unsigned char*)src)); + dst->pre_g_128 = (rustsecp256k1_v0_4_1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g_128) - (unsigned char*)src)); } } -static int rustsecp256k1_v0_4_0_ecmult_context_is_built(const rustsecp256k1_v0_4_0_ecmult_context *ctx) { +static int rustsecp256k1_v0_4_1_ecmult_context_is_built(const rustsecp256k1_v0_4_1_ecmult_context *ctx) { return ctx->pre_g != NULL; } -static void rustsecp256k1_v0_4_0_ecmult_context_clear(rustsecp256k1_v0_4_0_ecmult_context *ctx) { - rustsecp256k1_v0_4_0_ecmult_context_init(ctx); +static void rustsecp256k1_v0_4_1_ecmult_context_clear(rustsecp256k1_v0_4_1_ecmult_context *ctx) { + rustsecp256k1_v0_4_1_ecmult_context_init(ctx); } /** Convert a number to WNAF notation. The number becomes represented by sum(2^i * wnaf[i], i=0..bits), @@ -374,8 +374,8 @@ static void rustsecp256k1_v0_4_0_ecmult_context_clear(rustsecp256k1_v0_4_0_ecmul * - the number of set values in wnaf is returned. This number is at most 256, and at most one more * than the number of bits in the (absolute value) of the input. 
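 * (Illustrative example, not in the upstream comment: with w = 3 the scalar
 * 11 = 1011b is encoded as wnaf[0] = 3, wnaf[3] = 1 and zeroes elsewhere,
 * since 3*2^0 + 1*2^3 = 11, both non-zero digits are odd, and they are
 * separated by at least w-1 zero entries.)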
*/ -static int rustsecp256k1_v0_4_0_ecmult_wnaf(int *wnaf, int len, const rustsecp256k1_v0_4_0_scalar *a, int w) { - rustsecp256k1_v0_4_0_scalar s; +static int rustsecp256k1_v0_4_1_ecmult_wnaf(int *wnaf, int len, const rustsecp256k1_v0_4_1_scalar *a, int w) { + rustsecp256k1_v0_4_1_scalar s; int last_set_bit = -1; int bit = 0; int sign = 1; @@ -389,15 +389,15 @@ static int rustsecp256k1_v0_4_0_ecmult_wnaf(int *wnaf, int len, const rustsecp25 memset(wnaf, 0, len * sizeof(wnaf[0])); s = *a; - if (rustsecp256k1_v0_4_0_scalar_get_bits(&s, 255, 1)) { - rustsecp256k1_v0_4_0_scalar_negate(&s, &s); + if (rustsecp256k1_v0_4_1_scalar_get_bits(&s, 255, 1)) { + rustsecp256k1_v0_4_1_scalar_negate(&s, &s); sign = -1; } while (bit < len) { int now; int word; - if (rustsecp256k1_v0_4_0_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) { + if (rustsecp256k1_v0_4_1_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) { bit++; continue; } @@ -407,7 +407,7 @@ static int rustsecp256k1_v0_4_0_ecmult_wnaf(int *wnaf, int len, const rustsecp25 now = len - bit; } - word = rustsecp256k1_v0_4_0_scalar_get_bits_var(&s, bit, now) + carry; + word = rustsecp256k1_v0_4_1_scalar_get_bits_var(&s, bit, now) + carry; carry = (word >> (w-1)) & 1; word -= carry << w; @@ -420,14 +420,14 @@ static int rustsecp256k1_v0_4_0_ecmult_wnaf(int *wnaf, int len, const rustsecp25 #ifdef VERIFY CHECK(carry == 0); while (bit < 256) { - CHECK(rustsecp256k1_v0_4_0_scalar_get_bits(&s, bit++, 1) == 0); + CHECK(rustsecp256k1_v0_4_1_scalar_get_bits(&s, bit++, 1) == 0); } #endif return last_set_bit + 1; } -struct rustsecp256k1_v0_4_0_strauss_point_state { - rustsecp256k1_v0_4_0_scalar na_1, na_lam; +struct rustsecp256k1_v0_4_1_strauss_point_state { + rustsecp256k1_v0_4_1_scalar na_1, na_lam; int wnaf_na_1[129]; int wnaf_na_lam[129]; int bits_na_1; @@ -435,19 +435,19 @@ struct rustsecp256k1_v0_4_0_strauss_point_state { size_t input_pos; }; -struct rustsecp256k1_v0_4_0_strauss_state { - rustsecp256k1_v0_4_0_gej* prej; - rustsecp256k1_v0_4_0_fe* zr; - rustsecp256k1_v0_4_0_ge* pre_a; - rustsecp256k1_v0_4_0_ge* pre_a_lam; - struct rustsecp256k1_v0_4_0_strauss_point_state* ps; +struct rustsecp256k1_v0_4_1_strauss_state { + rustsecp256k1_v0_4_1_gej* prej; + rustsecp256k1_v0_4_1_fe* zr; + rustsecp256k1_v0_4_1_ge* pre_a; + rustsecp256k1_v0_4_1_ge* pre_a_lam; + struct rustsecp256k1_v0_4_1_strauss_point_state* ps; }; -static void rustsecp256k1_v0_4_0_ecmult_strauss_wnaf(const rustsecp256k1_v0_4_0_ecmult_context *ctx, const struct rustsecp256k1_v0_4_0_strauss_state *state, rustsecp256k1_v0_4_0_gej *r, size_t num, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_scalar *na, const rustsecp256k1_v0_4_0_scalar *ng) { - rustsecp256k1_v0_4_0_ge tmpa; - rustsecp256k1_v0_4_0_fe Z; +static void rustsecp256k1_v0_4_1_ecmult_strauss_wnaf(const rustsecp256k1_v0_4_1_ecmult_context *ctx, const struct rustsecp256k1_v0_4_1_strauss_state *state, rustsecp256k1_v0_4_1_gej *r, size_t num, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_scalar *na, const rustsecp256k1_v0_4_1_scalar *ng) { + rustsecp256k1_v0_4_1_ge tmpa; + rustsecp256k1_v0_4_1_fe Z; /* Splitted G factors. 
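 * (Illustrative note, not in the upstream comment: i.e. ng is split as
 * ng = ng_1 + 2^128*ng_128, so ng_1 can be applied against the pre_g table
 * and ng_128 against the pre_g_128 table of odd multiples of 2^128*G.)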
*/ - rustsecp256k1_v0_4_0_scalar ng_1, ng_128; + rustsecp256k1_v0_4_1_scalar ng_1, ng_128; int wnaf_ng_1[129]; int bits_ng_1 = 0; int wnaf_ng_128[129]; @@ -458,16 +458,16 @@ static void rustsecp256k1_v0_4_0_ecmult_strauss_wnaf(const rustsecp256k1_v0_4_0_ size_t no = 0; for (np = 0; np < num; ++np) { - if (rustsecp256k1_v0_4_0_scalar_is_zero(&na[np]) || rustsecp256k1_v0_4_0_gej_is_infinity(&a[np])) { + if (rustsecp256k1_v0_4_1_scalar_is_zero(&na[np]) || rustsecp256k1_v0_4_1_gej_is_infinity(&a[np])) { continue; } state->ps[no].input_pos = np; /* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */ - rustsecp256k1_v0_4_0_scalar_split_lambda(&state->ps[no].na_1, &state->ps[no].na_lam, &na[np]); + rustsecp256k1_v0_4_1_scalar_split_lambda(&state->ps[no].na_1, &state->ps[no].na_lam, &na[np]); /* build wnaf representation for na_1 and na_lam. */ - state->ps[no].bits_na_1 = rustsecp256k1_v0_4_0_ecmult_wnaf(state->ps[no].wnaf_na_1, 129, &state->ps[no].na_1, WINDOW_A); - state->ps[no].bits_na_lam = rustsecp256k1_v0_4_0_ecmult_wnaf(state->ps[no].wnaf_na_lam, 129, &state->ps[no].na_lam, WINDOW_A); + state->ps[no].bits_na_1 = rustsecp256k1_v0_4_1_ecmult_wnaf(state->ps[no].wnaf_na_1, 129, &state->ps[no].na_1, WINDOW_A); + state->ps[no].bits_na_lam = rustsecp256k1_v0_4_1_ecmult_wnaf(state->ps[no].wnaf_na_lam, 129, &state->ps[no].na_lam, WINDOW_A); VERIFY_CHECK(state->ps[no].bits_na_1 <= 129); VERIFY_CHECK(state->ps[no].bits_na_lam <= 129); if (state->ps[no].bits_na_1 > bits) { @@ -486,40 +486,40 @@ static void rustsecp256k1_v0_4_0_ecmult_strauss_wnaf(const rustsecp256k1_v0_4_0_ * the Z coordinate of the result once at the end. * The exception is the precomputed G table points, which are actually * affine. Compared to the base used for other points, they have a Z ratio - * of 1/Z, so we can use rustsecp256k1_v0_4_0_gej_add_zinv_var, which uses the same + * of 1/Z, so we can use rustsecp256k1_v0_4_1_gej_add_zinv_var, which uses the same * isomorphism to efficiently add with a known Z inverse. */ if (no > 0) { /* Compute the odd multiples in Jacobian form. 
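 * (Illustrative note, not in the upstream comment: each subsequent input
 * point is first rescaled by the last Z of the previous point's table, so all
 * `no` tables end up on one compatible Z chain and the single
 * rustsecp256k1_v0_4_1_ge_globalz_set_table_gej call below can bring them to
 * a shared denominator.)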
*/ - rustsecp256k1_v0_4_0_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej, state->zr, &a[state->ps[0].input_pos]); + rustsecp256k1_v0_4_1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej, state->zr, &a[state->ps[0].input_pos]); for (np = 1; np < no; ++np) { - rustsecp256k1_v0_4_0_gej tmp = a[state->ps[np].input_pos]; + rustsecp256k1_v0_4_1_gej tmp = a[state->ps[np].input_pos]; #ifdef VERIFY - rustsecp256k1_v0_4_0_fe_normalize_var(&(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z)); + rustsecp256k1_v0_4_1_fe_normalize_var(&(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z)); #endif - rustsecp256k1_v0_4_0_gej_rescale(&tmp, &(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z)); - rustsecp256k1_v0_4_0_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &tmp); - rustsecp256k1_v0_4_0_fe_mul(state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &(a[state->ps[np].input_pos].z)); + rustsecp256k1_v0_4_1_gej_rescale(&tmp, &(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z)); + rustsecp256k1_v0_4_1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &tmp); + rustsecp256k1_v0_4_1_fe_mul(state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &(a[state->ps[np].input_pos].z)); } /* Bring them to the same Z denominator. */ - rustsecp256k1_v0_4_0_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A) * no, state->pre_a, &Z, state->prej, state->zr); + rustsecp256k1_v0_4_1_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A) * no, state->pre_a, &Z, state->prej, state->zr); } else { - rustsecp256k1_v0_4_0_fe_set_int(&Z, 1); + rustsecp256k1_v0_4_1_fe_set_int(&Z, 1); } for (np = 0; np < no; ++np) { for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { - rustsecp256k1_v0_4_0_ge_mul_lambda(&state->pre_a_lam[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i]); + rustsecp256k1_v0_4_1_ge_mul_lambda(&state->pre_a_lam[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i]); } } if (ng) { /* split ng into ng_1 and ng_128 (where gn = gn_1 + gn_128*2^128, and gn_1 and gn_128 are ~128 bit) */ - rustsecp256k1_v0_4_0_scalar_split_128(&ng_1, &ng_128, ng); + rustsecp256k1_v0_4_1_scalar_split_128(&ng_1, &ng_128, ng); /* Build wnaf representation for ng_1 and ng_128 */ - bits_ng_1 = rustsecp256k1_v0_4_0_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G); - bits_ng_128 = rustsecp256k1_v0_4_0_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G); + bits_ng_1 = rustsecp256k1_v0_4_1_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G); + bits_ng_128 = rustsecp256k1_v0_4_1_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G); if (bits_ng_1 > bits) { bits = bits_ng_1; } @@ -528,102 +528,102 @@ static void rustsecp256k1_v0_4_0_ecmult_strauss_wnaf(const rustsecp256k1_v0_4_0_ } } - rustsecp256k1_v0_4_0_gej_set_infinity(r); + rustsecp256k1_v0_4_1_gej_set_infinity(r); for (i = bits - 1; i >= 0; i--) { int n; - rustsecp256k1_v0_4_0_gej_double_var(r, r, NULL); + rustsecp256k1_v0_4_1_gej_double_var(r, r, NULL); for (np = 0; np < no; ++np) { if (i < state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) { ECMULT_TABLE_GET_GE(&tmpa, 
state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); - rustsecp256k1_v0_4_0_gej_add_ge_var(r, r, &tmpa, NULL); + rustsecp256k1_v0_4_1_gej_add_ge_var(r, r, &tmpa, NULL); } if (i < state->ps[np].bits_na_lam && (n = state->ps[np].wnaf_na_lam[i])) { ECMULT_TABLE_GET_GE(&tmpa, state->pre_a_lam + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); - rustsecp256k1_v0_4_0_gej_add_ge_var(r, r, &tmpa, NULL); + rustsecp256k1_v0_4_1_gej_add_ge_var(r, r, &tmpa, NULL); } } if (i < bits_ng_1 && (n = wnaf_ng_1[i])) { ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G); - rustsecp256k1_v0_4_0_gej_add_zinv_var(r, r, &tmpa, &Z); + rustsecp256k1_v0_4_1_gej_add_zinv_var(r, r, &tmpa, &Z); } if (i < bits_ng_128 && (n = wnaf_ng_128[i])) { ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g_128, n, WINDOW_G); - rustsecp256k1_v0_4_0_gej_add_zinv_var(r, r, &tmpa, &Z); + rustsecp256k1_v0_4_1_gej_add_zinv_var(r, r, &tmpa, &Z); } } if (!r->infinity) { - rustsecp256k1_v0_4_0_fe_mul(&r->z, &r->z, &Z); + rustsecp256k1_v0_4_1_fe_mul(&r->z, &r->z, &Z); } } -static void rustsecp256k1_v0_4_0_ecmult(const rustsecp256k1_v0_4_0_ecmult_context *ctx, rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_scalar *na, const rustsecp256k1_v0_4_0_scalar *ng) { - rustsecp256k1_v0_4_0_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)]; - rustsecp256k1_v0_4_0_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)]; - rustsecp256k1_v0_4_0_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; - struct rustsecp256k1_v0_4_0_strauss_point_state ps[1]; - rustsecp256k1_v0_4_0_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; - struct rustsecp256k1_v0_4_0_strauss_state state; +static void rustsecp256k1_v0_4_1_ecmult(const rustsecp256k1_v0_4_1_ecmult_context *ctx, rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_scalar *na, const rustsecp256k1_v0_4_1_scalar *ng) { + rustsecp256k1_v0_4_1_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)]; + rustsecp256k1_v0_4_1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)]; + rustsecp256k1_v0_4_1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; + struct rustsecp256k1_v0_4_1_strauss_point_state ps[1]; + rustsecp256k1_v0_4_1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; + struct rustsecp256k1_v0_4_1_strauss_state state; state.prej = prej; state.zr = zr; state.pre_a = pre_a; state.pre_a_lam = pre_a_lam; state.ps = ps; - rustsecp256k1_v0_4_0_ecmult_strauss_wnaf(ctx, &state, r, 1, a, na, ng); + rustsecp256k1_v0_4_1_ecmult_strauss_wnaf(ctx, &state, r, 1, a, na, ng); } -static size_t rustsecp256k1_v0_4_0_strauss_scratch_size(size_t n_points) { - static const size_t point_size = (2 * sizeof(rustsecp256k1_v0_4_0_ge) + sizeof(rustsecp256k1_v0_4_0_gej) + sizeof(rustsecp256k1_v0_4_0_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct rustsecp256k1_v0_4_0_strauss_point_state) + sizeof(rustsecp256k1_v0_4_0_gej) + sizeof(rustsecp256k1_v0_4_0_scalar); +static size_t rustsecp256k1_v0_4_1_strauss_scratch_size(size_t n_points) { + static const size_t point_size = (2 * sizeof(rustsecp256k1_v0_4_1_ge) + sizeof(rustsecp256k1_v0_4_1_gej) + sizeof(rustsecp256k1_v0_4_1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct rustsecp256k1_v0_4_1_strauss_point_state) + sizeof(rustsecp256k1_v0_4_1_gej) + sizeof(rustsecp256k1_v0_4_1_scalar); return n_points*point_size; } -static int rustsecp256k1_v0_4_0_ecmult_strauss_batch(const rustsecp256k1_v0_4_0_callback* error_callback, const rustsecp256k1_v0_4_0_ecmult_context *ctx, rustsecp256k1_v0_4_0_scratch *scratch, rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_scalar *inp_g_sc, 
rustsecp256k1_v0_4_0_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) { - rustsecp256k1_v0_4_0_gej* points; - rustsecp256k1_v0_4_0_scalar* scalars; - struct rustsecp256k1_v0_4_0_strauss_state state; +static int rustsecp256k1_v0_4_1_ecmult_strauss_batch(const rustsecp256k1_v0_4_1_callback* error_callback, const rustsecp256k1_v0_4_1_ecmult_context *ctx, rustsecp256k1_v0_4_1_scratch *scratch, rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_scalar *inp_g_sc, rustsecp256k1_v0_4_1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) { + rustsecp256k1_v0_4_1_gej* points; + rustsecp256k1_v0_4_1_scalar* scalars; + struct rustsecp256k1_v0_4_1_strauss_state state; size_t i; - const size_t scratch_checkpoint = rustsecp256k1_v0_4_0_scratch_checkpoint(error_callback, scratch); + const size_t scratch_checkpoint = rustsecp256k1_v0_4_1_scratch_checkpoint(error_callback, scratch); - rustsecp256k1_v0_4_0_gej_set_infinity(r); + rustsecp256k1_v0_4_1_gej_set_infinity(r); if (inp_g_sc == NULL && n_points == 0) { return 1; } - points = (rustsecp256k1_v0_4_0_gej*)rustsecp256k1_v0_4_0_scratch_alloc(error_callback, scratch, n_points * sizeof(rustsecp256k1_v0_4_0_gej)); - scalars = (rustsecp256k1_v0_4_0_scalar*)rustsecp256k1_v0_4_0_scratch_alloc(error_callback, scratch, n_points * sizeof(rustsecp256k1_v0_4_0_scalar)); - state.prej = (rustsecp256k1_v0_4_0_gej*)rustsecp256k1_v0_4_0_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_4_0_gej)); - state.zr = (rustsecp256k1_v0_4_0_fe*)rustsecp256k1_v0_4_0_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_4_0_fe)); - state.pre_a = (rustsecp256k1_v0_4_0_ge*)rustsecp256k1_v0_4_0_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_4_0_ge)); - state.pre_a_lam = (rustsecp256k1_v0_4_0_ge*)rustsecp256k1_v0_4_0_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_4_0_ge)); - state.ps = (struct rustsecp256k1_v0_4_0_strauss_point_state*)rustsecp256k1_v0_4_0_scratch_alloc(error_callback, scratch, n_points * sizeof(struct rustsecp256k1_v0_4_0_strauss_point_state)); + points = (rustsecp256k1_v0_4_1_gej*)rustsecp256k1_v0_4_1_scratch_alloc(error_callback, scratch, n_points * sizeof(rustsecp256k1_v0_4_1_gej)); + scalars = (rustsecp256k1_v0_4_1_scalar*)rustsecp256k1_v0_4_1_scratch_alloc(error_callback, scratch, n_points * sizeof(rustsecp256k1_v0_4_1_scalar)); + state.prej = (rustsecp256k1_v0_4_1_gej*)rustsecp256k1_v0_4_1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_4_1_gej)); + state.zr = (rustsecp256k1_v0_4_1_fe*)rustsecp256k1_v0_4_1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_4_1_fe)); + state.pre_a = (rustsecp256k1_v0_4_1_ge*)rustsecp256k1_v0_4_1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_4_1_ge)); + state.pre_a_lam = (rustsecp256k1_v0_4_1_ge*)rustsecp256k1_v0_4_1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_4_1_ge)); + state.ps = (struct rustsecp256k1_v0_4_1_strauss_point_state*)rustsecp256k1_v0_4_1_scratch_alloc(error_callback, scratch, n_points * sizeof(struct rustsecp256k1_v0_4_1_strauss_point_state)); if (points == NULL || scalars == NULL || state.prej == NULL || state.zr == NULL || 
state.pre_a == NULL || state.pre_a_lam == NULL || state.ps == NULL) { - rustsecp256k1_v0_4_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + rustsecp256k1_v0_4_1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } for (i = 0; i < n_points; i++) { - rustsecp256k1_v0_4_0_ge point; + rustsecp256k1_v0_4_1_ge point; if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) { - rustsecp256k1_v0_4_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + rustsecp256k1_v0_4_1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } - rustsecp256k1_v0_4_0_gej_set_ge(&points[i], &point); + rustsecp256k1_v0_4_1_gej_set_ge(&points[i], &point); } - rustsecp256k1_v0_4_0_ecmult_strauss_wnaf(ctx, &state, r, n_points, points, scalars, inp_g_sc); - rustsecp256k1_v0_4_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + rustsecp256k1_v0_4_1_ecmult_strauss_wnaf(ctx, &state, r, n_points, points, scalars, inp_g_sc); + rustsecp256k1_v0_4_1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 1; } -/* Wrapper for rustsecp256k1_v0_4_0_ecmult_multi_func interface */ -static int rustsecp256k1_v0_4_0_ecmult_strauss_batch_single(const rustsecp256k1_v0_4_0_callback* error_callback, const rustsecp256k1_v0_4_0_ecmult_context *actx, rustsecp256k1_v0_4_0_scratch *scratch, rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_scalar *inp_g_sc, rustsecp256k1_v0_4_0_ecmult_multi_callback cb, void *cbdata, size_t n) { - return rustsecp256k1_v0_4_0_ecmult_strauss_batch(error_callback, actx, scratch, r, inp_g_sc, cb, cbdata, n, 0); +/* Wrapper for rustsecp256k1_v0_4_1_ecmult_multi_func interface */ +static int rustsecp256k1_v0_4_1_ecmult_strauss_batch_single(const rustsecp256k1_v0_4_1_callback* error_callback, const rustsecp256k1_v0_4_1_ecmult_context *actx, rustsecp256k1_v0_4_1_scratch *scratch, rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_scalar *inp_g_sc, rustsecp256k1_v0_4_1_ecmult_multi_callback cb, void *cbdata, size_t n) { + return rustsecp256k1_v0_4_1_ecmult_strauss_batch(error_callback, actx, scratch, r, inp_g_sc, cb, cbdata, n, 0); } -static size_t rustsecp256k1_v0_4_0_strauss_max_points(const rustsecp256k1_v0_4_0_callback* error_callback, rustsecp256k1_v0_4_0_scratch *scratch) { - return rustsecp256k1_v0_4_0_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / rustsecp256k1_v0_4_0_strauss_scratch_size(1); +static size_t rustsecp256k1_v0_4_1_strauss_max_points(const rustsecp256k1_v0_4_1_callback* error_callback, rustsecp256k1_v0_4_1_scratch *scratch) { + return rustsecp256k1_v0_4_1_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / rustsecp256k1_v0_4_1_strauss_scratch_size(1); } /** Convert a number to WNAF notation. 
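/* --- Illustrative sketch, not part of the patch: width-w NAF digit
 * extraction on a plain unsigned integer, mirroring the carry/window logic of
 * the wnaf routines in this file. All names are local to this sketch. */
#include <assert.h>

int main(void) {
    const unsigned long s = 11;  /* scalar to encode: 1011b */
    const int w = 3;             /* window width */
    long digits[64] = {0};
    unsigned long v = s;
    long acc = 0;
    int i = 0, j;

    while (v != 0) {
        if (v & 1) {
            long d = (long)(v & ((1UL << w) - 1));  /* low w bits */
            if (d > (1L << (w - 1))) {
                d -= 1L << w;    /* map into the odd range (-2^(w-1), 2^(w-1)) */
            }
            digits[i] = d;
            v -= (unsigned long)d; /* unsigned wraparound handles negative d */
        }
        v >>= 1;
        i++;
    }
    /* Reconstruct: s == sum(digits[i] * 2^i); here 11 = 3*2^0 + 1*2^3. */
    for (j = i; j-- > 0;) {
        acc = 2 * acc + digits[j];
    }
    assert((unsigned long)acc == s);
    return 0;
}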
@@ -633,25 +633,25 @@ static size_t rustsecp256k1_v0_4_0_strauss_max_points(const rustsecp256k1_v0_4_0 * - the number of words set is always WNAF_SIZE(w) * - the returned skew is 0 or 1 */ -static int rustsecp256k1_v0_4_0_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_4_0_scalar *s, int w) { +static int rustsecp256k1_v0_4_1_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_4_1_scalar *s, int w) { int skew = 0; int pos; int max_pos; int last_w; - const rustsecp256k1_v0_4_0_scalar *work = s; + const rustsecp256k1_v0_4_1_scalar *work = s; - if (rustsecp256k1_v0_4_0_scalar_is_zero(s)) { + if (rustsecp256k1_v0_4_1_scalar_is_zero(s)) { for (pos = 0; pos < WNAF_SIZE(w); pos++) { wnaf[pos] = 0; } return 0; } - if (rustsecp256k1_v0_4_0_scalar_is_even(s)) { + if (rustsecp256k1_v0_4_1_scalar_is_even(s)) { skew = 1; } - wnaf[0] = rustsecp256k1_v0_4_0_scalar_get_bits_var(work, 0, w) + skew; + wnaf[0] = rustsecp256k1_v0_4_1_scalar_get_bits_var(work, 0, w) + skew; /* Compute last window size. Relevant when window size doesn't divide the * number of bits in the scalar */ last_w = WNAF_BITS - (WNAF_SIZE(w) - 1) * w; @@ -659,7 +659,7 @@ static int rustsecp256k1_v0_4_0_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_4_0 /* Store the position of the first nonzero word in max_pos to allow * skipping leading zeros when calculating the wnaf. */ for (pos = WNAF_SIZE(w) - 1; pos > 0; pos--) { - int val = rustsecp256k1_v0_4_0_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w); + int val = rustsecp256k1_v0_4_1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w); if(val != 0) { break; } @@ -669,7 +669,7 @@ static int rustsecp256k1_v0_4_0_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_4_0 pos = 1; while (pos <= max_pos) { - int val = rustsecp256k1_v0_4_0_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w); + int val = rustsecp256k1_v0_4_1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w); if ((val & 1) == 0) { wnaf[pos - 1] -= (1 << w); wnaf[pos] = (val + 1); @@ -695,14 +695,14 @@ static int rustsecp256k1_v0_4_0_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_4_0 return skew; } -struct rustsecp256k1_v0_4_0_pippenger_point_state { +struct rustsecp256k1_v0_4_1_pippenger_point_state { int skew_na; size_t input_pos; }; -struct rustsecp256k1_v0_4_0_pippenger_state { +struct rustsecp256k1_v0_4_1_pippenger_state { int *wnaf_na; - struct rustsecp256k1_v0_4_0_pippenger_point_state* ps; + struct rustsecp256k1_v0_4_1_pippenger_point_state* ps; }; /* @@ -712,7 +712,7 @@ struct rustsecp256k1_v0_4_0_pippenger_state { * to the point's wnaf[i]. Second, the buckets are added together such that * r += 1*bucket[0] + 3*bucket[1] + 5*bucket[2] + ... 
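 * (Illustrative note, not in the upstream comment: the weighted sum is
 * evaluated top-down with running sums, using
 * 1*b0 + 3*b1 + 5*b2 + 7*b3 = (b0+b1+b2+b3) + 2*(b3 + (b3+b2) + (b3+b2+b1)),
 * so only additions and one deferred doubling are needed; an integer sketch
 * of the same identity is appended at the end of this section.)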
*/ -static int rustsecp256k1_v0_4_0_ecmult_pippenger_wnaf(rustsecp256k1_v0_4_0_gej *buckets, int bucket_window, struct rustsecp256k1_v0_4_0_pippenger_state *state, rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_scalar *sc, const rustsecp256k1_v0_4_0_ge *pt, size_t num) { +static int rustsecp256k1_v0_4_1_ecmult_pippenger_wnaf(rustsecp256k1_v0_4_1_gej *buckets, int bucket_window, struct rustsecp256k1_v0_4_1_pippenger_state *state, rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_scalar *sc, const rustsecp256k1_v0_4_1_ge *pt, size_t num) { size_t n_wnaf = WNAF_SIZE(bucket_window+1); size_t np; size_t no = 0; @@ -720,55 +720,55 @@ static int rustsecp256k1_v0_4_0_ecmult_pippenger_wnaf(rustsecp256k1_v0_4_0_gej * int j; for (np = 0; np < num; ++np) { - if (rustsecp256k1_v0_4_0_scalar_is_zero(&sc[np]) || rustsecp256k1_v0_4_0_ge_is_infinity(&pt[np])) { + if (rustsecp256k1_v0_4_1_scalar_is_zero(&sc[np]) || rustsecp256k1_v0_4_1_ge_is_infinity(&pt[np])) { continue; } state->ps[no].input_pos = np; - state->ps[no].skew_na = rustsecp256k1_v0_4_0_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1); + state->ps[no].skew_na = rustsecp256k1_v0_4_1_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1); no++; } - rustsecp256k1_v0_4_0_gej_set_infinity(r); + rustsecp256k1_v0_4_1_gej_set_infinity(r); if (no == 0) { return 1; } for (i = n_wnaf - 1; i >= 0; i--) { - rustsecp256k1_v0_4_0_gej running_sum; + rustsecp256k1_v0_4_1_gej running_sum; for(j = 0; j < ECMULT_TABLE_SIZE(bucket_window+2); j++) { - rustsecp256k1_v0_4_0_gej_set_infinity(&buckets[j]); + rustsecp256k1_v0_4_1_gej_set_infinity(&buckets[j]); } for (np = 0; np < no; ++np) { int n = state->wnaf_na[np*n_wnaf + i]; - struct rustsecp256k1_v0_4_0_pippenger_point_state point_state = state->ps[np]; - rustsecp256k1_v0_4_0_ge tmp; + struct rustsecp256k1_v0_4_1_pippenger_point_state point_state = state->ps[np]; + rustsecp256k1_v0_4_1_ge tmp; int idx; if (i == 0) { /* correct for wnaf skew */ int skew = point_state.skew_na; if (skew) { - rustsecp256k1_v0_4_0_ge_neg(&tmp, &pt[point_state.input_pos]); - rustsecp256k1_v0_4_0_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL); + rustsecp256k1_v0_4_1_ge_neg(&tmp, &pt[point_state.input_pos]); + rustsecp256k1_v0_4_1_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL); } } if (n > 0) { idx = (n - 1)/2; - rustsecp256k1_v0_4_0_gej_add_ge_var(&buckets[idx], &buckets[idx], &pt[point_state.input_pos], NULL); + rustsecp256k1_v0_4_1_gej_add_ge_var(&buckets[idx], &buckets[idx], &pt[point_state.input_pos], NULL); } else if (n < 0) { idx = -(n + 1)/2; - rustsecp256k1_v0_4_0_ge_neg(&tmp, &pt[point_state.input_pos]); - rustsecp256k1_v0_4_0_gej_add_ge_var(&buckets[idx], &buckets[idx], &tmp, NULL); + rustsecp256k1_v0_4_1_ge_neg(&tmp, &pt[point_state.input_pos]); + rustsecp256k1_v0_4_1_gej_add_ge_var(&buckets[idx], &buckets[idx], &tmp, NULL); } } for(j = 0; j < bucket_window; j++) { - rustsecp256k1_v0_4_0_gej_double_var(r, r, NULL); + rustsecp256k1_v0_4_1_gej_double_var(r, r, NULL); } - rustsecp256k1_v0_4_0_gej_set_infinity(&running_sum); + rustsecp256k1_v0_4_1_gej_set_infinity(&running_sum); /* Accumulate the sum: bucket[0] + 3*bucket[1] + 5*bucket[2] + 7*bucket[3] + ... * = bucket[0] + bucket[1] + bucket[2] + bucket[3] + ... * + 2 * (bucket[1] + 2*bucket[2] + 3*bucket[3] + ...) @@ -778,13 +778,13 @@ static int rustsecp256k1_v0_4_0_ecmult_pippenger_wnaf(rustsecp256k1_v0_4_0_gej * * The doubling is done implicitly by deferring the final window doubling (of 'r'). 
*/ for(j = ECMULT_TABLE_SIZE(bucket_window+2) - 1; j > 0; j--) { - rustsecp256k1_v0_4_0_gej_add_var(&running_sum, &running_sum, &buckets[j], NULL); - rustsecp256k1_v0_4_0_gej_add_var(r, r, &running_sum, NULL); + rustsecp256k1_v0_4_1_gej_add_var(&running_sum, &running_sum, &buckets[j], NULL); + rustsecp256k1_v0_4_1_gej_add_var(r, r, &running_sum, NULL); } - rustsecp256k1_v0_4_0_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL); - rustsecp256k1_v0_4_0_gej_double_var(r, r, NULL); - rustsecp256k1_v0_4_0_gej_add_var(r, r, &running_sum, NULL); + rustsecp256k1_v0_4_1_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL); + rustsecp256k1_v0_4_1_gej_double_var(r, r, NULL); + rustsecp256k1_v0_4_1_gej_add_var(r, r, &running_sum, NULL); } return 1; } @@ -793,7 +793,7 @@ static int rustsecp256k1_v0_4_0_ecmult_pippenger_wnaf(rustsecp256k1_v0_4_0_gej * * Returns optimal bucket_window (number of bits of a scalar represented by a * set of buckets) for a given number of points. */ -static int rustsecp256k1_v0_4_0_pippenger_bucket_window(size_t n) { +static int rustsecp256k1_v0_4_1_pippenger_bucket_window(size_t n) { if (n <= 1) { return 1; } else if (n <= 4) { @@ -822,7 +822,7 @@ static int rustsecp256k1_v0_4_0_pippenger_bucket_window(size_t n) { /** * Returns the maximum optimal number of points for a bucket_window. */ -static size_t rustsecp256k1_v0_4_0_pippenger_bucket_window_inv(int bucket_window) { +static size_t rustsecp256k1_v0_4_1_pippenger_bucket_window_inv(int bucket_window) { switch(bucket_window) { case 1: return 1; case 2: return 4; @@ -841,18 +841,18 @@ static size_t rustsecp256k1_v0_4_0_pippenger_bucket_window_inv(int bucket_window } -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_ecmult_endo_split(rustsecp256k1_v0_4_0_scalar *s1, rustsecp256k1_v0_4_0_scalar *s2, rustsecp256k1_v0_4_0_ge *p1, rustsecp256k1_v0_4_0_ge *p2) { - rustsecp256k1_v0_4_0_scalar tmp = *s1; - rustsecp256k1_v0_4_0_scalar_split_lambda(s1, s2, &tmp); - rustsecp256k1_v0_4_0_ge_mul_lambda(p2, p1); +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_ecmult_endo_split(rustsecp256k1_v0_4_1_scalar *s1, rustsecp256k1_v0_4_1_scalar *s2, rustsecp256k1_v0_4_1_ge *p1, rustsecp256k1_v0_4_1_ge *p2) { + rustsecp256k1_v0_4_1_scalar tmp = *s1; + rustsecp256k1_v0_4_1_scalar_split_lambda(s1, s2, &tmp); + rustsecp256k1_v0_4_1_ge_mul_lambda(p2, p1); - if (rustsecp256k1_v0_4_0_scalar_is_high(s1)) { - rustsecp256k1_v0_4_0_scalar_negate(s1, s1); - rustsecp256k1_v0_4_0_ge_neg(p1, p1); + if (rustsecp256k1_v0_4_1_scalar_is_high(s1)) { + rustsecp256k1_v0_4_1_scalar_negate(s1, s1); + rustsecp256k1_v0_4_1_ge_neg(p1, p1); } - if (rustsecp256k1_v0_4_0_scalar_is_high(s2)) { - rustsecp256k1_v0_4_0_scalar_negate(s2, s2); - rustsecp256k1_v0_4_0_ge_neg(p2, p2); + if (rustsecp256k1_v0_4_1_scalar_is_high(s2)) { + rustsecp256k1_v0_4_1_scalar_negate(s2, s2); + rustsecp256k1_v0_4_1_ge_neg(p2, p2); } } @@ -860,89 +860,89 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_ecmult_endo_split(rustsecp256k * Returns the scratch size required for a given number of points (excluding * base point G) without considering alignment. 
*/ -static size_t rustsecp256k1_v0_4_0_pippenger_scratch_size(size_t n_points, int bucket_window) { +static size_t rustsecp256k1_v0_4_1_pippenger_scratch_size(size_t n_points, int bucket_window) { size_t entries = 2*n_points + 2; - size_t entry_size = sizeof(rustsecp256k1_v0_4_0_ge) + sizeof(rustsecp256k1_v0_4_0_scalar) + sizeof(struct rustsecp256k1_v0_4_0_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int); - return (sizeof(rustsecp256k1_v0_4_0_gej) << bucket_window) + sizeof(struct rustsecp256k1_v0_4_0_pippenger_state) + entries * entry_size; + size_t entry_size = sizeof(rustsecp256k1_v0_4_1_ge) + sizeof(rustsecp256k1_v0_4_1_scalar) + sizeof(struct rustsecp256k1_v0_4_1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int); + return (sizeof(rustsecp256k1_v0_4_1_gej) << bucket_window) + sizeof(struct rustsecp256k1_v0_4_1_pippenger_state) + entries * entry_size; } -static int rustsecp256k1_v0_4_0_ecmult_pippenger_batch(const rustsecp256k1_v0_4_0_callback* error_callback, const rustsecp256k1_v0_4_0_ecmult_context *ctx, rustsecp256k1_v0_4_0_scratch *scratch, rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_scalar *inp_g_sc, rustsecp256k1_v0_4_0_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) { - const size_t scratch_checkpoint = rustsecp256k1_v0_4_0_scratch_checkpoint(error_callback, scratch); +static int rustsecp256k1_v0_4_1_ecmult_pippenger_batch(const rustsecp256k1_v0_4_1_callback* error_callback, const rustsecp256k1_v0_4_1_ecmult_context *ctx, rustsecp256k1_v0_4_1_scratch *scratch, rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_scalar *inp_g_sc, rustsecp256k1_v0_4_1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) { + const size_t scratch_checkpoint = rustsecp256k1_v0_4_1_scratch_checkpoint(error_callback, scratch); /* Use 2(n+1) with the endomorphism, when calculating batch * sizes. The reason for +1 is that we add the G scalar to the list of * other scalars. 
*/ size_t entries = 2*n_points + 2; - rustsecp256k1_v0_4_0_ge *points; - rustsecp256k1_v0_4_0_scalar *scalars; - rustsecp256k1_v0_4_0_gej *buckets; - struct rustsecp256k1_v0_4_0_pippenger_state *state_space; + rustsecp256k1_v0_4_1_ge *points; + rustsecp256k1_v0_4_1_scalar *scalars; + rustsecp256k1_v0_4_1_gej *buckets; + struct rustsecp256k1_v0_4_1_pippenger_state *state_space; size_t idx = 0; size_t point_idx = 0; int i, j; int bucket_window; (void)ctx; - rustsecp256k1_v0_4_0_gej_set_infinity(r); + rustsecp256k1_v0_4_1_gej_set_infinity(r); if (inp_g_sc == NULL && n_points == 0) { return 1; } - bucket_window = rustsecp256k1_v0_4_0_pippenger_bucket_window(n_points); - points = (rustsecp256k1_v0_4_0_ge *) rustsecp256k1_v0_4_0_scratch_alloc(error_callback, scratch, entries * sizeof(*points)); - scalars = (rustsecp256k1_v0_4_0_scalar *) rustsecp256k1_v0_4_0_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars)); - state_space = (struct rustsecp256k1_v0_4_0_pippenger_state *) rustsecp256k1_v0_4_0_scratch_alloc(error_callback, scratch, sizeof(*state_space)); + bucket_window = rustsecp256k1_v0_4_1_pippenger_bucket_window(n_points); + points = (rustsecp256k1_v0_4_1_ge *) rustsecp256k1_v0_4_1_scratch_alloc(error_callback, scratch, entries * sizeof(*points)); + scalars = (rustsecp256k1_v0_4_1_scalar *) rustsecp256k1_v0_4_1_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars)); + state_space = (struct rustsecp256k1_v0_4_1_pippenger_state *) rustsecp256k1_v0_4_1_scratch_alloc(error_callback, scratch, sizeof(*state_space)); if (points == NULL || scalars == NULL || state_space == NULL) { - rustsecp256k1_v0_4_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + rustsecp256k1_v0_4_1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } - state_space->ps = (struct rustsecp256k1_v0_4_0_pippenger_point_state *) rustsecp256k1_v0_4_0_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps)); - state_space->wnaf_na = (int *) rustsecp256k1_v0_4_0_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int)); - buckets = (rustsecp256k1_v0_4_0_gej *) rustsecp256k1_v0_4_0_scratch_alloc(error_callback, scratch, (1<<bucket_window) * sizeof(*buckets)); + state_space->ps = (struct rustsecp256k1_v0_4_1_pippenger_point_state *) rustsecp256k1_v0_4_1_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps)); + state_space->wnaf_na = (int *) rustsecp256k1_v0_4_1_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int)); + buckets = (rustsecp256k1_v0_4_1_gej *) rustsecp256k1_v0_4_1_scratch_alloc(error_callback, scratch, (1<<bucket_window) * sizeof(*buckets)); if (state_space->ps == NULL || state_space->wnaf_na == NULL || buckets == NULL) { - rustsecp256k1_v0_4_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + rustsecp256k1_v0_4_1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } if (inp_g_sc != NULL) { scalars[0] = *inp_g_sc; - points[0] = rustsecp256k1_v0_4_0_ge_const_g; + points[0] = rustsecp256k1_v0_4_1_ge_const_g; idx++; - rustsecp256k1_v0_4_0_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]); + rustsecp256k1_v0_4_1_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]); idx++; } while (point_idx < n_points) { if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) { - rustsecp256k1_v0_4_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + rustsecp256k1_v0_4_1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 0; } idx++; - rustsecp256k1_v0_4_0_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]); + rustsecp256k1_v0_4_1_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]); idx++; point_idx++; } - rustsecp256k1_v0_4_0_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx); + rustsecp256k1_v0_4_1_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx); /* Clear data */ for(i = 0; (size_t)i < idx; i++) { - rustsecp256k1_v0_4_0_scalar_clear(&scalars[i]); + rustsecp256k1_v0_4_1_scalar_clear(&scalars[i]); state_space->ps[i].skew_na = 0; for(j = 0; j < WNAF_SIZE(bucket_window+1); j++) { state_space->wnaf_na[i * WNAF_SIZE(bucket_window+1) + j] = 0; } } for(i = 0; i < 1<<bucket_window; i++) { - rustsecp256k1_v0_4_0_gej_clear(&buckets[i]); + rustsecp256k1_v0_4_1_gej_clear(&buckets[i]); } - rustsecp256k1_v0_4_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + rustsecp256k1_v0_4_1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 1; } -/* Wrapper for rustsecp256k1_v0_4_0_ecmult_multi_func interface */ -static int rustsecp256k1_v0_4_0_ecmult_pippenger_batch_single(const rustsecp256k1_v0_4_0_callback* error_callback, const rustsecp256k1_v0_4_0_ecmult_context *actx, rustsecp256k1_v0_4_0_scratch *scratch, rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_scalar *inp_g_sc, rustsecp256k1_v0_4_0_ecmult_multi_callback cb, void *cbdata, size_t n) { - return rustsecp256k1_v0_4_0_ecmult_pippenger_batch(error_callback, actx, scratch, r, inp_g_sc, cb, cbdata, n, 0); +/* Wrapper for rustsecp256k1_v0_4_1_ecmult_multi_func interface */ +static int rustsecp256k1_v0_4_1_ecmult_pippenger_batch_single(const rustsecp256k1_v0_4_1_callback* error_callback, const rustsecp256k1_v0_4_1_ecmult_context *actx, rustsecp256k1_v0_4_1_scratch *scratch, rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_scalar *inp_g_sc, rustsecp256k1_v0_4_1_ecmult_multi_callback cb, void *cbdata, size_t n) { + return rustsecp256k1_v0_4_1_ecmult_pippenger_batch(error_callback, actx, scratch, r, inp_g_sc, cb, cbdata, n, 0); } /** * Returns the maximum number of points in addition to G that can be used with * a given scratch space. The function ensures that fewer points may also be * used. */ -static size_t rustsecp256k1_v0_4_0_pippenger_max_points(const rustsecp256k1_v0_4_0_callback* error_callback, rustsecp256k1_v0_4_0_scratch *scratch) { - size_t max_alloc = rustsecp256k1_v0_4_0_scratch_max_allocation(error_callback, scratch, PIPPENGER_SCRATCH_OBJECTS); +static size_t rustsecp256k1_v0_4_1_pippenger_max_points(const rustsecp256k1_v0_4_1_callback* error_callback, rustsecp256k1_v0_4_1_scratch *scratch) { + size_t max_alloc = rustsecp256k1_v0_4_1_scratch_max_allocation(error_callback, scratch, PIPPENGER_SCRATCH_OBJECTS); int bucket_window; size_t res = 0; for (bucket_window = 1; bucket_window <= PIPPENGER_MAX_BUCKET_WINDOW; bucket_window++) { size_t n_points; - size_t max_points = rustsecp256k1_v0_4_0_pippenger_bucket_window_inv(bucket_window); + size_t max_points = rustsecp256k1_v0_4_1_pippenger_bucket_window_inv(bucket_window); size_t space_for_points; size_t space_overhead; - size_t entry_size = sizeof(rustsecp256k1_v0_4_0_ge) + sizeof(rustsecp256k1_v0_4_0_scalar) + sizeof(struct rustsecp256k1_v0_4_0_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int); + size_t entry_size = sizeof(rustsecp256k1_v0_4_1_ge) + sizeof(rustsecp256k1_v0_4_1_scalar) + sizeof(struct rustsecp256k1_v0_4_1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int); entry_size = 2*entry_size; - space_overhead = (sizeof(rustsecp256k1_v0_4_0_gej) << bucket_window) + entry_size + sizeof(struct rustsecp256k1_v0_4_0_pippenger_state); + space_overhead = (sizeof(rustsecp256k1_v0_4_1_gej) << bucket_window) + entry_size + sizeof(struct rustsecp256k1_v0_4_1_pippenger_state); if (space_overhead > max_alloc) { break; } @@ -986,34 +986,34 @@ static size_t rustsecp256k1_v0_4_0_pippenger_max_points(const rustsecp256k1_v0_4 /* Computes ecmult_multi by simply multiplying and adding each point. Does not * require a scratch space */ -static int rustsecp256k1_v0_4_0_ecmult_multi_simple_var(const rustsecp256k1_v0_4_0_ecmult_context *ctx, rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_scalar *inp_g_sc, rustsecp256k1_v0_4_0_ecmult_multi_callback cb, void *cbdata, size_t n_points) { +static int rustsecp256k1_v0_4_1_ecmult_multi_simple_var(const rustsecp256k1_v0_4_1_ecmult_context *ctx, rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_scalar *inp_g_sc, rustsecp256k1_v0_4_1_ecmult_multi_callback cb, void *cbdata, size_t n_points) { size_t point_idx; - rustsecp256k1_v0_4_0_scalar szero; - rustsecp256k1_v0_4_0_gej tmpj; + rustsecp256k1_v0_4_1_scalar szero; + rustsecp256k1_v0_4_1_gej tmpj; - rustsecp256k1_v0_4_0_scalar_set_int(&szero, 0); - rustsecp256k1_v0_4_0_gej_set_infinity(r); - rustsecp256k1_v0_4_0_gej_set_infinity(&tmpj); + rustsecp256k1_v0_4_1_scalar_set_int(&szero, 0); + rustsecp256k1_v0_4_1_gej_set_infinity(r); + rustsecp256k1_v0_4_1_gej_set_infinity(&tmpj); /* r = inp_g_sc*G */ - rustsecp256k1_v0_4_0_ecmult(ctx, r, &tmpj, &szero, inp_g_sc); + rustsecp256k1_v0_4_1_ecmult(ctx, r, &tmpj, &szero, inp_g_sc); for (point_idx = 0; point_idx < n_points; point_idx++) { - rustsecp256k1_v0_4_0_ge point; - rustsecp256k1_v0_4_0_gej pointj; - rustsecp256k1_v0_4_0_scalar scalar; + rustsecp256k1_v0_4_1_ge point; + rustsecp256k1_v0_4_1_gej pointj; + rustsecp256k1_v0_4_1_scalar scalar; if (!cb(&scalar, &point, point_idx, cbdata)) { return 0; } /* r += scalar*point */ - rustsecp256k1_v0_4_0_gej_set_ge(&pointj, &point); - rustsecp256k1_v0_4_0_ecmult(ctx, &tmpj, &pointj, &scalar, NULL); - rustsecp256k1_v0_4_0_gej_add_var(r, r, &tmpj, NULL); + rustsecp256k1_v0_4_1_gej_set_ge(&pointj, &point); + rustsecp256k1_v0_4_1_ecmult(ctx, &tmpj, &pointj, &scalar, NULL); + rustsecp256k1_v0_4_1_gej_add_var(r, r, &tmpj, NULL); } return 1; } /* Compute the number of batches and the batch size given the maximum batch size and the * total number of points */ -static int rustsecp256k1_v0_4_0_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) { +static int rustsecp256k1_v0_4_1_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) { if (max_n_batch_points == 0) { return 0; } @@ -1031,50 +1031,50 @@ static int rustsecp256k1_v0_4_0_ecmult_multi_batch_size_helper(size_t *n_batches return 1; } -typedef int (*rustsecp256k1_v0_4_0_ecmult_multi_func)(const rustsecp256k1_v0_4_0_callback* error_callback, const
rustsecp256k1_v0_4_0_ecmult_context*, rustsecp256k1_v0_4_0_scratch*, rustsecp256k1_v0_4_0_gej*, const rustsecp256k1_v0_4_0_scalar*, rustsecp256k1_v0_4_0_ecmult_multi_callback cb, void*, size_t); -static int rustsecp256k1_v0_4_0_ecmult_multi_var(const rustsecp256k1_v0_4_0_callback* error_callback, const rustsecp256k1_v0_4_0_ecmult_context *ctx, rustsecp256k1_v0_4_0_scratch *scratch, rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_scalar *inp_g_sc, rustsecp256k1_v0_4_0_ecmult_multi_callback cb, void *cbdata, size_t n) { +typedef int (*rustsecp256k1_v0_4_1_ecmult_multi_func)(const rustsecp256k1_v0_4_1_callback* error_callback, const rustsecp256k1_v0_4_1_ecmult_context*, rustsecp256k1_v0_4_1_scratch*, rustsecp256k1_v0_4_1_gej*, const rustsecp256k1_v0_4_1_scalar*, rustsecp256k1_v0_4_1_ecmult_multi_callback cb, void*, size_t); +static int rustsecp256k1_v0_4_1_ecmult_multi_var(const rustsecp256k1_v0_4_1_callback* error_callback, const rustsecp256k1_v0_4_1_ecmult_context *ctx, rustsecp256k1_v0_4_1_scratch *scratch, rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_scalar *inp_g_sc, rustsecp256k1_v0_4_1_ecmult_multi_callback cb, void *cbdata, size_t n) { size_t i; - int (*f)(const rustsecp256k1_v0_4_0_callback* error_callback, const rustsecp256k1_v0_4_0_ecmult_context*, rustsecp256k1_v0_4_0_scratch*, rustsecp256k1_v0_4_0_gej*, const rustsecp256k1_v0_4_0_scalar*, rustsecp256k1_v0_4_0_ecmult_multi_callback cb, void*, size_t, size_t); + int (*f)(const rustsecp256k1_v0_4_1_callback* error_callback, const rustsecp256k1_v0_4_1_ecmult_context*, rustsecp256k1_v0_4_1_scratch*, rustsecp256k1_v0_4_1_gej*, const rustsecp256k1_v0_4_1_scalar*, rustsecp256k1_v0_4_1_ecmult_multi_callback cb, void*, size_t, size_t); size_t n_batches; size_t n_batch_points; - rustsecp256k1_v0_4_0_gej_set_infinity(r); + rustsecp256k1_v0_4_1_gej_set_infinity(r); if (inp_g_sc == NULL && n == 0) { return 1; } else if (n == 0) { - rustsecp256k1_v0_4_0_scalar szero; - rustsecp256k1_v0_4_0_scalar_set_int(&szero, 0); - rustsecp256k1_v0_4_0_ecmult(ctx, r, r, &szero, inp_g_sc); + rustsecp256k1_v0_4_1_scalar szero; + rustsecp256k1_v0_4_1_scalar_set_int(&szero, 0); + rustsecp256k1_v0_4_1_ecmult(ctx, r, r, &szero, inp_g_sc); return 1; } if (scratch == NULL) { - return rustsecp256k1_v0_4_0_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n); + return rustsecp256k1_v0_4_1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n); } /* Compute the batch sizes for Pippenger's algorithm given a scratch space. If it's greater than * a threshold use Pippenger's algorithm. Otherwise use Strauss' algorithm. * As a first step check if there's enough space for Pippenger's algo (which requires less space * than Strauss' algo) and if not, use the simple algorithm. 
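For intuition about the batch splitting used here: rustsecp256k1_v0_4_1_ecmult_multi_batch_size_helper computes ceil(n/max) batches and then a per-batch point count of ceil(n/n_batches). A minimal standalone sketch of that arithmetic, under the assumption that this is what the elided middle of the helper's hunk does (the real helper also clamps max_n_batch_points to ECMULT_MAX_POINTS_PER_BATCH; the name batch_size_helper below is illustrative only):

    #include <stddef.h>
    #include <stdio.h>

    static int batch_size_helper(size_t *n_batches, size_t *n_batch_points,
                                 size_t max_n_batch_points, size_t n) {
        if (max_n_batch_points == 0) return 0;
        if (n == 0) { *n_batches = 0; *n_batch_points = 0; return 1; }
        *n_batches = 1 + (n - 1) / max_n_batch_points; /* ceil(n / max)       */
        *n_batch_points = 1 + (n - 1) / *n_batches;    /* ceil(n / n_batches) */
        return 1;
    }

    int main(void) {
        size_t b = 0, p = 0;
        /* 250 points with room for at most 100 per batch: */
        batch_size_helper(&b, &p, 100, 250);
        printf("%zu batches of at most %zu points\n", b, p); /* 3 and 84 */
        return 0;
    }

Rebalancing to ceil(n/n_batches) rather than filling batches to the maximum keeps the batches near-equal in size, which matters because Pippenger's per-batch cost grows superlinearly with the largest batch.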
*/ - if (!rustsecp256k1_v0_4_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, rustsecp256k1_v0_4_0_pippenger_max_points(error_callback, scratch), n)) { - return rustsecp256k1_v0_4_0_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n); + if (!rustsecp256k1_v0_4_1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, rustsecp256k1_v0_4_1_pippenger_max_points(error_callback, scratch), n)) { + return rustsecp256k1_v0_4_1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n); } if (n_batch_points >= ECMULT_PIPPENGER_THRESHOLD) { - f = rustsecp256k1_v0_4_0_ecmult_pippenger_batch; + f = rustsecp256k1_v0_4_1_ecmult_pippenger_batch; } else { - if (!rustsecp256k1_v0_4_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, rustsecp256k1_v0_4_0_strauss_max_points(error_callback, scratch), n)) { - return rustsecp256k1_v0_4_0_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n); + if (!rustsecp256k1_v0_4_1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, rustsecp256k1_v0_4_1_strauss_max_points(error_callback, scratch), n)) { + return rustsecp256k1_v0_4_1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n); } - f = rustsecp256k1_v0_4_0_ecmult_strauss_batch; + f = rustsecp256k1_v0_4_1_ecmult_strauss_batch; } for(i = 0; i < n_batches; i++) { size_t nbp = n < n_batch_points ? n : n_batch_points; size_t offset = n_batch_points*i; - rustsecp256k1_v0_4_0_gej tmp; + rustsecp256k1_v0_4_1_gej tmp; if (!f(error_callback, ctx, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) { return 0; } - rustsecp256k1_v0_4_0_gej_add_var(r, r, &tmp, NULL); + rustsecp256k1_v0_4_1_gej_add_var(r, r, &tmp, NULL); n -= nbp; } return 1; diff --git a/secp256k1-sys/depend/secp256k1/src/field.h b/secp256k1-sys/depend/secp256k1/src/field.h index 0fd980b..bd1fd50 100644 --- a/secp256k1-sys/depend/secp256k1/src/field.h +++ b/secp256k1-sys/depend/secp256k1/src/field.h @@ -35,100 +35,91 @@ /** Normalize a field element. This brings the field element to a canonical representation, reduces * its magnitude to 1, and reduces it modulo field size `p`. */ -static void rustsecp256k1_v0_4_0_fe_normalize(rustsecp256k1_v0_4_0_fe *r); +static void rustsecp256k1_v0_4_1_fe_normalize(rustsecp256k1_v0_4_1_fe *r); /** Weakly normalize a field element: reduce its magnitude to 1, but don't fully normalize. */ -static void rustsecp256k1_v0_4_0_fe_normalize_weak(rustsecp256k1_v0_4_0_fe *r); +static void rustsecp256k1_v0_4_1_fe_normalize_weak(rustsecp256k1_v0_4_1_fe *r); /** Normalize a field element, without constant-time guarantee. */ -static void rustsecp256k1_v0_4_0_fe_normalize_var(rustsecp256k1_v0_4_0_fe *r); +static void rustsecp256k1_v0_4_1_fe_normalize_var(rustsecp256k1_v0_4_1_fe *r); -/** Verify whether a field element represents zero i.e. would normalize to a zero value. The field - * implementation may optionally normalize the input, but this should not be relied upon. */ -static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero(rustsecp256k1_v0_4_0_fe *r); +/** Verify whether a field element represents zero i.e. would normalize to a zero value. */ +static int rustsecp256k1_v0_4_1_fe_normalizes_to_zero(const rustsecp256k1_v0_4_1_fe *r); -/** Verify whether a field element represents zero i.e. would normalize to a zero value. The field - * implementation may optionally normalize the input, but this should not be relied upon. */ -static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(rustsecp256k1_v0_4_0_fe *r); +/** Verify whether a field element represents zero i.e. 
would normalize to a zero value, + * without constant-time guarantee. */ +static int rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(const rustsecp256k1_v0_4_1_fe *r); /** Set a field element equal to a small integer. Resulting field element is normalized. */ -static void rustsecp256k1_v0_4_0_fe_set_int(rustsecp256k1_v0_4_0_fe *r, int a); +static void rustsecp256k1_v0_4_1_fe_set_int(rustsecp256k1_v0_4_1_fe *r, int a); /** Sets a field element equal to zero, initializing all fields. */ -static void rustsecp256k1_v0_4_0_fe_clear(rustsecp256k1_v0_4_0_fe *a); +static void rustsecp256k1_v0_4_1_fe_clear(rustsecp256k1_v0_4_1_fe *a); /** Verify whether a field element is zero. Requires the input to be normalized. */ -static int rustsecp256k1_v0_4_0_fe_is_zero(const rustsecp256k1_v0_4_0_fe *a); +static int rustsecp256k1_v0_4_1_fe_is_zero(const rustsecp256k1_v0_4_1_fe *a); /** Check the "oddness" of a field element. Requires the input to be normalized. */ -static int rustsecp256k1_v0_4_0_fe_is_odd(const rustsecp256k1_v0_4_0_fe *a); +static int rustsecp256k1_v0_4_1_fe_is_odd(const rustsecp256k1_v0_4_1_fe *a); /** Compare two field elements. Requires magnitude-1 inputs. */ -static int rustsecp256k1_v0_4_0_fe_equal(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b); +static int rustsecp256k1_v0_4_1_fe_equal(const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe *b); -/** Same as rustsecp256k1_v0_4_0_fe_equal, but may be variable time. */ -static int rustsecp256k1_v0_4_0_fe_equal_var(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b); +/** Same as rustsecp256k1_v0_4_1_fe_equal, but may be variable time. */ +static int rustsecp256k1_v0_4_1_fe_equal_var(const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe *b); /** Compare two field elements. Requires both inputs to be normalized */ -static int rustsecp256k1_v0_4_0_fe_cmp_var(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b); +static int rustsecp256k1_v0_4_1_fe_cmp_var(const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe *b); /** Set a field element equal to 32-byte big endian value. If successful, the resulting field element is normalized. */ -static int rustsecp256k1_v0_4_0_fe_set_b32(rustsecp256k1_v0_4_0_fe *r, const unsigned char *a); +static int rustsecp256k1_v0_4_1_fe_set_b32(rustsecp256k1_v0_4_1_fe *r, const unsigned char *a); /** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */ -static void rustsecp256k1_v0_4_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_4_0_fe *a); +static void rustsecp256k1_v0_4_1_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_4_1_fe *a); /** Set a field element equal to the additive inverse of another. Takes a maximum magnitude of the input * as an argument. The magnitude of the output is one higher. */ -static void rustsecp256k1_v0_4_0_fe_negate(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, int m); +static void rustsecp256k1_v0_4_1_fe_negate(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a, int m); /** Multiplies the passed field element with a small integer constant. Multiplies the magnitude by that * small integer. */ -static void rustsecp256k1_v0_4_0_fe_mul_int(rustsecp256k1_v0_4_0_fe *r, int a); +static void rustsecp256k1_v0_4_1_fe_mul_int(rustsecp256k1_v0_4_1_fe *r, int a); /** Adds a field element to another. The result has the sum of the inputs' magnitudes as magnitude. 
*/ -static void rustsecp256k1_v0_4_0_fe_add(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a); +static void rustsecp256k1_v0_4_1_fe_add(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a); /** Sets a field element to be the product of two others. Requires the inputs' magnitudes to be at most 8. * The output magnitude is 1 (but not guaranteed to be normalized). */ -static void rustsecp256k1_v0_4_0_fe_mul(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe * SECP256K1_RESTRICT b); +static void rustsecp256k1_v0_4_1_fe_mul(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe * SECP256K1_RESTRICT b); /** Sets a field element to be the square of another. Requires the input's magnitude to be at most 8. * The output magnitude is 1 (but not guaranteed to be normalized). */ -static void rustsecp256k1_v0_4_0_fe_sqr(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a); +static void rustsecp256k1_v0_4_1_fe_sqr(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a); /** If a has a square root, it is computed in r and 1 is returned. If a does not * have a square root, the root of its negation is computed and 0 is returned. * The input's magnitude can be at most 8. The output magnitude is 1 (but not * guaranteed to be normalized). The result in r will always be a square * itself. */ -static int rustsecp256k1_v0_4_0_fe_sqrt(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a); - -/** Checks whether a field element is a quadratic residue. */ -static int rustsecp256k1_v0_4_0_fe_is_quad_var(const rustsecp256k1_v0_4_0_fe *a); +static int rustsecp256k1_v0_4_1_fe_sqrt(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a); /** Sets a field element to be the (modular) inverse of another. Requires the input's magnitude to be * at most 8. The output magnitude is 1 (but not guaranteed to be normalized). */ -static void rustsecp256k1_v0_4_0_fe_inv(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a); +static void rustsecp256k1_v0_4_1_fe_inv(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a); -/** Potentially faster version of rustsecp256k1_v0_4_0_fe_inv, without constant-time guarantee. */ -static void rustsecp256k1_v0_4_0_fe_inv_var(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a); - -/** Calculate the (modular) inverses of a batch of field elements. Requires the inputs' magnitudes to be - * at most 8. The output magnitudes are 1 (but not guaranteed to be normalized). The inputs and - * outputs must not overlap in memory. */ -static void rustsecp256k1_v0_4_0_fe_inv_all_var(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, size_t len); +/** Potentially faster version of rustsecp256k1_v0_4_1_fe_inv, without constant-time guarantee. */ +static void rustsecp256k1_v0_4_1_fe_inv_var(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a); /** Convert a field element to the storage type. */ -static void rustsecp256k1_v0_4_0_fe_to_storage(rustsecp256k1_v0_4_0_fe_storage *r, const rustsecp256k1_v0_4_0_fe *a); +static void rustsecp256k1_v0_4_1_fe_to_storage(rustsecp256k1_v0_4_1_fe_storage *r, const rustsecp256k1_v0_4_1_fe *a); /** Convert a field element back from the storage type. 
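The magnitude bookkeeping documented in this header composes mechanically; subtraction, for instance, is a negate followed by an add. A hedged sketch of that composition (the helper name fe_sub_demo is hypothetical, and the code would only compile inside the library, where these static functions are visible):

    /* r = a - b for inputs of magnitude at most 1, per the contracts above. */
    static void fe_sub_demo(rustsecp256k1_v0_4_1_fe *r,
                            const rustsecp256k1_v0_4_1_fe *a,
                            const rustsecp256k1_v0_4_1_fe *b) {
        rustsecp256k1_v0_4_1_fe_negate(r, b, 1);  /* magnitude 1 in -> 2 out  */
        rustsecp256k1_v0_4_1_fe_add(r, a);        /* magnitudes add: 2 + 1 = 3 */
        /* Magnitude 3 is still a valid fe_mul/fe_sqr input (the limit is 8),
         * but fe_is_zero/fe_cmp_var need a fully normalized operand, so: */
        rustsecp256k1_v0_4_1_fe_normalize(r);     /* magnitude 1, normalized  */
    }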
*/ -static void rustsecp256k1_v0_4_0_fe_from_storage(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe_storage *a); +static void rustsecp256k1_v0_4_1_fe_from_storage(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe_storage *a); /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/ -static void rustsecp256k1_v0_4_0_fe_storage_cmov(rustsecp256k1_v0_4_0_fe_storage *r, const rustsecp256k1_v0_4_0_fe_storage *a, int flag); +static void rustsecp256k1_v0_4_1_fe_storage_cmov(rustsecp256k1_v0_4_1_fe_storage *r, const rustsecp256k1_v0_4_1_fe_storage *a, int flag); /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/ -static void rustsecp256k1_v0_4_0_fe_cmov(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, int flag); +static void rustsecp256k1_v0_4_1_fe_cmov(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a, int flag); #endif /* SECP256K1_FIELD_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/field_10x26.h b/secp256k1-sys/depend/secp256k1/src/field_10x26.h index 9a6aea5..8a162b7 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_10x26.h +++ b/secp256k1-sys/depend/secp256k1/src/field_10x26.h @@ -18,7 +18,7 @@ typedef struct { int magnitude; int normalized; #endif -} rustsecp256k1_v0_4_0_fe; +} rustsecp256k1_v0_4_1_fe; /* Unpacks a constant into a overlapping multi-limbed FE element. */ #define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \ @@ -42,7 +42,7 @@ typedef struct { typedef struct { uint32_t n[8]; -} rustsecp256k1_v0_4_0_fe_storage; +} rustsecp256k1_v0_4_1_fe_storage; #define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }} #define SECP256K1_FE_STORAGE_CONST_GET(d) d.n[7], d.n[6], d.n[5], d.n[4],d.n[3], d.n[2], d.n[1], d.n[0] diff --git a/secp256k1-sys/depend/secp256k1/src/field_10x26_impl.h b/secp256k1-sys/depend/secp256k1/src/field_10x26_impl.h index b98e17a..892c377 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_10x26_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/field_10x26_impl.h @@ -9,9 +9,10 @@ #include "util.h" #include "field.h" +#include "modinv32_impl.h" #ifdef VERIFY -static void rustsecp256k1_v0_4_0_fe_verify(const rustsecp256k1_v0_4_0_fe *a) { +static void rustsecp256k1_v0_4_1_fe_verify(const rustsecp256k1_v0_4_1_fe *a) { const uint32_t *d = a->n; int m = a->normalized ? 
1 : 2 * a->magnitude, r = 1; r &= (d[0] <= 0x3FFFFFFUL * m); @@ -39,7 +40,7 @@ static void rustsecp256k1_v0_4_0_fe_verify(const rustsecp256k1_v0_4_0_fe *a) { } #endif -static void rustsecp256k1_v0_4_0_fe_normalize(rustsecp256k1_v0_4_0_fe *r) { +static void rustsecp256k1_v0_4_1_fe_normalize(rustsecp256k1_v0_4_1_fe *r) { uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; @@ -90,11 +91,11 @@ static void rustsecp256k1_v0_4_0_fe_normalize(rustsecp256k1_v0_4_0_fe *r) { #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); #endif } -static void rustsecp256k1_v0_4_0_fe_normalize_weak(rustsecp256k1_v0_4_0_fe *r) { +static void rustsecp256k1_v0_4_1_fe_normalize_weak(rustsecp256k1_v0_4_1_fe *r) { uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; @@ -121,11 +122,11 @@ static void rustsecp256k1_v0_4_0_fe_normalize_weak(rustsecp256k1_v0_4_0_fe *r) { #ifdef VERIFY r->magnitude = 1; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); #endif } -static void rustsecp256k1_v0_4_0_fe_normalize_var(rustsecp256k1_v0_4_0_fe *r) { +static void rustsecp256k1_v0_4_1_fe_normalize_var(rustsecp256k1_v0_4_1_fe *r) { uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; @@ -177,11 +178,11 @@ static void rustsecp256k1_v0_4_0_fe_normalize_var(rustsecp256k1_v0_4_0_fe *r) { #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); #endif } -static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero(rustsecp256k1_v0_4_0_fe *r) { +static int rustsecp256k1_v0_4_1_fe_normalizes_to_zero(const rustsecp256k1_v0_4_1_fe *r) { uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; @@ -210,7 +211,7 @@ static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero(rustsecp256k1_v0_4_0_fe *r return (z0 == 0) | (z1 == 0x3FFFFFFUL); } -static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(rustsecp256k1_v0_4_0_fe *r) { +static int rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(const rustsecp256k1_v0_4_1_fe *r) { uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; uint32_t z0, z1; uint32_t x; @@ -262,34 +263,34 @@ static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(rustsecp256k1_v0_4_0_f return (z0 == 0) | (z1 == 0x3FFFFFFUL); } -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_set_int(rustsecp256k1_v0_4_0_fe *r, int a) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_set_int(rustsecp256k1_v0_4_1_fe *r, int a) { r->n[0] = a; r->n[1] = r->n[2] = r->n[3] = r->n[4] = r->n[5] = r->n[6] = r->n[7] = r->n[8] = r->n[9] = 0; #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); #endif } -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_fe_is_zero(const rustsecp256k1_v0_4_0_fe *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_fe_is_zero(const rustsecp256k1_v0_4_1_fe *a) { const uint32_t *t = a->n; #ifdef VERIFY VERIFY_CHECK(a->normalized); - rustsecp256k1_v0_4_0_fe_verify(a); + rustsecp256k1_v0_4_1_fe_verify(a); #endif return (t[0] | t[1] | t[2] | t[3] | t[4] | t[5] | t[6] | t[7] | t[8] | t[9]) == 0; } -SECP256K1_INLINE static int 
rustsecp256k1_v0_4_0_fe_is_odd(const rustsecp256k1_v0_4_0_fe *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_fe_is_odd(const rustsecp256k1_v0_4_1_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->normalized); - rustsecp256k1_v0_4_0_fe_verify(a); + rustsecp256k1_v0_4_1_fe_verify(a); #endif return a->n[0] & 1; } -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_clear(rustsecp256k1_v0_4_0_fe *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_clear(rustsecp256k1_v0_4_1_fe *a) { int i; #ifdef VERIFY a->magnitude = 0; @@ -300,13 +301,13 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_clear(rustsecp256k1_v0_4_0_ } } -static int rustsecp256k1_v0_4_0_fe_cmp_var(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b) { +static int rustsecp256k1_v0_4_1_fe_cmp_var(const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe *b) { int i; #ifdef VERIFY VERIFY_CHECK(a->normalized); VERIFY_CHECK(b->normalized); - rustsecp256k1_v0_4_0_fe_verify(a); - rustsecp256k1_v0_4_0_fe_verify(b); + rustsecp256k1_v0_4_1_fe_verify(a); + rustsecp256k1_v0_4_1_fe_verify(b); #endif for (i = 9; i >= 0; i--) { if (a->n[i] > b->n[i]) { @@ -319,7 +320,7 @@ static int rustsecp256k1_v0_4_0_fe_cmp_var(const rustsecp256k1_v0_4_0_fe *a, con return 0; } -static int rustsecp256k1_v0_4_0_fe_set_b32(rustsecp256k1_v0_4_0_fe *r, const unsigned char *a) { +static int rustsecp256k1_v0_4_1_fe_set_b32(rustsecp256k1_v0_4_1_fe *r, const unsigned char *a) { int ret; r->n[0] = (uint32_t)a[31] | ((uint32_t)a[30] << 8) | ((uint32_t)a[29] << 16) | ((uint32_t)(a[28] & 0x3) << 24); r->n[1] = (uint32_t)((a[28] >> 2) & 0x3f) | ((uint32_t)a[27] << 6) | ((uint32_t)a[26] << 14) | ((uint32_t)(a[25] & 0xf) << 22); @@ -337,7 +338,7 @@ static int rustsecp256k1_v0_4_0_fe_set_b32(rustsecp256k1_v0_4_0_fe *r, const uns r->magnitude = 1; if (ret) { r->normalized = 1; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); } else { r->normalized = 0; } @@ -346,10 +347,10 @@ static int rustsecp256k1_v0_4_0_fe_set_b32(rustsecp256k1_v0_4_0_fe *r, const uns } /** Convert a field element to a 32-byte big endian value. 
Requires the input to be normalized */ -static void rustsecp256k1_v0_4_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_4_0_fe *a) { +static void rustsecp256k1_v0_4_1_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_4_1_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->normalized); - rustsecp256k1_v0_4_0_fe_verify(a); + rustsecp256k1_v0_4_1_fe_verify(a); #endif r[0] = (a->n[9] >> 14) & 0xff; r[1] = (a->n[9] >> 6) & 0xff; @@ -385,10 +386,10 @@ static void rustsecp256k1_v0_4_0_fe_get_b32(unsigned char *r, const rustsecp256k r[31] = a->n[0] & 0xff; } -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_negate(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, int m) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_negate(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a, int m) { #ifdef VERIFY VERIFY_CHECK(a->magnitude <= m); - rustsecp256k1_v0_4_0_fe_verify(a); + rustsecp256k1_v0_4_1_fe_verify(a); #endif r->n[0] = 0x3FFFC2FUL * 2 * (m + 1) - a->n[0]; r->n[1] = 0x3FFFFBFUL * 2 * (m + 1) - a->n[1]; @@ -403,11 +404,11 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_negate(rustsecp256k1_v0_4_0 #ifdef VERIFY r->magnitude = m + 1; r->normalized = 0; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); #endif } -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_mul_int(rustsecp256k1_v0_4_0_fe *r, int a) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_mul_int(rustsecp256k1_v0_4_1_fe *r, int a) { r->n[0] *= a; r->n[1] *= a; r->n[2] *= a; @@ -421,13 +422,13 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_mul_int(rustsecp256k1_v0_4_ #ifdef VERIFY r->magnitude *= a; r->normalized = 0; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); #endif } -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_add(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_add(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a) { #ifdef VERIFY - rustsecp256k1_v0_4_0_fe_verify(a); + rustsecp256k1_v0_4_1_fe_verify(a); #endif r->n[0] += a->n[0]; r->n[1] += a->n[1]; @@ -442,15 +443,15 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_add(rustsecp256k1_v0_4_0_fe #ifdef VERIFY r->magnitude += a->magnitude; r->normalized = 0; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); #endif } #if defined(USE_EXTERNAL_ASM) /* External assembler implementation */ -void rustsecp256k1_v0_4_0_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b); -void rustsecp256k1_v0_4_0_fe_sqr_inner(uint32_t *r, const uint32_t *a); +void rustsecp256k1_v0_4_1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b); +void rustsecp256k1_v0_4_1_fe_sqr_inner(uint32_t *r, const uint32_t *a); #else @@ -460,7 +461,7 @@ void rustsecp256k1_v0_4_0_fe_sqr_inner(uint32_t *r, const uint32_t *a); #define VERIFY_BITS(x, n) do { } while(0) #endif -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) { uint64_t c, d; uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8; uint32_t t9, t1, t0, t2, t3, t4, t5, t6, t7; @@ -790,7 +791,7 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_mul_inner(uint32_t *r, cons /* [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 
p0] */ } -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_sqr_inner(uint32_t *r, const uint32_t *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_sqr_inner(uint32_t *r, const uint32_t *a) { uint64_t c, d; uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8; uint32_t t9, t0, t1, t2, t3, t4, t5, t6, t7; @@ -1065,37 +1066,37 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_sqr_inner(uint32_t *r, cons } #endif -static void rustsecp256k1_v0_4_0_fe_mul(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe * SECP256K1_RESTRICT b) { +static void rustsecp256k1_v0_4_1_fe_mul(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe * SECP256K1_RESTRICT b) { #ifdef VERIFY VERIFY_CHECK(a->magnitude <= 8); VERIFY_CHECK(b->magnitude <= 8); - rustsecp256k1_v0_4_0_fe_verify(a); - rustsecp256k1_v0_4_0_fe_verify(b); + rustsecp256k1_v0_4_1_fe_verify(a); + rustsecp256k1_v0_4_1_fe_verify(b); VERIFY_CHECK(r != b); VERIFY_CHECK(a != b); #endif - rustsecp256k1_v0_4_0_fe_mul_inner(r->n, a->n, b->n); + rustsecp256k1_v0_4_1_fe_mul_inner(r->n, a->n, b->n); #ifdef VERIFY r->magnitude = 1; r->normalized = 0; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); #endif } -static void rustsecp256k1_v0_4_0_fe_sqr(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) { +static void rustsecp256k1_v0_4_1_fe_sqr(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->magnitude <= 8); - rustsecp256k1_v0_4_0_fe_verify(a); + rustsecp256k1_v0_4_1_fe_verify(a); #endif - rustsecp256k1_v0_4_0_fe_sqr_inner(r->n, a->n); + rustsecp256k1_v0_4_1_fe_sqr_inner(r->n, a->n); #ifdef VERIFY r->magnitude = 1; r->normalized = 0; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); #endif } -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_cmov(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_fe_cmov(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a, int flag) { uint32_t mask0, mask1; VG_CHECK_VERIFY(r->n, sizeof(r->n)); mask0 = flag + ~((uint32_t)0); @@ -1118,7 +1119,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_cmov(rustsecp256k1_v0_4_0_f #endif } -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_storage_cmov(rustsecp256k1_v0_4_0_fe_storage *r, const rustsecp256k1_v0_4_0_fe_storage *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_fe_storage_cmov(rustsecp256k1_v0_4_1_fe_storage *r, const rustsecp256k1_v0_4_1_fe_storage *a, int flag) { uint32_t mask0, mask1; VG_CHECK_VERIFY(r->n, sizeof(r->n)); mask0 = flag + ~((uint32_t)0); @@ -1133,7 +1134,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_storage_cmov(rustsecp256k1_ r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1); } -static void rustsecp256k1_v0_4_0_fe_to_storage(rustsecp256k1_v0_4_0_fe_storage *r, const rustsecp256k1_v0_4_0_fe *a) { +static void rustsecp256k1_v0_4_1_fe_to_storage(rustsecp256k1_v0_4_1_fe_storage *r, const rustsecp256k1_v0_4_1_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->normalized); #endif @@ -1147,7 +1148,7 @@ static void rustsecp256k1_v0_4_0_fe_to_storage(rustsecp256k1_v0_4_0_fe_storage * r->n[7] = a->n[8] >> 16 | a->n[9] << 10; } -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_from_storage(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe_storage *a) { +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_fe_from_storage(rustsecp256k1_v0_4_1_fe *r, const 
rustsecp256k1_v0_4_1_fe_storage *a) { r->n[0] = a->n[0] & 0x3FFFFFFUL; r->n[1] = a->n[0] >> 26 | ((a->n[1] << 6) & 0x3FFFFFFUL); r->n[2] = a->n[1] >> 20 | ((a->n[2] << 12) & 0x3FFFFFFUL); @@ -1164,4 +1165,92 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_from_storage(rustsecp256k1_ #endif } +static void rustsecp256k1_v0_4_1_fe_from_signed30(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_modinv32_signed30 *a) { + const uint32_t M26 = UINT32_MAX >> 6; + const uint32_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4], + a5 = a->v[5], a6 = a->v[6], a7 = a->v[7], a8 = a->v[8]; + + /* The output from rustsecp256k1_v0_4_1_modinv32{_var} should be normalized to range [0,modulus), and + * have limbs in [0,2^30). The modulus is < 2^256, so the top limb must be below 2^(256-30*8). + */ + VERIFY_CHECK(a0 >> 30 == 0); + VERIFY_CHECK(a1 >> 30 == 0); + VERIFY_CHECK(a2 >> 30 == 0); + VERIFY_CHECK(a3 >> 30 == 0); + VERIFY_CHECK(a4 >> 30 == 0); + VERIFY_CHECK(a5 >> 30 == 0); + VERIFY_CHECK(a6 >> 30 == 0); + VERIFY_CHECK(a7 >> 30 == 0); + VERIFY_CHECK(a8 >> 16 == 0); + + r->n[0] = a0 & M26; + r->n[1] = (a0 >> 26 | a1 << 4) & M26; + r->n[2] = (a1 >> 22 | a2 << 8) & M26; + r->n[3] = (a2 >> 18 | a3 << 12) & M26; + r->n[4] = (a3 >> 14 | a4 << 16) & M26; + r->n[5] = (a4 >> 10 | a5 << 20) & M26; + r->n[6] = (a5 >> 6 | a6 << 24) & M26; + r->n[7] = (a6 >> 2 ) & M26; + r->n[8] = (a6 >> 28 | a7 << 2) & M26; + r->n[9] = (a7 >> 24 | a8 << 6); + +#ifdef VERIFY + r->magnitude = 1; + r->normalized = 1; + rustsecp256k1_v0_4_1_fe_verify(r); +#endif +} + +static void rustsecp256k1_v0_4_1_fe_to_signed30(rustsecp256k1_v0_4_1_modinv32_signed30 *r, const rustsecp256k1_v0_4_1_fe *a) { + const uint32_t M30 = UINT32_MAX >> 2; + const uint64_t a0 = a->n[0], a1 = a->n[1], a2 = a->n[2], a3 = a->n[3], a4 = a->n[4], + a5 = a->n[5], a6 = a->n[6], a7 = a->n[7], a8 = a->n[8], a9 = a->n[9]; + +#ifdef VERIFY + VERIFY_CHECK(a->normalized); +#endif + + r->v[0] = (a0 | a1 << 26) & M30; + r->v[1] = (a1 >> 4 | a2 << 22) & M30; + r->v[2] = (a2 >> 8 | a3 << 18) & M30; + r->v[3] = (a3 >> 12 | a4 << 14) & M30; + r->v[4] = (a4 >> 16 | a5 << 10) & M30; + r->v[5] = (a5 >> 20 | a6 << 6) & M30; + r->v[6] = (a6 >> 24 | a7 << 2 + | a8 << 28) & M30; + r->v[7] = (a8 >> 2 | a9 << 24) & M30; + r->v[8] = a9 >> 6; +} + +static const rustsecp256k1_v0_4_1_modinv32_modinfo rustsecp256k1_v0_4_1_const_modinfo_fe = { + {{-0x3D1, -4, 0, 0, 0, 0, 0, 0, 65536}}, + 0x2DDACACFL +}; + +static void rustsecp256k1_v0_4_1_fe_inv(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *x) { + rustsecp256k1_v0_4_1_fe tmp; + rustsecp256k1_v0_4_1_modinv32_signed30 s; + + tmp = *x; + rustsecp256k1_v0_4_1_fe_normalize(&tmp); + rustsecp256k1_v0_4_1_fe_to_signed30(&s, &tmp); + rustsecp256k1_v0_4_1_modinv32(&s, &rustsecp256k1_v0_4_1_const_modinfo_fe); + rustsecp256k1_v0_4_1_fe_from_signed30(r, &s); + + VERIFY_CHECK(rustsecp256k1_v0_4_1_fe_normalizes_to_zero(r) == rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&tmp)); +} + +static void rustsecp256k1_v0_4_1_fe_inv_var(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *x) { + rustsecp256k1_v0_4_1_fe tmp; + rustsecp256k1_v0_4_1_modinv32_signed30 s; + + tmp = *x; + rustsecp256k1_v0_4_1_fe_normalize_var(&tmp); + rustsecp256k1_v0_4_1_fe_to_signed30(&s, &tmp); + rustsecp256k1_v0_4_1_modinv32_var(&s, &rustsecp256k1_v0_4_1_const_modinfo_fe); + rustsecp256k1_v0_4_1_fe_from_signed30(r, &s); + + VERIFY_CHECK(rustsecp256k1_v0_4_1_fe_normalizes_to_zero(r) == rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&tmp)); +} 
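Two properties of the code above can be sanity-checked in isolation. First, fe_to_signed30 is a pure change of radix: the same 256 bits, regrouped from ten 26-bit limbs into nine 30-bit limbs. Second, the modinfo constants encode p = 2^256 - 2^32 - 977 in base 2^30 (the limbs -0x3D1, -4, 0, ..., 65536 sum to -0x3D1 - 4*2^30 + 65536*2^240 = p), together with 0x2DDACACF, which, assuming it is modulus^-1 mod 2^30 as the new modinv32 module documents, must satisfy p * 0x2DDACACF == 1 (mod 2^30). A standalone check of both claims (illustrative only, written against the shift pattern above):

    #include <stdint.h>
    #include <stdio.h>

    /* Bit i of a little-endian array of limbs holding w bits each. */
    static int get_bit(const uint64_t *limbs, int w, int i) {
        return (limbs[i / w] >> (i % w)) & 1;
    }

    int main(void) {
        uint64_t a[10], v[9];
        const uint64_t M30 = 0x3FFFFFFFULL;
        int i, ok = 1;

        /* Arbitrary field-element-shaped input: nine 26-bit limbs plus a
         * 22-bit top limb, 256 bits in total. */
        for (i = 0; i < 10; i++) a[i] = (0x1234567ULL * (i + 1)) & 0x3FFFFFFULL;
        a[9] &= 0x3FFFFFULL;

        /* Same shifts as rustsecp256k1_v0_4_1_fe_to_signed30 above. */
        v[0] = (a[0]       | a[1] << 26) & M30;
        v[1] = (a[1] >>  4 | a[2] << 22) & M30;
        v[2] = (a[2] >>  8 | a[3] << 18) & M30;
        v[3] = (a[3] >> 12 | a[4] << 14) & M30;
        v[4] = (a[4] >> 16 | a[5] << 10) & M30;
        v[5] = (a[5] >> 20 | a[6] <<  6) & M30;
        v[6] = (a[6] >> 24 | a[7] <<  2 | a[8] << 28) & M30;
        v[7] = (a[8] >>  2 | a[9] << 24) & M30;
        v[8] =  a[9] >>  6;

        for (i = 0; i < 256; i++) ok &= get_bit(a, 26, i) == get_bit(v, 30, i);
        printf("repacking keeps all 256 bits: %d\n", ok);

        /* p == -0x3D1 (mod 2^30), since both 2^256 and 2^32 vanish mod 2^30,
         * so the inverse constant must satisfy (-0x3D1) * inv == 1 (mod 2^30). */
        printf("inverse constant checks out: %d\n",
               (int)((((0 - (uint64_t)0x3D1) * 0x2DDACACFULL) & M30) == 1));
        return 0;
    }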
+ #endif /* SECP256K1_FIELD_REPR_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/field_5x52.h b/secp256k1-sys/depend/secp256k1/src/field_5x52.h index 6a2243e..87ee05b 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_5x52.h +++ b/secp256k1-sys/depend/secp256k1/src/field_5x52.h @@ -18,7 +18,7 @@ typedef struct { int magnitude; int normalized; #endif -} rustsecp256k1_v0_4_0_fe; +} rustsecp256k1_v0_4_1_fe; /* Unpacks a constant into a overlapping multi-limbed FE element. */ #define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \ @@ -37,7 +37,7 @@ typedef struct { typedef struct { uint64_t n[4]; -} rustsecp256k1_v0_4_0_fe_storage; +} rustsecp256k1_v0_4_1_fe_storage; #define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ \ (d0) | (((uint64_t)(d1)) << 32), \ diff --git a/secp256k1-sys/depend/secp256k1/src/field_5x52_asm_impl.h b/secp256k1-sys/depend/secp256k1/src/field_5x52_asm_impl.h index 750487b..afcb415 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_5x52_asm_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/field_5x52_asm_impl.h @@ -14,7 +14,7 @@ #ifndef SECP256K1_FIELD_INNER5X52_IMPL_H #define SECP256K1_FIELD_INNER5X52_IMPL_H -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) { /** * Registers: rdx:rax = multiplication accumulator * r9:r8 = c @@ -284,7 +284,7 @@ __asm__ __volatile__( ); } -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_sqr_inner(uint64_t *r, const uint64_t *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_sqr_inner(uint64_t *r, const uint64_t *a) { /** * Registers: rdx:rax = multiplication accumulator * r9:r8 = c diff --git a/secp256k1-sys/depend/secp256k1/src/field_5x52_impl.h b/secp256k1-sys/depend/secp256k1/src/field_5x52_impl.h index ae698bc..7309dac 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_5x52_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/field_5x52_impl.h @@ -13,6 +13,7 @@ #include "util.h" #include "field.h" +#include "modinv64_impl.h" #if defined(USE_ASM_X86_64) #include "field_5x52_asm_impl.h" @@ -29,7 +30,7 @@ */ #ifdef VERIFY -static void rustsecp256k1_v0_4_0_fe_verify(const rustsecp256k1_v0_4_0_fe *a) { +static void rustsecp256k1_v0_4_1_fe_verify(const rustsecp256k1_v0_4_1_fe *a) { const uint64_t *d = a->n; int m = a->normalized ? 1 : 2 * a->magnitude, r = 1; /* secp256k1 'p' value defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. 
*/ @@ -50,7 +51,7 @@ static void rustsecp256k1_v0_4_0_fe_verify(const rustsecp256k1_v0_4_0_fe *a) { } #endif -static void rustsecp256k1_v0_4_0_fe_normalize(rustsecp256k1_v0_4_0_fe *r) { +static void rustsecp256k1_v0_4_1_fe_normalize(rustsecp256k1_v0_4_1_fe *r) { uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; /* Reduce t4 at the start so there will be at most a single carry from the first pass */ @@ -89,11 +90,11 @@ static void rustsecp256k1_v0_4_0_fe_normalize(rustsecp256k1_v0_4_0_fe *r) { #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); #endif } -static void rustsecp256k1_v0_4_0_fe_normalize_weak(rustsecp256k1_v0_4_0_fe *r) { +static void rustsecp256k1_v0_4_1_fe_normalize_weak(rustsecp256k1_v0_4_1_fe *r) { uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; /* Reduce t4 at the start so there will be at most a single carry from the first pass */ @@ -113,11 +114,11 @@ static void rustsecp256k1_v0_4_0_fe_normalize_weak(rustsecp256k1_v0_4_0_fe *r) { #ifdef VERIFY r->magnitude = 1; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); #endif } -static void rustsecp256k1_v0_4_0_fe_normalize_var(rustsecp256k1_v0_4_0_fe *r) { +static void rustsecp256k1_v0_4_1_fe_normalize_var(rustsecp256k1_v0_4_1_fe *r) { uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; /* Reduce t4 at the start so there will be at most a single carry from the first pass */ @@ -157,11 +158,11 @@ static void rustsecp256k1_v0_4_0_fe_normalize_var(rustsecp256k1_v0_4_0_fe *r) { #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); #endif } -static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero(rustsecp256k1_v0_4_0_fe *r) { +static int rustsecp256k1_v0_4_1_fe_normalizes_to_zero(const rustsecp256k1_v0_4_1_fe *r) { uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */ @@ -184,7 +185,7 @@ static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero(rustsecp256k1_v0_4_0_fe *r return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL); } -static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(rustsecp256k1_v0_4_0_fe *r) { +static int rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(const rustsecp256k1_v0_4_1_fe *r) { uint64_t t0, t1, t2, t3, t4; uint64_t z0, z1; uint64_t x; @@ -225,34 +226,34 @@ static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(rustsecp256k1_v0_4_0_f return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL); } -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_set_int(rustsecp256k1_v0_4_0_fe *r, int a) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_set_int(rustsecp256k1_v0_4_1_fe *r, int a) { r->n[0] = a; r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0; #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); #endif } -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_fe_is_zero(const rustsecp256k1_v0_4_0_fe *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_fe_is_zero(const rustsecp256k1_v0_4_1_fe *a) { const uint64_t *t = a->n; #ifdef VERIFY VERIFY_CHECK(a->normalized); - rustsecp256k1_v0_4_0_fe_verify(a); + rustsecp256k1_v0_4_1_fe_verify(a); #endif return (t[0] | t[1] | t[2] | t[3] | t[4]) == 0; } -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_fe_is_odd(const rustsecp256k1_v0_4_0_fe *a) { +SECP256K1_INLINE 
static int rustsecp256k1_v0_4_1_fe_is_odd(const rustsecp256k1_v0_4_1_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->normalized); - rustsecp256k1_v0_4_0_fe_verify(a); + rustsecp256k1_v0_4_1_fe_verify(a); #endif return a->n[0] & 1; } -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_clear(rustsecp256k1_v0_4_0_fe *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_clear(rustsecp256k1_v0_4_1_fe *a) { int i; #ifdef VERIFY a->magnitude = 0; @@ -263,13 +264,13 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_clear(rustsecp256k1_v0_4_0_ } } -static int rustsecp256k1_v0_4_0_fe_cmp_var(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b) { +static int rustsecp256k1_v0_4_1_fe_cmp_var(const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe *b) { int i; #ifdef VERIFY VERIFY_CHECK(a->normalized); VERIFY_CHECK(b->normalized); - rustsecp256k1_v0_4_0_fe_verify(a); - rustsecp256k1_v0_4_0_fe_verify(b); + rustsecp256k1_v0_4_1_fe_verify(a); + rustsecp256k1_v0_4_1_fe_verify(b); #endif for (i = 4; i >= 0; i--) { if (a->n[i] > b->n[i]) { @@ -282,7 +283,7 @@ static int rustsecp256k1_v0_4_0_fe_cmp_var(const rustsecp256k1_v0_4_0_fe *a, con return 0; } -static int rustsecp256k1_v0_4_0_fe_set_b32(rustsecp256k1_v0_4_0_fe *r, const unsigned char *a) { +static int rustsecp256k1_v0_4_1_fe_set_b32(rustsecp256k1_v0_4_1_fe *r, const unsigned char *a) { int ret; r->n[0] = (uint64_t)a[31] | ((uint64_t)a[30] << 8) @@ -323,7 +324,7 @@ static int rustsecp256k1_v0_4_0_fe_set_b32(rustsecp256k1_v0_4_0_fe *r, const uns r->magnitude = 1; if (ret) { r->normalized = 1; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); } else { r->normalized = 0; } @@ -332,10 +333,10 @@ static int rustsecp256k1_v0_4_0_fe_set_b32(rustsecp256k1_v0_4_0_fe *r, const uns } /** Convert a field element to a 32-byte big endian value. 
Requires the input to be normalized */ -static void rustsecp256k1_v0_4_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_4_0_fe *a) { +static void rustsecp256k1_v0_4_1_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_4_1_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->normalized); - rustsecp256k1_v0_4_0_fe_verify(a); + rustsecp256k1_v0_4_1_fe_verify(a); #endif r[0] = (a->n[4] >> 40) & 0xFF; r[1] = (a->n[4] >> 32) & 0xFF; @@ -371,10 +372,10 @@ static void rustsecp256k1_v0_4_0_fe_get_b32(unsigned char *r, const rustsecp256k r[31] = a->n[0] & 0xFF; } -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_negate(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, int m) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_negate(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a, int m) { #ifdef VERIFY VERIFY_CHECK(a->magnitude <= m); - rustsecp256k1_v0_4_0_fe_verify(a); + rustsecp256k1_v0_4_1_fe_verify(a); #endif r->n[0] = 0xFFFFEFFFFFC2FULL * 2 * (m + 1) - a->n[0]; r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[1]; @@ -384,11 +385,11 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_negate(rustsecp256k1_v0_4_0 #ifdef VERIFY r->magnitude = m + 1; r->normalized = 0; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); #endif } -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_mul_int(rustsecp256k1_v0_4_0_fe *r, int a) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_mul_int(rustsecp256k1_v0_4_1_fe *r, int a) { r->n[0] *= a; r->n[1] *= a; r->n[2] *= a; @@ -397,13 +398,13 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_mul_int(rustsecp256k1_v0_4_ #ifdef VERIFY r->magnitude *= a; r->normalized = 0; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); #endif } -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_add(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_add(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a) { #ifdef VERIFY - rustsecp256k1_v0_4_0_fe_verify(a); + rustsecp256k1_v0_4_1_fe_verify(a); #endif r->n[0] += a->n[0]; r->n[1] += a->n[1]; @@ -413,41 +414,41 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_add(rustsecp256k1_v0_4_0_fe #ifdef VERIFY r->magnitude += a->magnitude; r->normalized = 0; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); #endif } -static void rustsecp256k1_v0_4_0_fe_mul(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe * SECP256K1_RESTRICT b) { +static void rustsecp256k1_v0_4_1_fe_mul(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe * SECP256K1_RESTRICT b) { #ifdef VERIFY VERIFY_CHECK(a->magnitude <= 8); VERIFY_CHECK(b->magnitude <= 8); - rustsecp256k1_v0_4_0_fe_verify(a); - rustsecp256k1_v0_4_0_fe_verify(b); + rustsecp256k1_v0_4_1_fe_verify(a); + rustsecp256k1_v0_4_1_fe_verify(b); VERIFY_CHECK(r != b); VERIFY_CHECK(a != b); #endif - rustsecp256k1_v0_4_0_fe_mul_inner(r->n, a->n, b->n); + rustsecp256k1_v0_4_1_fe_mul_inner(r->n, a->n, b->n); #ifdef VERIFY r->magnitude = 1; r->normalized = 0; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); #endif } -static void rustsecp256k1_v0_4_0_fe_sqr(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) { +static void rustsecp256k1_v0_4_1_fe_sqr(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->magnitude <= 8); - rustsecp256k1_v0_4_0_fe_verify(a); + 
rustsecp256k1_v0_4_1_fe_verify(a); #endif - rustsecp256k1_v0_4_0_fe_sqr_inner(r->n, a->n); + rustsecp256k1_v0_4_1_fe_sqr_inner(r->n, a->n); #ifdef VERIFY r->magnitude = 1; r->normalized = 0; - rustsecp256k1_v0_4_0_fe_verify(r); + rustsecp256k1_v0_4_1_fe_verify(r); #endif } -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_cmov(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_fe_cmov(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a, int flag) { uint64_t mask0, mask1; VG_CHECK_VERIFY(r->n, sizeof(r->n)); mask0 = flag + ~((uint64_t)0); @@ -465,7 +466,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_cmov(rustsecp256k1_v0_4_0_f #endif } -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_storage_cmov(rustsecp256k1_v0_4_0_fe_storage *r, const rustsecp256k1_v0_4_0_fe_storage *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_fe_storage_cmov(rustsecp256k1_v0_4_1_fe_storage *r, const rustsecp256k1_v0_4_1_fe_storage *a, int flag) { uint64_t mask0, mask1; VG_CHECK_VERIFY(r->n, sizeof(r->n)); mask0 = flag + ~((uint64_t)0); @@ -476,7 +477,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_storage_cmov(rustsecp256k1_ r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1); } -static void rustsecp256k1_v0_4_0_fe_to_storage(rustsecp256k1_v0_4_0_fe_storage *r, const rustsecp256k1_v0_4_0_fe *a) { +static void rustsecp256k1_v0_4_1_fe_to_storage(rustsecp256k1_v0_4_1_fe_storage *r, const rustsecp256k1_v0_4_1_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->normalized); #endif @@ -486,7 +487,7 @@ static void rustsecp256k1_v0_4_0_fe_to_storage(rustsecp256k1_v0_4_0_fe_storage * r->n[3] = a->n[3] >> 36 | a->n[4] << 16; } -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_from_storage(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe_storage *a) { +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_fe_from_storage(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe_storage *a) { r->n[0] = a->n[0] & 0xFFFFFFFFFFFFFULL; r->n[1] = a->n[0] >> 52 | ((a->n[1] << 12) & 0xFFFFFFFFFFFFFULL); r->n[2] = a->n[1] >> 40 | ((a->n[2] << 24) & 0xFFFFFFFFFFFFFULL); @@ -498,4 +499,80 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_from_storage(rustsecp256k1_ #endif } +static void rustsecp256k1_v0_4_1_fe_from_signed62(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_modinv64_signed62 *a) { + const uint64_t M52 = UINT64_MAX >> 12; + const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4]; + + /* The output from rustsecp256k1_v0_4_1_modinv64{_var} should be normalized to range [0,modulus), and + * have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4). 
+ */ + VERIFY_CHECK(a0 >> 62 == 0); + VERIFY_CHECK(a1 >> 62 == 0); + VERIFY_CHECK(a2 >> 62 == 0); + VERIFY_CHECK(a3 >> 62 == 0); + VERIFY_CHECK(a4 >> 8 == 0); + + r->n[0] = a0 & M52; + r->n[1] = (a0 >> 52 | a1 << 10) & M52; + r->n[2] = (a1 >> 42 | a2 << 20) & M52; + r->n[3] = (a2 >> 32 | a3 << 30) & M52; + r->n[4] = (a3 >> 22 | a4 << 40); + +#ifdef VERIFY + r->magnitude = 1; + r->normalized = 1; + rustsecp256k1_v0_4_1_fe_verify(r); +#endif +} + +static void rustsecp256k1_v0_4_1_fe_to_signed62(rustsecp256k1_v0_4_1_modinv64_signed62 *r, const rustsecp256k1_v0_4_1_fe *a) { + const uint64_t M62 = UINT64_MAX >> 2; + const uint64_t a0 = a->n[0], a1 = a->n[1], a2 = a->n[2], a3 = a->n[3], a4 = a->n[4]; + +#ifdef VERIFY + VERIFY_CHECK(a->normalized); +#endif + + r->v[0] = (a0 | a1 << 52) & M62; + r->v[1] = (a1 >> 10 | a2 << 42) & M62; + r->v[2] = (a2 >> 20 | a3 << 32) & M62; + r->v[3] = (a3 >> 30 | a4 << 22) & M62; + r->v[4] = a4 >> 40; +} + +static const rustsecp256k1_v0_4_1_modinv64_modinfo rustsecp256k1_v0_4_1_const_modinfo_fe = { + {{-0x1000003D1LL, 0, 0, 0, 256}}, + 0x27C7F6E22DDACACFLL +}; + +static void rustsecp256k1_v0_4_1_fe_inv(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *x) { + rustsecp256k1_v0_4_1_fe tmp; + rustsecp256k1_v0_4_1_modinv64_signed62 s; + + tmp = *x; + rustsecp256k1_v0_4_1_fe_normalize(&tmp); + rustsecp256k1_v0_4_1_fe_to_signed62(&s, &tmp); + rustsecp256k1_v0_4_1_modinv64(&s, &rustsecp256k1_v0_4_1_const_modinfo_fe); + rustsecp256k1_v0_4_1_fe_from_signed62(r, &s); + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_fe_normalizes_to_zero(r) == rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&tmp)); +#endif +} + +static void rustsecp256k1_v0_4_1_fe_inv_var(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *x) { + rustsecp256k1_v0_4_1_fe tmp; + rustsecp256k1_v0_4_1_modinv64_signed62 s; + + tmp = *x; + rustsecp256k1_v0_4_1_fe_normalize_var(&tmp); + rustsecp256k1_v0_4_1_fe_to_signed62(&s, &tmp); + rustsecp256k1_v0_4_1_modinv64_var(&s, &rustsecp256k1_v0_4_1_const_modinfo_fe); + rustsecp256k1_v0_4_1_fe_from_signed62(r, &s); + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_fe_normalizes_to_zero(r) == rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&tmp)); +#endif +} + #endif /* SECP256K1_FIELD_REPR_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/field_5x52_int128_impl.h b/secp256k1-sys/depend/secp256k1/src/field_5x52_int128_impl.h index 7dcdce1..9c02bdc 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_5x52_int128_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/field_5x52_int128_impl.h @@ -15,7 +15,7 @@ #define VERIFY_BITS(x, n) do { } while(0) #endif -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) { uint128_t c, d; uint64_t t3, t4, tx, u0; uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4]; @@ -154,7 +154,7 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_mul_inner(uint64_t *r, cons /* [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ } -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_sqr_inner(uint64_t *r, const uint64_t *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_sqr_inner(uint64_t *r, const uint64_t *a) { uint128_t c, d; uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4]; int64_t t3, t4, tx, u0; diff --git a/secp256k1-sys/depend/secp256k1/src/field_impl.h 
b/secp256k1-sys/depend/secp256k1/src/field_impl.h index 6146568..48b2546 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/field_impl.h @@ -12,7 +12,6 @@ #endif #include "util.h" -#include "num.h" #if defined(SECP256K1_WIDEMUL_INT128) #include "field_5x52_impl.h" @@ -22,21 +21,21 @@ #error "Please select wide multiplication implementation" #endif -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_fe_equal(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b) { - rustsecp256k1_v0_4_0_fe na; - rustsecp256k1_v0_4_0_fe_negate(&na, a, 1); - rustsecp256k1_v0_4_0_fe_add(&na, b); - return rustsecp256k1_v0_4_0_fe_normalizes_to_zero(&na); +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_fe_equal(const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe *b) { + rustsecp256k1_v0_4_1_fe na; + rustsecp256k1_v0_4_1_fe_negate(&na, a, 1); + rustsecp256k1_v0_4_1_fe_add(&na, b); + return rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&na); } -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_fe_equal_var(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b) { - rustsecp256k1_v0_4_0_fe na; - rustsecp256k1_v0_4_0_fe_negate(&na, a, 1); - rustsecp256k1_v0_4_0_fe_add(&na, b); - return rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&na); +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_fe_equal_var(const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe *b) { + rustsecp256k1_v0_4_1_fe na; + rustsecp256k1_v0_4_1_fe_negate(&na, a, 1); + rustsecp256k1_v0_4_1_fe_add(&na, b); + return rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&na); } -static int rustsecp256k1_v0_4_0_fe_sqrt(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) { +static int rustsecp256k1_v0_4_1_fe_sqrt(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a) { /** Given that p is congruent to 3 mod 4, we can compute the square root of * a mod p as the (p+1)/4'th power of a. * @@ -46,7 +45,7 @@ static int rustsecp256k1_v0_4_0_fe_sqrt(rustsecp256k1_v0_4_0_fe *r, const rustse * Also because (p+1)/4 is an even number, the computed square root is * itself always a square (a ** ((p+1)/4) is the square of a ** ((p+1)/8)). 
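 *
 * (Worked toy instance of the same identity: for p = 23, which is also
 * 3 mod 4, a = 13 = 6^2 is a residue, and 13^((23+1)/4) = 13^6 = 6 (mod 23)
 * recovers a root. For a non-residue the final check in this function
 * fails, because then a^((p+1)/2) equals -a rather than a.)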
*/ - rustsecp256k1_v0_4_0_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1; + rustsecp256k1_v0_4_1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1; int j; VERIFY_CHECK(r != a); @@ -56,265 +55,86 @@ static int rustsecp256k1_v0_4_0_fe_sqrt(rustsecp256k1_v0_4_0_fe *r, const rustse * 1, [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223] */ - rustsecp256k1_v0_4_0_fe_sqr(&x2, a); - rustsecp256k1_v0_4_0_fe_mul(&x2, &x2, a); + rustsecp256k1_v0_4_1_fe_sqr(&x2, a); + rustsecp256k1_v0_4_1_fe_mul(&x2, &x2, a); - rustsecp256k1_v0_4_0_fe_sqr(&x3, &x2); - rustsecp256k1_v0_4_0_fe_mul(&x3, &x3, a); + rustsecp256k1_v0_4_1_fe_sqr(&x3, &x2); + rustsecp256k1_v0_4_1_fe_mul(&x3, &x3, a); x6 = x3; for (j=0; j<3; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&x6, &x6); + rustsecp256k1_v0_4_1_fe_sqr(&x6, &x6); } - rustsecp256k1_v0_4_0_fe_mul(&x6, &x6, &x3); + rustsecp256k1_v0_4_1_fe_mul(&x6, &x6, &x3); x9 = x6; for (j=0; j<3; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&x9, &x9); + rustsecp256k1_v0_4_1_fe_sqr(&x9, &x9); } - rustsecp256k1_v0_4_0_fe_mul(&x9, &x9, &x3); + rustsecp256k1_v0_4_1_fe_mul(&x9, &x9, &x3); x11 = x9; for (j=0; j<2; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&x11, &x11); + rustsecp256k1_v0_4_1_fe_sqr(&x11, &x11); } - rustsecp256k1_v0_4_0_fe_mul(&x11, &x11, &x2); + rustsecp256k1_v0_4_1_fe_mul(&x11, &x11, &x2); x22 = x11; for (j=0; j<11; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&x22, &x22); + rustsecp256k1_v0_4_1_fe_sqr(&x22, &x22); } - rustsecp256k1_v0_4_0_fe_mul(&x22, &x22, &x11); + rustsecp256k1_v0_4_1_fe_mul(&x22, &x22, &x11); x44 = x22; for (j=0; j<22; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&x44, &x44); + rustsecp256k1_v0_4_1_fe_sqr(&x44, &x44); } - rustsecp256k1_v0_4_0_fe_mul(&x44, &x44, &x22); + rustsecp256k1_v0_4_1_fe_mul(&x44, &x44, &x22); x88 = x44; for (j=0; j<44; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&x88, &x88); + rustsecp256k1_v0_4_1_fe_sqr(&x88, &x88); } - rustsecp256k1_v0_4_0_fe_mul(&x88, &x88, &x44); + rustsecp256k1_v0_4_1_fe_mul(&x88, &x88, &x44); x176 = x88; for (j=0; j<88; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&x176, &x176); + rustsecp256k1_v0_4_1_fe_sqr(&x176, &x176); } - rustsecp256k1_v0_4_0_fe_mul(&x176, &x176, &x88); + rustsecp256k1_v0_4_1_fe_mul(&x176, &x176, &x88); x220 = x176; for (j=0; j<44; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&x220, &x220); + rustsecp256k1_v0_4_1_fe_sqr(&x220, &x220); } - rustsecp256k1_v0_4_0_fe_mul(&x220, &x220, &x44); + rustsecp256k1_v0_4_1_fe_mul(&x220, &x220, &x44); x223 = x220; for (j=0; j<3; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&x223, &x223); + rustsecp256k1_v0_4_1_fe_sqr(&x223, &x223); } - rustsecp256k1_v0_4_0_fe_mul(&x223, &x223, &x3); + rustsecp256k1_v0_4_1_fe_mul(&x223, &x223, &x3); /* The final result is then assembled using a sliding window over the blocks. 
*/ t1 = x223; for (j=0; j<23; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1); + rustsecp256k1_v0_4_1_fe_sqr(&t1, &t1); } - rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, &x22); + rustsecp256k1_v0_4_1_fe_mul(&t1, &t1, &x22); for (j=0; j<6; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1); + rustsecp256k1_v0_4_1_fe_sqr(&t1, &t1); } - rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, &x2); - rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1); - rustsecp256k1_v0_4_0_fe_sqr(r, &t1); + rustsecp256k1_v0_4_1_fe_mul(&t1, &t1, &x2); + rustsecp256k1_v0_4_1_fe_sqr(&t1, &t1); + rustsecp256k1_v0_4_1_fe_sqr(r, &t1); /* Check that a square root was actually calculated */ - rustsecp256k1_v0_4_0_fe_sqr(&t1, r); - return rustsecp256k1_v0_4_0_fe_equal(&t1, a); + rustsecp256k1_v0_4_1_fe_sqr(&t1, r); + return rustsecp256k1_v0_4_1_fe_equal(&t1, a); } -static void rustsecp256k1_v0_4_0_fe_inv(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) { - rustsecp256k1_v0_4_0_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1; - int j; - - /** The binary representation of (p - 2) has 5 blocks of 1s, with lengths in - * { 1, 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block: - * [1], [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223] - */ - - rustsecp256k1_v0_4_0_fe_sqr(&x2, a); - rustsecp256k1_v0_4_0_fe_mul(&x2, &x2, a); - - rustsecp256k1_v0_4_0_fe_sqr(&x3, &x2); - rustsecp256k1_v0_4_0_fe_mul(&x3, &x3, a); - - x6 = x3; - for (j=0; j<3; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&x6, &x6); - } - rustsecp256k1_v0_4_0_fe_mul(&x6, &x6, &x3); - - x9 = x6; - for (j=0; j<3; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&x9, &x9); - } - rustsecp256k1_v0_4_0_fe_mul(&x9, &x9, &x3); - - x11 = x9; - for (j=0; j<2; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&x11, &x11); - } - rustsecp256k1_v0_4_0_fe_mul(&x11, &x11, &x2); - - x22 = x11; - for (j=0; j<11; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&x22, &x22); - } - rustsecp256k1_v0_4_0_fe_mul(&x22, &x22, &x11); - - x44 = x22; - for (j=0; j<22; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&x44, &x44); - } - rustsecp256k1_v0_4_0_fe_mul(&x44, &x44, &x22); - - x88 = x44; - for (j=0; j<44; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&x88, &x88); - } - rustsecp256k1_v0_4_0_fe_mul(&x88, &x88, &x44); - - x176 = x88; - for (j=0; j<88; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&x176, &x176); - } - rustsecp256k1_v0_4_0_fe_mul(&x176, &x176, &x88); - - x220 = x176; - for (j=0; j<44; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&x220, &x220); - } - rustsecp256k1_v0_4_0_fe_mul(&x220, &x220, &x44); - - x223 = x220; - for (j=0; j<3; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&x223, &x223); - } - rustsecp256k1_v0_4_0_fe_mul(&x223, &x223, &x3); - - /* The final result is then assembled using a sliding window over the blocks. 
*/ - - t1 = x223; - for (j=0; j<23; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1); - } - rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, &x22); - for (j=0; j<5; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1); - } - rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, a); - for (j=0; j<3; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1); - } - rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, &x2); - for (j=0; j<2; j++) { - rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1); - } - rustsecp256k1_v0_4_0_fe_mul(r, a, &t1); -} - -static void rustsecp256k1_v0_4_0_fe_inv_var(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) { -#if defined(USE_FIELD_INV_BUILTIN) - rustsecp256k1_v0_4_0_fe_inv(r, a); -#elif defined(USE_FIELD_INV_NUM) - rustsecp256k1_v0_4_0_num n, m; - static const rustsecp256k1_v0_4_0_fe negone = SECP256K1_FE_CONST( - 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, - 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xFFFFFC2EUL - ); - /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */ - static const unsigned char prime[32] = { - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F - }; - unsigned char b[32]; - int res; - rustsecp256k1_v0_4_0_fe c = *a; - rustsecp256k1_v0_4_0_fe_normalize_var(&c); - rustsecp256k1_v0_4_0_fe_get_b32(b, &c); - rustsecp256k1_v0_4_0_num_set_bin(&n, b, 32); - rustsecp256k1_v0_4_0_num_set_bin(&m, prime, 32); - rustsecp256k1_v0_4_0_num_mod_inverse(&n, &n, &m); - rustsecp256k1_v0_4_0_num_get_bin(b, 32, &n); - res = rustsecp256k1_v0_4_0_fe_set_b32(r, b); - (void)res; - VERIFY_CHECK(res); - /* Verify the result is the (unique) valid inverse using non-GMP code. */ - rustsecp256k1_v0_4_0_fe_mul(&c, &c, r); - rustsecp256k1_v0_4_0_fe_add(&c, &negone); - CHECK(rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&c)); -#else -#error "Please select field inverse implementation" -#endif -} - -static void rustsecp256k1_v0_4_0_fe_inv_all_var(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, size_t len) { - rustsecp256k1_v0_4_0_fe u; - size_t i; - if (len < 1) { - return; - } - - VERIFY_CHECK((r + len <= a) || (a + len <= r)); - - r[0] = a[0]; - - i = 0; - while (++i < len) { - rustsecp256k1_v0_4_0_fe_mul(&r[i], &r[i - 1], &a[i]); - } - - rustsecp256k1_v0_4_0_fe_inv_var(&u, &r[--i]); - - while (i > 0) { - size_t j = i--; - rustsecp256k1_v0_4_0_fe_mul(&r[j], &r[i], &u); - rustsecp256k1_v0_4_0_fe_mul(&u, &u, &a[j]); - } - - r[0] = u; -} - -static int rustsecp256k1_v0_4_0_fe_is_quad_var(const rustsecp256k1_v0_4_0_fe *a) { -#ifndef USE_NUM_NONE - unsigned char b[32]; - rustsecp256k1_v0_4_0_num n; - rustsecp256k1_v0_4_0_num m; - /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. 
*/ - static const unsigned char prime[32] = { - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F - }; - - rustsecp256k1_v0_4_0_fe c = *a; - rustsecp256k1_v0_4_0_fe_normalize_var(&c); - rustsecp256k1_v0_4_0_fe_get_b32(b, &c); - rustsecp256k1_v0_4_0_num_set_bin(&n, b, 32); - rustsecp256k1_v0_4_0_num_set_bin(&m, prime, 32); - return rustsecp256k1_v0_4_0_num_jacobi(&n, &m) >= 0; -#else - rustsecp256k1_v0_4_0_fe r; - return rustsecp256k1_v0_4_0_fe_sqrt(&r, a); -#endif -} - -static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_fe_one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); +static const rustsecp256k1_v0_4_1_fe rustsecp256k1_v0_4_1_fe_one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); #endif /* SECP256K1_FIELD_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/gen_context.c b/secp256k1-sys/depend/secp256k1/src/gen_context.c index 39fd4ed..8d332da 100644 --- a/secp256k1-sys/depend/secp256k1/src/gen_context.c +++ b/secp256k1-sys/depend/secp256k1/src/gen_context.c @@ -4,15 +4,21 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -// Autotools creates libsecp256k1-config.h, of which ECMULT_GEN_PREC_BITS is needed. -// ifndef guard so downstream users can define their own if they do not use autotools. +/* Autotools creates libsecp256k1-config.h, of which ECMULT_GEN_PREC_BITS is needed. + ifndef guard so downstream users can define their own if they do not use autotools. */ #if !defined(ECMULT_GEN_PREC_BITS) #include "libsecp256k1-config.h" #endif -#define USE_BASIC_CONFIG 1 -#include "basic-config.h" -#include "include/secp256k1.h" +/* We can't require the precomputed tables when creating them. */ +#undef USE_ECMULT_STATIC_PRECOMPUTATION + +/* In principle we could use external ASM, but this yields only a minor speedup in + build time and it's very complicated. In particular when cross-compiling, we'd + need to build the external ASM for the build and the host machine. */ +#undef USE_EXTERNAL_ASM + +#include "../include/secp256k1.h" #include "assumptions.h" #include "util.h" #include "field_impl.h" @@ -26,13 +32,13 @@ static void default_error_callback_fn(const char* str, void* data) { abort(); } -static const rustsecp256k1_v0_4_0_callback default_error_callback = { +static const rustsecp256k1_v0_4_1_callback default_error_callback = { default_error_callback_fn, NULL }; int main(int argc, char **argv) { - rustsecp256k1_v0_4_0_ecmult_gen_context ctx; + rustsecp256k1_v0_4_1_ecmult_gen_context ctx; void *prealloc, *base; int inner; int outer; @@ -47,19 +53,19 @@ int main(int argc, char **argv) { return -1; } - fprintf(fp, "#ifndef _SECP256K1_ECMULT_STATIC_CONTEXT_\n"); - fprintf(fp, "#define _SECP256K1_ECMULT_STATIC_CONTEXT_\n"); + fprintf(fp, "#ifndef SECP256K1_ECMULT_STATIC_CONTEXT_H\n"); + fprintf(fp, "#define SECP256K1_ECMULT_STATIC_CONTEXT_H\n"); fprintf(fp, "#include \"src/group.h\"\n"); fprintf(fp, "#define SC SECP256K1_GE_STORAGE_CONST\n"); fprintf(fp, "#if ECMULT_GEN_PREC_N != %d || ECMULT_GEN_PREC_G != %d\n", ECMULT_GEN_PREC_N, ECMULT_GEN_PREC_G); fprintf(fp, " #error configuration mismatch, invalid ECMULT_GEN_PREC_N, ECMULT_GEN_PREC_G. 
Try deleting ecmult_static_context.h before the build.\n"); fprintf(fp, "#endif\n"); - fprintf(fp, "static const rustsecp256k1_v0_4_0_ge_storage rustsecp256k1_v0_4_0_ecmult_static_context[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G] = {\n"); + fprintf(fp, "static const rustsecp256k1_v0_4_1_ge_storage rustsecp256k1_v0_4_1_ecmult_static_context[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G] = {\n"); base = checked_malloc(&default_error_callback, SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE); prealloc = base; - rustsecp256k1_v0_4_0_ecmult_gen_context_init(&ctx); - rustsecp256k1_v0_4_0_ecmult_gen_context_build(&ctx, &prealloc); + rustsecp256k1_v0_4_1_ecmult_gen_context_init(&ctx); + rustsecp256k1_v0_4_1_ecmult_gen_context_build(&ctx, &prealloc); for(outer = 0; outer != ECMULT_GEN_PREC_N; outer++) { fprintf(fp,"{\n"); for(inner = 0; inner != ECMULT_GEN_PREC_G; inner++) { @@ -77,7 +83,7 @@ int main(int argc, char **argv) { } } fprintf(fp,"};\n"); - rustsecp256k1_v0_4_0_ecmult_gen_context_clear(&ctx); + rustsecp256k1_v0_4_1_ecmult_gen_context_clear(&ctx); free(base); fprintf(fp, "#undef SC\n"); diff --git a/secp256k1-sys/depend/secp256k1/src/group.h b/secp256k1-sys/depend/secp256k1/src/group.h index 2553dbe..8b1fae8 100644 --- a/secp256k1-sys/depend/secp256k1/src/group.h +++ b/secp256k1-sys/depend/secp256k1/src/group.h @@ -7,135 +7,128 @@ #ifndef SECP256K1_GROUP_H #define SECP256K1_GROUP_H -#include "num.h" #include "field.h" /** A group element of the secp256k1 curve, in affine coordinates. */ typedef struct { - rustsecp256k1_v0_4_0_fe x; - rustsecp256k1_v0_4_0_fe y; + rustsecp256k1_v0_4_1_fe x; + rustsecp256k1_v0_4_1_fe y; int infinity; /* whether this represents the point at infinity */ -} rustsecp256k1_v0_4_0_ge; +} rustsecp256k1_v0_4_1_ge; #define SECP256K1_GE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), 0} #define SECP256K1_GE_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1} /** A group element of the secp256k1 curve, in jacobian coordinates. 
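 * The triple (x, y, z) stands for the affine point (x/z^2, y/z^3); keeping
 * points in this form lets the group operations below avoid a field
 * inversion per addition, deferring a single inversion to the final
 * conversion back to affine coordinates.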
*/ typedef struct { - rustsecp256k1_v0_4_0_fe x; /* actual X: x/z^2 */ - rustsecp256k1_v0_4_0_fe y; /* actual Y: y/z^3 */ - rustsecp256k1_v0_4_0_fe z; + rustsecp256k1_v0_4_1_fe x; /* actual X: x/z^2 */ + rustsecp256k1_v0_4_1_fe y; /* actual Y: y/z^3 */ + rustsecp256k1_v0_4_1_fe z; int infinity; /* whether this represents the point at infinity */ -} rustsecp256k1_v0_4_0_gej; +} rustsecp256k1_v0_4_1_gej; #define SECP256K1_GEJ_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1), 0} #define SECP256K1_GEJ_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1} typedef struct { - rustsecp256k1_v0_4_0_fe_storage x; - rustsecp256k1_v0_4_0_fe_storage y; -} rustsecp256k1_v0_4_0_ge_storage; + rustsecp256k1_v0_4_1_fe_storage x; + rustsecp256k1_v0_4_1_fe_storage y; +} rustsecp256k1_v0_4_1_ge_storage; #define SECP256K1_GE_STORAGE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_STORAGE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_STORAGE_CONST((i),(j),(k),(l),(m),(n),(o),(p))} #define SECP256K1_GE_STORAGE_CONST_GET(t) SECP256K1_FE_STORAGE_CONST_GET(t.x), SECP256K1_FE_STORAGE_CONST_GET(t.y) /** Set a group element equal to the point with given X and Y coordinates */ -static void rustsecp256k1_v0_4_0_ge_set_xy(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_fe *x, const rustsecp256k1_v0_4_0_fe *y); - -/** Set a group element (affine) equal to the point with the given X coordinate - * and a Y coordinate that is a quadratic residue modulo p. The return value - * is true iff a coordinate with the given X coordinate exists. - */ -static int rustsecp256k1_v0_4_0_ge_set_xquad(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_fe *x); +static void rustsecp256k1_v0_4_1_ge_set_xy(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_fe *x, const rustsecp256k1_v0_4_1_fe *y); /** Set a group element (affine) equal to the point with the given X coordinate, and given oddness * for Y. Return value indicates whether the result is valid. */ -static int rustsecp256k1_v0_4_0_ge_set_xo_var(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_fe *x, int odd); +static int rustsecp256k1_v0_4_1_ge_set_xo_var(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_fe *x, int odd); /** Check whether a group element is the point at infinity. */ -static int rustsecp256k1_v0_4_0_ge_is_infinity(const rustsecp256k1_v0_4_0_ge *a); +static int rustsecp256k1_v0_4_1_ge_is_infinity(const rustsecp256k1_v0_4_1_ge *a); /** Check whether a group element is valid (i.e., on the curve). */ -static int rustsecp256k1_v0_4_0_ge_is_valid_var(const rustsecp256k1_v0_4_0_ge *a); +static int rustsecp256k1_v0_4_1_ge_is_valid_var(const rustsecp256k1_v0_4_1_ge *a); /** Set r equal to the inverse of a (i.e., mirrored around the X axis) */ -static void rustsecp256k1_v0_4_0_ge_neg(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_ge *a); +static void rustsecp256k1_v0_4_1_ge_neg(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_ge *a); -/** Set a group element equal to another which is given in jacobian coordinates */ -static void rustsecp256k1_v0_4_0_ge_set_gej(rustsecp256k1_v0_4_0_ge *r, rustsecp256k1_v0_4_0_gej *a); +/** Set a group element equal to another which is given in jacobian coordinates. Constant time. 
 */
+static void rustsecp256k1_v0_4_1_ge_set_gej(rustsecp256k1_v0_4_1_ge *r, rustsecp256k1_v0_4_1_gej *a);
+
+/** Set a group element equal to another which is given in jacobian coordinates. */
+static void rustsecp256k1_v0_4_1_ge_set_gej_var(rustsecp256k1_v0_4_1_ge *r, rustsecp256k1_v0_4_1_gej *a);

 /** Set a batch of group elements equal to the inputs given in jacobian coordinates */
-static void rustsecp256k1_v0_4_0_ge_set_all_gej_var(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_gej *a, size_t len);
+static void rustsecp256k1_v0_4_1_ge_set_all_gej_var(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_gej *a, size_t len);

 /** Bring a batch of inputs given in jacobian coordinates (with known z-ratios) to
  * the same global z "denominator". zr must contain the known z-ratios such
  * that mul(a[i].z, zr[i+1]) == a[i+1].z. zr[0] is ignored. The x and y
  * coordinates of the result are stored in r, the common z coordinate is
  * stored in globalz. */
-static void rustsecp256k1_v0_4_0_ge_globalz_set_table_gej(size_t len, rustsecp256k1_v0_4_0_ge *r, rustsecp256k1_v0_4_0_fe *globalz, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_fe *zr);
+static void rustsecp256k1_v0_4_1_ge_globalz_set_table_gej(size_t len, rustsecp256k1_v0_4_1_ge *r, rustsecp256k1_v0_4_1_fe *globalz, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_fe *zr);

 /** Set a group element (affine) equal to the point at infinity. */
-static void rustsecp256k1_v0_4_0_ge_set_infinity(rustsecp256k1_v0_4_0_ge *r);
+static void rustsecp256k1_v0_4_1_ge_set_infinity(rustsecp256k1_v0_4_1_ge *r);

 /** Set a group element (jacobian) equal to the point at infinity. */
-static void rustsecp256k1_v0_4_0_gej_set_infinity(rustsecp256k1_v0_4_0_gej *r);
+static void rustsecp256k1_v0_4_1_gej_set_infinity(rustsecp256k1_v0_4_1_gej *r);

 /** Set a group element (jacobian) equal to another which is given in affine coordinates. */
-static void rustsecp256k1_v0_4_0_gej_set_ge(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_ge *a);
+static void rustsecp256k1_v0_4_1_gej_set_ge(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_ge *a);

 /** Compare the X coordinate of a group element (jacobian). */
-static int rustsecp256k1_v0_4_0_gej_eq_x_var(const rustsecp256k1_v0_4_0_fe *x, const rustsecp256k1_v0_4_0_gej *a);
+static int rustsecp256k1_v0_4_1_gej_eq_x_var(const rustsecp256k1_v0_4_1_fe *x, const rustsecp256k1_v0_4_1_gej *a);

 /** Set r equal to the inverse of a (i.e., mirrored around the X axis) */
-static void rustsecp256k1_v0_4_0_gej_neg(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a);
+static void rustsecp256k1_v0_4_1_gej_neg(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a);

 /** Check whether a group element is the point at infinity. */
-static int rustsecp256k1_v0_4_0_gej_is_infinity(const rustsecp256k1_v0_4_0_gej *a);
-
-/** Check whether a group element's y coordinate is a quadratic residue. */
-static int rustsecp256k1_v0_4_0_gej_has_quad_y_var(const rustsecp256k1_v0_4_0_gej *a);
+static int rustsecp256k1_v0_4_1_gej_is_infinity(const rustsecp256k1_v0_4_1_gej *a);

 /** Set r equal to the double of a. Constant time. */
-static void rustsecp256k1_v0_4_0_gej_double(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a);
+static void rustsecp256k1_v0_4_1_gej_double(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a);

 /** Set r equal to the double of a. If rzr is non-NULL this sets *rzr such that r->z == a->z * *rzr (where infinity means an implicit z = 0).
*/ -static void rustsecp256k1_v0_4_0_gej_double_var(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, rustsecp256k1_v0_4_0_fe *rzr); +static void rustsecp256k1_v0_4_1_gej_double_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, rustsecp256k1_v0_4_1_fe *rzr); /** Set r equal to the sum of a and b. If rzr is non-NULL this sets *rzr such that r->z == a->z * *rzr (a cannot be infinity in that case). */ -static void rustsecp256k1_v0_4_0_gej_add_var(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_gej *b, rustsecp256k1_v0_4_0_fe *rzr); +static void rustsecp256k1_v0_4_1_gej_add_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_gej *b, rustsecp256k1_v0_4_1_fe *rzr); /** Set r equal to the sum of a and b (with b given in affine coordinates, and not infinity). */ -static void rustsecp256k1_v0_4_0_gej_add_ge(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_ge *b); +static void rustsecp256k1_v0_4_1_gej_add_ge(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_ge *b); /** Set r equal to the sum of a and b (with b given in affine coordinates). This is more efficient - than rustsecp256k1_v0_4_0_gej_add_var. It is identical to rustsecp256k1_v0_4_0_gej_add_ge but without constant-time + than rustsecp256k1_v0_4_1_gej_add_var. It is identical to rustsecp256k1_v0_4_1_gej_add_ge but without constant-time guarantee, and b is allowed to be infinity. If rzr is non-NULL this sets *rzr such that r->z == a->z * *rzr (a cannot be infinity in that case). */ -static void rustsecp256k1_v0_4_0_gej_add_ge_var(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_ge *b, rustsecp256k1_v0_4_0_fe *rzr); +static void rustsecp256k1_v0_4_1_gej_add_ge_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_ge *b, rustsecp256k1_v0_4_1_fe *rzr); /** Set r equal to the sum of a and b (with the inverse of b's Z coordinate passed as bzinv). */ -static void rustsecp256k1_v0_4_0_gej_add_zinv_var(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_ge *b, const rustsecp256k1_v0_4_0_fe *bzinv); +static void rustsecp256k1_v0_4_1_gej_add_zinv_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_ge *b, const rustsecp256k1_v0_4_1_fe *bzinv); /** Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast. */ -static void rustsecp256k1_v0_4_0_ge_mul_lambda(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_ge *a); +static void rustsecp256k1_v0_4_1_ge_mul_lambda(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_ge *a); -/** Clear a rustsecp256k1_v0_4_0_gej to prevent leaking sensitive information. */ -static void rustsecp256k1_v0_4_0_gej_clear(rustsecp256k1_v0_4_0_gej *r); +/** Clear a rustsecp256k1_v0_4_1_gej to prevent leaking sensitive information. */ +static void rustsecp256k1_v0_4_1_gej_clear(rustsecp256k1_v0_4_1_gej *r); -/** Clear a rustsecp256k1_v0_4_0_ge to prevent leaking sensitive information. */ -static void rustsecp256k1_v0_4_0_ge_clear(rustsecp256k1_v0_4_0_ge *r); +/** Clear a rustsecp256k1_v0_4_1_ge to prevent leaking sensitive information. */ +static void rustsecp256k1_v0_4_1_ge_clear(rustsecp256k1_v0_4_1_ge *r); /** Convert a group element to the storage type. 
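 * (The storage form keeps both coordinates fully normalized, as the
 * a->normalized check in rustsecp256k1_v0_4_1_fe_to_storage enforces, which
 * makes it suitable for precomputed tables read back through the
 * constant-time rustsecp256k1_v0_4_1_ge_storage_cmov declared below.)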
*/ -static void rustsecp256k1_v0_4_0_ge_to_storage(rustsecp256k1_v0_4_0_ge_storage *r, const rustsecp256k1_v0_4_0_ge *a); +static void rustsecp256k1_v0_4_1_ge_to_storage(rustsecp256k1_v0_4_1_ge_storage *r, const rustsecp256k1_v0_4_1_ge *a); /** Convert a group element back from the storage type. */ -static void rustsecp256k1_v0_4_0_ge_from_storage(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_ge_storage *a); +static void rustsecp256k1_v0_4_1_ge_from_storage(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_ge_storage *a); /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/ -static void rustsecp256k1_v0_4_0_ge_storage_cmov(rustsecp256k1_v0_4_0_ge_storage *r, const rustsecp256k1_v0_4_0_ge_storage *a, int flag); +static void rustsecp256k1_v0_4_1_ge_storage_cmov(rustsecp256k1_v0_4_1_ge_storage *r, const rustsecp256k1_v0_4_1_ge_storage *a, int flag); /** Rescale a jacobian point by b which must be non-zero. Constant-time. */ -static void rustsecp256k1_v0_4_0_gej_rescale(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_fe *b); +static void rustsecp256k1_v0_4_1_gej_rescale(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_fe *b); /** Determine if a point (which is assumed to be on the curve) is in the correct (sub)group of the curve. * @@ -146,6 +139,6 @@ static void rustsecp256k1_v0_4_0_gej_rescale(rustsecp256k1_v0_4_0_gej *r, const * (very) small subgroup, and that subgroup is what is used for all cryptographic operations. In that mode, this * function checks whether a point that is on the curve is in fact also in that subgroup. */ -static int rustsecp256k1_v0_4_0_ge_is_in_correct_subgroup(const rustsecp256k1_v0_4_0_ge* ge); +static int rustsecp256k1_v0_4_1_ge_is_in_correct_subgroup(const rustsecp256k1_v0_4_1_ge* ge); #endif /* SECP256K1_GROUP_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/group_impl.h b/secp256k1-sys/depend/secp256k1/src/group_impl.h index fbf909d..2df30ef 100644 --- a/secp256k1-sys/depend/secp256k1/src/group_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/group_impl.h @@ -7,7 +7,6 @@ #ifndef SECP256K1_GROUP_IMPL_H #define SECP256K1_GROUP_IMPL_H -#include "num.h" #include "field.h" #include "group.h" @@ -22,24 +21,24 @@ */ #if defined(EXHAUSTIVE_TEST_ORDER) # if EXHAUSTIVE_TEST_ORDER == 13 -static const rustsecp256k1_v0_4_0_ge rustsecp256k1_v0_4_0_ge_const_g = SECP256K1_GE_CONST( +static const rustsecp256k1_v0_4_1_ge rustsecp256k1_v0_4_1_ge_const_g = SECP256K1_GE_CONST( 0xc3459c3d, 0x35326167, 0xcd86cce8, 0x07a2417f, 0x5b8bd567, 0xde8538ee, 0x0d507b0c, 0xd128f5bb, 0x8e467fec, 0xcd30000a, 0x6cc1184e, 0x25d382c2, 0xa2f4494e, 0x2fbe9abc, 0x8b64abac, 0xd005fb24 ); -static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_fe_const_b = SECP256K1_FE_CONST( +static const rustsecp256k1_v0_4_1_fe rustsecp256k1_v0_4_1_fe_const_b = SECP256K1_FE_CONST( 0x3d3486b2, 0x159a9ca5, 0xc75638be, 0xb23a69bc, 0x946a45ab, 0x24801247, 0xb4ed2b8e, 0x26b6a417 ); # elif EXHAUSTIVE_TEST_ORDER == 199 -static const rustsecp256k1_v0_4_0_ge rustsecp256k1_v0_4_0_ge_const_g = SECP256K1_GE_CONST( +static const rustsecp256k1_v0_4_1_ge rustsecp256k1_v0_4_1_ge_const_g = SECP256K1_GE_CONST( 0x226e653f, 0xc8df7744, 0x9bacbf12, 0x7d1dcbf9, 0x87f05b2a, 0xe7edbd28, 0x1f564575, 0xc48dcf18, 0xa13872c2, 0xe933bb17, 0x5d9ffd5b, 0xb5b6e10c, 0x57fe3c00, 0xbaaaa15a, 0xe003ec3e, 0x9c269bae ); -static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_fe_const_b = SECP256K1_FE_CONST( +static const rustsecp256k1_v0_4_1_fe 
rustsecp256k1_v0_4_1_fe_const_b = SECP256K1_FE_CONST( 0x2cca28fa, 0xfc614b80, 0x2a3db42b, 0x00ba00b1, 0xbea8d943, 0xdace9ab2, 0x9536daea, 0x0074defb ); @@ -50,83 +49,84 @@ static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_fe_const_b = SECP256K1 /** Generator for secp256k1, value 'g' defined in * "Standards for Efficient Cryptography" (SEC2) 2.7.1. */ -static const rustsecp256k1_v0_4_0_ge rustsecp256k1_v0_4_0_ge_const_g = SECP256K1_GE_CONST( +static const rustsecp256k1_v0_4_1_ge rustsecp256k1_v0_4_1_ge_const_g = SECP256K1_GE_CONST( 0x79BE667EUL, 0xF9DCBBACUL, 0x55A06295UL, 0xCE870B07UL, 0x029BFCDBUL, 0x2DCE28D9UL, 0x59F2815BUL, 0x16F81798UL, 0x483ADA77UL, 0x26A3C465UL, 0x5DA4FBFCUL, 0x0E1108A8UL, 0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL ); -static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_fe_const_b = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 7); +static const rustsecp256k1_v0_4_1_fe rustsecp256k1_v0_4_1_fe_const_b = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 7); #endif -static void rustsecp256k1_v0_4_0_ge_set_gej_zinv(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_fe *zi) { - rustsecp256k1_v0_4_0_fe zi2; - rustsecp256k1_v0_4_0_fe zi3; - rustsecp256k1_v0_4_0_fe_sqr(&zi2, zi); - rustsecp256k1_v0_4_0_fe_mul(&zi3, &zi2, zi); - rustsecp256k1_v0_4_0_fe_mul(&r->x, &a->x, &zi2); - rustsecp256k1_v0_4_0_fe_mul(&r->y, &a->y, &zi3); +static void rustsecp256k1_v0_4_1_ge_set_gej_zinv(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_fe *zi) { + rustsecp256k1_v0_4_1_fe zi2; + rustsecp256k1_v0_4_1_fe zi3; + rustsecp256k1_v0_4_1_fe_sqr(&zi2, zi); + rustsecp256k1_v0_4_1_fe_mul(&zi3, &zi2, zi); + rustsecp256k1_v0_4_1_fe_mul(&r->x, &a->x, &zi2); + rustsecp256k1_v0_4_1_fe_mul(&r->y, &a->y, &zi3); r->infinity = a->infinity; } -static void rustsecp256k1_v0_4_0_ge_set_xy(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_fe *x, const rustsecp256k1_v0_4_0_fe *y) { +static void rustsecp256k1_v0_4_1_ge_set_xy(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_fe *x, const rustsecp256k1_v0_4_1_fe *y) { r->infinity = 0; r->x = *x; r->y = *y; } -static int rustsecp256k1_v0_4_0_ge_is_infinity(const rustsecp256k1_v0_4_0_ge *a) { +static int rustsecp256k1_v0_4_1_ge_is_infinity(const rustsecp256k1_v0_4_1_ge *a) { return a->infinity; } -static void rustsecp256k1_v0_4_0_ge_neg(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_ge *a) { +static void rustsecp256k1_v0_4_1_ge_neg(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_ge *a) { *r = *a; - rustsecp256k1_v0_4_0_fe_normalize_weak(&r->y); - rustsecp256k1_v0_4_0_fe_negate(&r->y, &r->y, 1); + rustsecp256k1_v0_4_1_fe_normalize_weak(&r->y); + rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->y, 1); } -static void rustsecp256k1_v0_4_0_ge_set_gej(rustsecp256k1_v0_4_0_ge *r, rustsecp256k1_v0_4_0_gej *a) { - rustsecp256k1_v0_4_0_fe z2, z3; +static void rustsecp256k1_v0_4_1_ge_set_gej(rustsecp256k1_v0_4_1_ge *r, rustsecp256k1_v0_4_1_gej *a) { + rustsecp256k1_v0_4_1_fe z2, z3; r->infinity = a->infinity; - rustsecp256k1_v0_4_0_fe_inv(&a->z, &a->z); - rustsecp256k1_v0_4_0_fe_sqr(&z2, &a->z); - rustsecp256k1_v0_4_0_fe_mul(&z3, &a->z, &z2); - rustsecp256k1_v0_4_0_fe_mul(&a->x, &a->x, &z2); - rustsecp256k1_v0_4_0_fe_mul(&a->y, &a->y, &z3); - rustsecp256k1_v0_4_0_fe_set_int(&a->z, 1); + rustsecp256k1_v0_4_1_fe_inv(&a->z, &a->z); + rustsecp256k1_v0_4_1_fe_sqr(&z2, &a->z); + rustsecp256k1_v0_4_1_fe_mul(&z3, &a->z, &z2); + rustsecp256k1_v0_4_1_fe_mul(&a->x, &a->x, &z2); + 
rustsecp256k1_v0_4_1_fe_mul(&a->y, &a->y, &z3); + rustsecp256k1_v0_4_1_fe_set_int(&a->z, 1); r->x = a->x; r->y = a->y; } -static void rustsecp256k1_v0_4_0_ge_set_gej_var(rustsecp256k1_v0_4_0_ge *r, rustsecp256k1_v0_4_0_gej *a) { - rustsecp256k1_v0_4_0_fe z2, z3; - r->infinity = a->infinity; +static void rustsecp256k1_v0_4_1_ge_set_gej_var(rustsecp256k1_v0_4_1_ge *r, rustsecp256k1_v0_4_1_gej *a) { + rustsecp256k1_v0_4_1_fe z2, z3; if (a->infinity) { + rustsecp256k1_v0_4_1_ge_set_infinity(r); return; } - rustsecp256k1_v0_4_0_fe_inv_var(&a->z, &a->z); - rustsecp256k1_v0_4_0_fe_sqr(&z2, &a->z); - rustsecp256k1_v0_4_0_fe_mul(&z3, &a->z, &z2); - rustsecp256k1_v0_4_0_fe_mul(&a->x, &a->x, &z2); - rustsecp256k1_v0_4_0_fe_mul(&a->y, &a->y, &z3); - rustsecp256k1_v0_4_0_fe_set_int(&a->z, 1); - r->x = a->x; - r->y = a->y; + rustsecp256k1_v0_4_1_fe_inv_var(&a->z, &a->z); + rustsecp256k1_v0_4_1_fe_sqr(&z2, &a->z); + rustsecp256k1_v0_4_1_fe_mul(&z3, &a->z, &z2); + rustsecp256k1_v0_4_1_fe_mul(&a->x, &a->x, &z2); + rustsecp256k1_v0_4_1_fe_mul(&a->y, &a->y, &z3); + rustsecp256k1_v0_4_1_fe_set_int(&a->z, 1); + rustsecp256k1_v0_4_1_ge_set_xy(r, &a->x, &a->y); } -static void rustsecp256k1_v0_4_0_ge_set_all_gej_var(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_gej *a, size_t len) { - rustsecp256k1_v0_4_0_fe u; +static void rustsecp256k1_v0_4_1_ge_set_all_gej_var(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_gej *a, size_t len) { + rustsecp256k1_v0_4_1_fe u; size_t i; size_t last_i = SIZE_MAX; for (i = 0; i < len; i++) { - if (!a[i].infinity) { + if (a[i].infinity) { + rustsecp256k1_v0_4_1_ge_set_infinity(&r[i]); + } else { /* Use destination's x coordinates as scratch space */ if (last_i == SIZE_MAX) { r[i].x = a[i].z; } else { - rustsecp256k1_v0_4_0_fe_mul(&r[i].x, &r[last_i].x, &a[i].z); + rustsecp256k1_v0_4_1_fe_mul(&r[i].x, &r[last_i].x, &a[i].z); } last_i = i; } @@ -134,14 +134,14 @@ static void rustsecp256k1_v0_4_0_ge_set_all_gej_var(rustsecp256k1_v0_4_0_ge *r, if (last_i == SIZE_MAX) { return; } - rustsecp256k1_v0_4_0_fe_inv_var(&u, &r[last_i].x); + rustsecp256k1_v0_4_1_fe_inv_var(&u, &r[last_i].x); i = last_i; while (i > 0) { i--; if (!a[i].infinity) { - rustsecp256k1_v0_4_0_fe_mul(&r[last_i].x, &r[i].x, &u); - rustsecp256k1_v0_4_0_fe_mul(&u, &u, &a[last_i].z); + rustsecp256k1_v0_4_1_fe_mul(&r[last_i].x, &r[i].x, &u); + rustsecp256k1_v0_4_1_fe_mul(&u, &u, &a[last_i].z); last_i = i; } } @@ -149,23 +149,22 @@ static void rustsecp256k1_v0_4_0_ge_set_all_gej_var(rustsecp256k1_v0_4_0_ge *r, r[last_i].x = u; for (i = 0; i < len; i++) { - r[i].infinity = a[i].infinity; if (!a[i].infinity) { - rustsecp256k1_v0_4_0_ge_set_gej_zinv(&r[i], &a[i], &r[i].x); + rustsecp256k1_v0_4_1_ge_set_gej_zinv(&r[i], &a[i], &r[i].x); } } } -static void rustsecp256k1_v0_4_0_ge_globalz_set_table_gej(size_t len, rustsecp256k1_v0_4_0_ge *r, rustsecp256k1_v0_4_0_fe *globalz, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_fe *zr) { +static void rustsecp256k1_v0_4_1_ge_globalz_set_table_gej(size_t len, rustsecp256k1_v0_4_1_ge *r, rustsecp256k1_v0_4_1_fe *globalz, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_fe *zr) { size_t i = len - 1; - rustsecp256k1_v0_4_0_fe zs; + rustsecp256k1_v0_4_1_fe zs; if (len > 0) { /* The z of the final point gives us the "global Z" for the table. 
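 * Every earlier entry is then rescaled to that common denominator by
 * walking backwards and accumulating the z-ratios zr[i], so the whole
 * table is produced without performing a single field inversion.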
*/ r[i].x = a[i].x; r[i].y = a[i].y; /* Ensure all y values are in weak normal form for fast negation of points */ - rustsecp256k1_v0_4_0_fe_normalize_weak(&r[i].y); + rustsecp256k1_v0_4_1_fe_normalize_weak(&r[i].y); *globalz = a[i].z; r[i].infinity = 0; zs = zr[i]; @@ -173,104 +172,100 @@ static void rustsecp256k1_v0_4_0_ge_globalz_set_table_gej(size_t len, rustsecp25 /* Work our way backwards, using the z-ratios to scale the x/y values. */ while (i > 0) { if (i != len - 1) { - rustsecp256k1_v0_4_0_fe_mul(&zs, &zs, &zr[i]); + rustsecp256k1_v0_4_1_fe_mul(&zs, &zs, &zr[i]); } i--; - rustsecp256k1_v0_4_0_ge_set_gej_zinv(&r[i], &a[i], &zs); + rustsecp256k1_v0_4_1_ge_set_gej_zinv(&r[i], &a[i], &zs); } } } -static void rustsecp256k1_v0_4_0_gej_set_infinity(rustsecp256k1_v0_4_0_gej *r) { +static void rustsecp256k1_v0_4_1_gej_set_infinity(rustsecp256k1_v0_4_1_gej *r) { r->infinity = 1; - rustsecp256k1_v0_4_0_fe_clear(&r->x); - rustsecp256k1_v0_4_0_fe_clear(&r->y); - rustsecp256k1_v0_4_0_fe_clear(&r->z); + rustsecp256k1_v0_4_1_fe_clear(&r->x); + rustsecp256k1_v0_4_1_fe_clear(&r->y); + rustsecp256k1_v0_4_1_fe_clear(&r->z); } -static void rustsecp256k1_v0_4_0_ge_set_infinity(rustsecp256k1_v0_4_0_ge *r) { +static void rustsecp256k1_v0_4_1_ge_set_infinity(rustsecp256k1_v0_4_1_ge *r) { r->infinity = 1; - rustsecp256k1_v0_4_0_fe_clear(&r->x); - rustsecp256k1_v0_4_0_fe_clear(&r->y); + rustsecp256k1_v0_4_1_fe_clear(&r->x); + rustsecp256k1_v0_4_1_fe_clear(&r->y); } -static void rustsecp256k1_v0_4_0_gej_clear(rustsecp256k1_v0_4_0_gej *r) { +static void rustsecp256k1_v0_4_1_gej_clear(rustsecp256k1_v0_4_1_gej *r) { r->infinity = 0; - rustsecp256k1_v0_4_0_fe_clear(&r->x); - rustsecp256k1_v0_4_0_fe_clear(&r->y); - rustsecp256k1_v0_4_0_fe_clear(&r->z); + rustsecp256k1_v0_4_1_fe_clear(&r->x); + rustsecp256k1_v0_4_1_fe_clear(&r->y); + rustsecp256k1_v0_4_1_fe_clear(&r->z); } -static void rustsecp256k1_v0_4_0_ge_clear(rustsecp256k1_v0_4_0_ge *r) { +static void rustsecp256k1_v0_4_1_ge_clear(rustsecp256k1_v0_4_1_ge *r) { r->infinity = 0; - rustsecp256k1_v0_4_0_fe_clear(&r->x); - rustsecp256k1_v0_4_0_fe_clear(&r->y); + rustsecp256k1_v0_4_1_fe_clear(&r->x); + rustsecp256k1_v0_4_1_fe_clear(&r->y); } -static int rustsecp256k1_v0_4_0_ge_set_xquad(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_fe *x) { - rustsecp256k1_v0_4_0_fe x2, x3; +static int rustsecp256k1_v0_4_1_ge_set_xo_var(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_fe *x, int odd) { + rustsecp256k1_v0_4_1_fe x2, x3; r->x = *x; - rustsecp256k1_v0_4_0_fe_sqr(&x2, x); - rustsecp256k1_v0_4_0_fe_mul(&x3, x, &x2); + rustsecp256k1_v0_4_1_fe_sqr(&x2, x); + rustsecp256k1_v0_4_1_fe_mul(&x3, x, &x2); r->infinity = 0; - rustsecp256k1_v0_4_0_fe_add(&x3, &rustsecp256k1_v0_4_0_fe_const_b); - return rustsecp256k1_v0_4_0_fe_sqrt(&r->y, &x3); -} - -static int rustsecp256k1_v0_4_0_ge_set_xo_var(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_fe *x, int odd) { - if (!rustsecp256k1_v0_4_0_ge_set_xquad(r, x)) { + rustsecp256k1_v0_4_1_fe_add(&x3, &rustsecp256k1_v0_4_1_fe_const_b); + if (!rustsecp256k1_v0_4_1_fe_sqrt(&r->y, &x3)) { return 0; } - rustsecp256k1_v0_4_0_fe_normalize_var(&r->y); - if (rustsecp256k1_v0_4_0_fe_is_odd(&r->y) != odd) { - rustsecp256k1_v0_4_0_fe_negate(&r->y, &r->y, 1); + rustsecp256k1_v0_4_1_fe_normalize_var(&r->y); + if (rustsecp256k1_v0_4_1_fe_is_odd(&r->y) != odd) { + rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->y, 1); } return 1; } -static void rustsecp256k1_v0_4_0_gej_set_ge(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_ge 
*a) { +static void rustsecp256k1_v0_4_1_gej_set_ge(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_ge *a) { r->infinity = a->infinity; r->x = a->x; r->y = a->y; - rustsecp256k1_v0_4_0_fe_set_int(&r->z, 1); + rustsecp256k1_v0_4_1_fe_set_int(&r->z, 1); } -static int rustsecp256k1_v0_4_0_gej_eq_x_var(const rustsecp256k1_v0_4_0_fe *x, const rustsecp256k1_v0_4_0_gej *a) { - rustsecp256k1_v0_4_0_fe r, r2; +static int rustsecp256k1_v0_4_1_gej_eq_x_var(const rustsecp256k1_v0_4_1_fe *x, const rustsecp256k1_v0_4_1_gej *a) { + rustsecp256k1_v0_4_1_fe r, r2; VERIFY_CHECK(!a->infinity); - rustsecp256k1_v0_4_0_fe_sqr(&r, &a->z); rustsecp256k1_v0_4_0_fe_mul(&r, &r, x); - r2 = a->x; rustsecp256k1_v0_4_0_fe_normalize_weak(&r2); - return rustsecp256k1_v0_4_0_fe_equal_var(&r, &r2); + rustsecp256k1_v0_4_1_fe_sqr(&r, &a->z); rustsecp256k1_v0_4_1_fe_mul(&r, &r, x); + r2 = a->x; rustsecp256k1_v0_4_1_fe_normalize_weak(&r2); + return rustsecp256k1_v0_4_1_fe_equal_var(&r, &r2); } -static void rustsecp256k1_v0_4_0_gej_neg(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a) { +static void rustsecp256k1_v0_4_1_gej_neg(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a) { r->infinity = a->infinity; r->x = a->x; r->y = a->y; r->z = a->z; - rustsecp256k1_v0_4_0_fe_normalize_weak(&r->y); - rustsecp256k1_v0_4_0_fe_negate(&r->y, &r->y, 1); + rustsecp256k1_v0_4_1_fe_normalize_weak(&r->y); + rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->y, 1); } -static int rustsecp256k1_v0_4_0_gej_is_infinity(const rustsecp256k1_v0_4_0_gej *a) { +static int rustsecp256k1_v0_4_1_gej_is_infinity(const rustsecp256k1_v0_4_1_gej *a) { return a->infinity; } -static int rustsecp256k1_v0_4_0_ge_is_valid_var(const rustsecp256k1_v0_4_0_ge *a) { - rustsecp256k1_v0_4_0_fe y2, x3; +static int rustsecp256k1_v0_4_1_ge_is_valid_var(const rustsecp256k1_v0_4_1_ge *a) { + rustsecp256k1_v0_4_1_fe y2, x3; if (a->infinity) { return 0; } /* y^2 = x^3 + 7 */ - rustsecp256k1_v0_4_0_fe_sqr(&y2, &a->y); - rustsecp256k1_v0_4_0_fe_sqr(&x3, &a->x); rustsecp256k1_v0_4_0_fe_mul(&x3, &x3, &a->x); - rustsecp256k1_v0_4_0_fe_add(&x3, &rustsecp256k1_v0_4_0_fe_const_b); - rustsecp256k1_v0_4_0_fe_normalize_weak(&x3); - return rustsecp256k1_v0_4_0_fe_equal_var(&y2, &x3); + rustsecp256k1_v0_4_1_fe_sqr(&y2, &a->y); + rustsecp256k1_v0_4_1_fe_sqr(&x3, &a->x); rustsecp256k1_v0_4_1_fe_mul(&x3, &x3, &a->x); + rustsecp256k1_v0_4_1_fe_add(&x3, &rustsecp256k1_v0_4_1_fe_const_b); + rustsecp256k1_v0_4_1_fe_normalize_weak(&x3); + return rustsecp256k1_v0_4_1_fe_equal_var(&y2, &x3); } -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_gej_double(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a) { +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_gej_double(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a) { /* Operations: 3 mul, 4 sqr, 0 normalize, 12 mul_int/add/negate. * * Note that there is an implementation described at @@ -278,33 +273,33 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_gej_double(rustsecp256k1_v0_4_ * which trades a multiply for a square, but in practice this is actually slower, * mainly because it requires more normalizations. 
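 *
 * (The concrete formulas computed below, as spelled out step by step in the
 * per-line comments: Z' = 2*Y*Z, X' = 9*X^4 - 8*X*Y^2, and
 * Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4.)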
*/ - rustsecp256k1_v0_4_0_fe t1,t2,t3,t4; + rustsecp256k1_v0_4_1_fe t1,t2,t3,t4; r->infinity = a->infinity; - rustsecp256k1_v0_4_0_fe_mul(&r->z, &a->z, &a->y); - rustsecp256k1_v0_4_0_fe_mul_int(&r->z, 2); /* Z' = 2*Y*Z (2) */ - rustsecp256k1_v0_4_0_fe_sqr(&t1, &a->x); - rustsecp256k1_v0_4_0_fe_mul_int(&t1, 3); /* T1 = 3*X^2 (3) */ - rustsecp256k1_v0_4_0_fe_sqr(&t2, &t1); /* T2 = 9*X^4 (1) */ - rustsecp256k1_v0_4_0_fe_sqr(&t3, &a->y); - rustsecp256k1_v0_4_0_fe_mul_int(&t3, 2); /* T3 = 2*Y^2 (2) */ - rustsecp256k1_v0_4_0_fe_sqr(&t4, &t3); - rustsecp256k1_v0_4_0_fe_mul_int(&t4, 2); /* T4 = 8*Y^4 (2) */ - rustsecp256k1_v0_4_0_fe_mul(&t3, &t3, &a->x); /* T3 = 2*X*Y^2 (1) */ + rustsecp256k1_v0_4_1_fe_mul(&r->z, &a->z, &a->y); + rustsecp256k1_v0_4_1_fe_mul_int(&r->z, 2); /* Z' = 2*Y*Z (2) */ + rustsecp256k1_v0_4_1_fe_sqr(&t1, &a->x); + rustsecp256k1_v0_4_1_fe_mul_int(&t1, 3); /* T1 = 3*X^2 (3) */ + rustsecp256k1_v0_4_1_fe_sqr(&t2, &t1); /* T2 = 9*X^4 (1) */ + rustsecp256k1_v0_4_1_fe_sqr(&t3, &a->y); + rustsecp256k1_v0_4_1_fe_mul_int(&t3, 2); /* T3 = 2*Y^2 (2) */ + rustsecp256k1_v0_4_1_fe_sqr(&t4, &t3); + rustsecp256k1_v0_4_1_fe_mul_int(&t4, 2); /* T4 = 8*Y^4 (2) */ + rustsecp256k1_v0_4_1_fe_mul(&t3, &t3, &a->x); /* T3 = 2*X*Y^2 (1) */ r->x = t3; - rustsecp256k1_v0_4_0_fe_mul_int(&r->x, 4); /* X' = 8*X*Y^2 (4) */ - rustsecp256k1_v0_4_0_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */ - rustsecp256k1_v0_4_0_fe_add(&r->x, &t2); /* X' = 9*X^4 - 8*X*Y^2 (6) */ - rustsecp256k1_v0_4_0_fe_negate(&t2, &t2, 1); /* T2 = -9*X^4 (2) */ - rustsecp256k1_v0_4_0_fe_mul_int(&t3, 6); /* T3 = 12*X*Y^2 (6) */ - rustsecp256k1_v0_4_0_fe_add(&t3, &t2); /* T3 = 12*X*Y^2 - 9*X^4 (8) */ - rustsecp256k1_v0_4_0_fe_mul(&r->y, &t1, &t3); /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */ - rustsecp256k1_v0_4_0_fe_negate(&t2, &t4, 2); /* T2 = -8*Y^4 (3) */ - rustsecp256k1_v0_4_0_fe_add(&r->y, &t2); /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */ + rustsecp256k1_v0_4_1_fe_mul_int(&r->x, 4); /* X' = 8*X*Y^2 (4) */ + rustsecp256k1_v0_4_1_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */ + rustsecp256k1_v0_4_1_fe_add(&r->x, &t2); /* X' = 9*X^4 - 8*X*Y^2 (6) */ + rustsecp256k1_v0_4_1_fe_negate(&t2, &t2, 1); /* T2 = -9*X^4 (2) */ + rustsecp256k1_v0_4_1_fe_mul_int(&t3, 6); /* T3 = 12*X*Y^2 (6) */ + rustsecp256k1_v0_4_1_fe_add(&t3, &t2); /* T3 = 12*X*Y^2 - 9*X^4 (8) */ + rustsecp256k1_v0_4_1_fe_mul(&r->y, &t1, &t3); /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */ + rustsecp256k1_v0_4_1_fe_negate(&t2, &t4, 2); /* T2 = -8*Y^4 (3) */ + rustsecp256k1_v0_4_1_fe_add(&r->y, &t2); /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */ } -static void rustsecp256k1_v0_4_0_gej_double_var(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, rustsecp256k1_v0_4_0_fe *rzr) { +static void rustsecp256k1_v0_4_1_gej_double_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, rustsecp256k1_v0_4_1_fe *rzr) { /** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity, * Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have * y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p. @@ -316,25 +311,25 @@ static void rustsecp256k1_v0_4_0_gej_double_var(rustsecp256k1_v0_4_0_gej *r, con * point will be gibberish (z = 0 but infinity = 0). 
*/ if (a->infinity) { - r->infinity = 1; + rustsecp256k1_v0_4_1_gej_set_infinity(r); if (rzr != NULL) { - rustsecp256k1_v0_4_0_fe_set_int(rzr, 1); + rustsecp256k1_v0_4_1_fe_set_int(rzr, 1); } return; } if (rzr != NULL) { *rzr = a->y; - rustsecp256k1_v0_4_0_fe_normalize_weak(rzr); - rustsecp256k1_v0_4_0_fe_mul_int(rzr, 2); + rustsecp256k1_v0_4_1_fe_normalize_weak(rzr); + rustsecp256k1_v0_4_1_fe_mul_int(rzr, 2); } - rustsecp256k1_v0_4_0_gej_double(r, a); + rustsecp256k1_v0_4_1_gej_double(r, a); } -static void rustsecp256k1_v0_4_0_gej_add_var(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_gej *b, rustsecp256k1_v0_4_0_fe *rzr) { +static void rustsecp256k1_v0_4_1_gej_add_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_gej *b, rustsecp256k1_v0_4_1_fe *rzr) { /* Operations: 12 mul, 4 sqr, 2 normalize, 12 mul_int/add/negate */ - rustsecp256k1_v0_4_0_fe z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; + rustsecp256k1_v0_4_1_fe z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; if (a->infinity) { VERIFY_CHECK(rzr == NULL); @@ -344,112 +339,112 @@ static void rustsecp256k1_v0_4_0_gej_add_var(rustsecp256k1_v0_4_0_gej *r, const if (b->infinity) { if (rzr != NULL) { - rustsecp256k1_v0_4_0_fe_set_int(rzr, 1); + rustsecp256k1_v0_4_1_fe_set_int(rzr, 1); } *r = *a; return; } r->infinity = 0; - rustsecp256k1_v0_4_0_fe_sqr(&z22, &b->z); - rustsecp256k1_v0_4_0_fe_sqr(&z12, &a->z); - rustsecp256k1_v0_4_0_fe_mul(&u1, &a->x, &z22); - rustsecp256k1_v0_4_0_fe_mul(&u2, &b->x, &z12); - rustsecp256k1_v0_4_0_fe_mul(&s1, &a->y, &z22); rustsecp256k1_v0_4_0_fe_mul(&s1, &s1, &b->z); - rustsecp256k1_v0_4_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_4_0_fe_mul(&s2, &s2, &a->z); - rustsecp256k1_v0_4_0_fe_negate(&h, &u1, 1); rustsecp256k1_v0_4_0_fe_add(&h, &u2); - rustsecp256k1_v0_4_0_fe_negate(&i, &s1, 1); rustsecp256k1_v0_4_0_fe_add(&i, &s2); - if (rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&h)) { - if (rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&i)) { - rustsecp256k1_v0_4_0_gej_double_var(r, a, rzr); + rustsecp256k1_v0_4_1_fe_sqr(&z22, &b->z); + rustsecp256k1_v0_4_1_fe_sqr(&z12, &a->z); + rustsecp256k1_v0_4_1_fe_mul(&u1, &a->x, &z22); + rustsecp256k1_v0_4_1_fe_mul(&u2, &b->x, &z12); + rustsecp256k1_v0_4_1_fe_mul(&s1, &a->y, &z22); rustsecp256k1_v0_4_1_fe_mul(&s1, &s1, &b->z); + rustsecp256k1_v0_4_1_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_4_1_fe_mul(&s2, &s2, &a->z); + rustsecp256k1_v0_4_1_fe_negate(&h, &u1, 1); rustsecp256k1_v0_4_1_fe_add(&h, &u2); + rustsecp256k1_v0_4_1_fe_negate(&i, &s1, 1); rustsecp256k1_v0_4_1_fe_add(&i, &s2); + if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&h)) { + if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&i)) { + rustsecp256k1_v0_4_1_gej_double_var(r, a, rzr); } else { if (rzr != NULL) { - rustsecp256k1_v0_4_0_fe_set_int(rzr, 0); + rustsecp256k1_v0_4_1_fe_set_int(rzr, 0); } - rustsecp256k1_v0_4_0_gej_set_infinity(r); + rustsecp256k1_v0_4_1_gej_set_infinity(r); } return; } - rustsecp256k1_v0_4_0_fe_sqr(&i2, &i); - rustsecp256k1_v0_4_0_fe_sqr(&h2, &h); - rustsecp256k1_v0_4_0_fe_mul(&h3, &h, &h2); - rustsecp256k1_v0_4_0_fe_mul(&h, &h, &b->z); + rustsecp256k1_v0_4_1_fe_sqr(&i2, &i); + rustsecp256k1_v0_4_1_fe_sqr(&h2, &h); + rustsecp256k1_v0_4_1_fe_mul(&h3, &h, &h2); + rustsecp256k1_v0_4_1_fe_mul(&h, &h, &b->z); if (rzr != NULL) { *rzr = h; } - rustsecp256k1_v0_4_0_fe_mul(&r->z, &a->z, &h); - rustsecp256k1_v0_4_0_fe_mul(&t, &u1, &h2); - r->x = t; rustsecp256k1_v0_4_0_fe_mul_int(&r->x, 2); 
rustsecp256k1_v0_4_0_fe_add(&r->x, &h3); rustsecp256k1_v0_4_0_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_4_0_fe_add(&r->x, &i2); - rustsecp256k1_v0_4_0_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_4_0_fe_add(&r->y, &t); rustsecp256k1_v0_4_0_fe_mul(&r->y, &r->y, &i); - rustsecp256k1_v0_4_0_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_4_0_fe_negate(&h3, &h3, 1); - rustsecp256k1_v0_4_0_fe_add(&r->y, &h3); + rustsecp256k1_v0_4_1_fe_mul(&r->z, &a->z, &h); + rustsecp256k1_v0_4_1_fe_mul(&t, &u1, &h2); + r->x = t; rustsecp256k1_v0_4_1_fe_mul_int(&r->x, 2); rustsecp256k1_v0_4_1_fe_add(&r->x, &h3); rustsecp256k1_v0_4_1_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_4_1_fe_add(&r->x, &i2); + rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_4_1_fe_add(&r->y, &t); rustsecp256k1_v0_4_1_fe_mul(&r->y, &r->y, &i); + rustsecp256k1_v0_4_1_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_4_1_fe_negate(&h3, &h3, 1); + rustsecp256k1_v0_4_1_fe_add(&r->y, &h3); } -static void rustsecp256k1_v0_4_0_gej_add_ge_var(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_ge *b, rustsecp256k1_v0_4_0_fe *rzr) { +static void rustsecp256k1_v0_4_1_gej_add_ge_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_ge *b, rustsecp256k1_v0_4_1_fe *rzr) { /* 8 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */ - rustsecp256k1_v0_4_0_fe z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; + rustsecp256k1_v0_4_1_fe z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; if (a->infinity) { VERIFY_CHECK(rzr == NULL); - rustsecp256k1_v0_4_0_gej_set_ge(r, b); + rustsecp256k1_v0_4_1_gej_set_ge(r, b); return; } if (b->infinity) { if (rzr != NULL) { - rustsecp256k1_v0_4_0_fe_set_int(rzr, 1); + rustsecp256k1_v0_4_1_fe_set_int(rzr, 1); } *r = *a; return; } r->infinity = 0; - rustsecp256k1_v0_4_0_fe_sqr(&z12, &a->z); - u1 = a->x; rustsecp256k1_v0_4_0_fe_normalize_weak(&u1); - rustsecp256k1_v0_4_0_fe_mul(&u2, &b->x, &z12); - s1 = a->y; rustsecp256k1_v0_4_0_fe_normalize_weak(&s1); - rustsecp256k1_v0_4_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_4_0_fe_mul(&s2, &s2, &a->z); - rustsecp256k1_v0_4_0_fe_negate(&h, &u1, 1); rustsecp256k1_v0_4_0_fe_add(&h, &u2); - rustsecp256k1_v0_4_0_fe_negate(&i, &s1, 1); rustsecp256k1_v0_4_0_fe_add(&i, &s2); - if (rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&h)) { - if (rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&i)) { - rustsecp256k1_v0_4_0_gej_double_var(r, a, rzr); + rustsecp256k1_v0_4_1_fe_sqr(&z12, &a->z); + u1 = a->x; rustsecp256k1_v0_4_1_fe_normalize_weak(&u1); + rustsecp256k1_v0_4_1_fe_mul(&u2, &b->x, &z12); + s1 = a->y; rustsecp256k1_v0_4_1_fe_normalize_weak(&s1); + rustsecp256k1_v0_4_1_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_4_1_fe_mul(&s2, &s2, &a->z); + rustsecp256k1_v0_4_1_fe_negate(&h, &u1, 1); rustsecp256k1_v0_4_1_fe_add(&h, &u2); + rustsecp256k1_v0_4_1_fe_negate(&i, &s1, 1); rustsecp256k1_v0_4_1_fe_add(&i, &s2); + if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&h)) { + if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&i)) { + rustsecp256k1_v0_4_1_gej_double_var(r, a, rzr); } else { if (rzr != NULL) { - rustsecp256k1_v0_4_0_fe_set_int(rzr, 0); + rustsecp256k1_v0_4_1_fe_set_int(rzr, 0); } - rustsecp256k1_v0_4_0_gej_set_infinity(r); + rustsecp256k1_v0_4_1_gej_set_infinity(r); } return; } - rustsecp256k1_v0_4_0_fe_sqr(&i2, &i); - rustsecp256k1_v0_4_0_fe_sqr(&h2, &h); - rustsecp256k1_v0_4_0_fe_mul(&h3, &h, &h2); + rustsecp256k1_v0_4_1_fe_sqr(&i2, &i); + rustsecp256k1_v0_4_1_fe_sqr(&h2, &h); + rustsecp256k1_v0_4_1_fe_mul(&h3, &h, 
&h2); if (rzr != NULL) { *rzr = h; } - rustsecp256k1_v0_4_0_fe_mul(&r->z, &a->z, &h); - rustsecp256k1_v0_4_0_fe_mul(&t, &u1, &h2); - r->x = t; rustsecp256k1_v0_4_0_fe_mul_int(&r->x, 2); rustsecp256k1_v0_4_0_fe_add(&r->x, &h3); rustsecp256k1_v0_4_0_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_4_0_fe_add(&r->x, &i2); - rustsecp256k1_v0_4_0_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_4_0_fe_add(&r->y, &t); rustsecp256k1_v0_4_0_fe_mul(&r->y, &r->y, &i); - rustsecp256k1_v0_4_0_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_4_0_fe_negate(&h3, &h3, 1); - rustsecp256k1_v0_4_0_fe_add(&r->y, &h3); + rustsecp256k1_v0_4_1_fe_mul(&r->z, &a->z, &h); + rustsecp256k1_v0_4_1_fe_mul(&t, &u1, &h2); + r->x = t; rustsecp256k1_v0_4_1_fe_mul_int(&r->x, 2); rustsecp256k1_v0_4_1_fe_add(&r->x, &h3); rustsecp256k1_v0_4_1_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_4_1_fe_add(&r->x, &i2); + rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_4_1_fe_add(&r->y, &t); rustsecp256k1_v0_4_1_fe_mul(&r->y, &r->y, &i); + rustsecp256k1_v0_4_1_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_4_1_fe_negate(&h3, &h3, 1); + rustsecp256k1_v0_4_1_fe_add(&r->y, &h3); } -static void rustsecp256k1_v0_4_0_gej_add_zinv_var(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_ge *b, const rustsecp256k1_v0_4_0_fe *bzinv) { +static void rustsecp256k1_v0_4_1_gej_add_zinv_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_ge *b, const rustsecp256k1_v0_4_1_fe *bzinv) { /* 9 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */ - rustsecp256k1_v0_4_0_fe az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; + rustsecp256k1_v0_4_1_fe az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; if (b->infinity) { *r = *a; return; } if (a->infinity) { - rustsecp256k1_v0_4_0_fe bzinv2, bzinv3; + rustsecp256k1_v0_4_1_fe bzinv2, bzinv3; r->infinity = b->infinity; - rustsecp256k1_v0_4_0_fe_sqr(&bzinv2, bzinv); - rustsecp256k1_v0_4_0_fe_mul(&bzinv3, &bzinv2, bzinv); - rustsecp256k1_v0_4_0_fe_mul(&r->x, &b->x, &bzinv2); - rustsecp256k1_v0_4_0_fe_mul(&r->y, &b->y, &bzinv3); - rustsecp256k1_v0_4_0_fe_set_int(&r->z, 1); + rustsecp256k1_v0_4_1_fe_sqr(&bzinv2, bzinv); + rustsecp256k1_v0_4_1_fe_mul(&bzinv3, &bzinv2, bzinv); + rustsecp256k1_v0_4_1_fe_mul(&r->x, &b->x, &bzinv2); + rustsecp256k1_v0_4_1_fe_mul(&r->y, &b->y, &bzinv3); + rustsecp256k1_v0_4_1_fe_set_int(&r->z, 1); return; } r->infinity = 0; @@ -462,40 +457,40 @@ static void rustsecp256k1_v0_4_0_gej_add_zinv_var(rustsecp256k1_v0_4_0_gej *r, c * The variable az below holds the modified Z coordinate for a, which is used * for the computation of rx and ry, but not for rz. 
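 * Multiplying a->z by bzinv is what lets b be treated as if it were affine
 * with Z = 1 while computing rx and ry; the true Z of the result must still
 * be built from the unscaled a->z, which is why az is excluded from rz.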
*/ - rustsecp256k1_v0_4_0_fe_mul(&az, &a->z, bzinv); + rustsecp256k1_v0_4_1_fe_mul(&az, &a->z, bzinv); - rustsecp256k1_v0_4_0_fe_sqr(&z12, &az); - u1 = a->x; rustsecp256k1_v0_4_0_fe_normalize_weak(&u1); - rustsecp256k1_v0_4_0_fe_mul(&u2, &b->x, &z12); - s1 = a->y; rustsecp256k1_v0_4_0_fe_normalize_weak(&s1); - rustsecp256k1_v0_4_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_4_0_fe_mul(&s2, &s2, &az); - rustsecp256k1_v0_4_0_fe_negate(&h, &u1, 1); rustsecp256k1_v0_4_0_fe_add(&h, &u2); - rustsecp256k1_v0_4_0_fe_negate(&i, &s1, 1); rustsecp256k1_v0_4_0_fe_add(&i, &s2); - if (rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&h)) { - if (rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&i)) { - rustsecp256k1_v0_4_0_gej_double_var(r, a, NULL); + rustsecp256k1_v0_4_1_fe_sqr(&z12, &az); + u1 = a->x; rustsecp256k1_v0_4_1_fe_normalize_weak(&u1); + rustsecp256k1_v0_4_1_fe_mul(&u2, &b->x, &z12); + s1 = a->y; rustsecp256k1_v0_4_1_fe_normalize_weak(&s1); + rustsecp256k1_v0_4_1_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_4_1_fe_mul(&s2, &s2, &az); + rustsecp256k1_v0_4_1_fe_negate(&h, &u1, 1); rustsecp256k1_v0_4_1_fe_add(&h, &u2); + rustsecp256k1_v0_4_1_fe_negate(&i, &s1, 1); rustsecp256k1_v0_4_1_fe_add(&i, &s2); + if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&h)) { + if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&i)) { + rustsecp256k1_v0_4_1_gej_double_var(r, a, NULL); } else { - rustsecp256k1_v0_4_0_gej_set_infinity(r); + rustsecp256k1_v0_4_1_gej_set_infinity(r); } return; } - rustsecp256k1_v0_4_0_fe_sqr(&i2, &i); - rustsecp256k1_v0_4_0_fe_sqr(&h2, &h); - rustsecp256k1_v0_4_0_fe_mul(&h3, &h, &h2); - r->z = a->z; rustsecp256k1_v0_4_0_fe_mul(&r->z, &r->z, &h); - rustsecp256k1_v0_4_0_fe_mul(&t, &u1, &h2); - r->x = t; rustsecp256k1_v0_4_0_fe_mul_int(&r->x, 2); rustsecp256k1_v0_4_0_fe_add(&r->x, &h3); rustsecp256k1_v0_4_0_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_4_0_fe_add(&r->x, &i2); - rustsecp256k1_v0_4_0_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_4_0_fe_add(&r->y, &t); rustsecp256k1_v0_4_0_fe_mul(&r->y, &r->y, &i); - rustsecp256k1_v0_4_0_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_4_0_fe_negate(&h3, &h3, 1); - rustsecp256k1_v0_4_0_fe_add(&r->y, &h3); + rustsecp256k1_v0_4_1_fe_sqr(&i2, &i); + rustsecp256k1_v0_4_1_fe_sqr(&h2, &h); + rustsecp256k1_v0_4_1_fe_mul(&h3, &h, &h2); + r->z = a->z; rustsecp256k1_v0_4_1_fe_mul(&r->z, &r->z, &h); + rustsecp256k1_v0_4_1_fe_mul(&t, &u1, &h2); + r->x = t; rustsecp256k1_v0_4_1_fe_mul_int(&r->x, 2); rustsecp256k1_v0_4_1_fe_add(&r->x, &h3); rustsecp256k1_v0_4_1_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_4_1_fe_add(&r->x, &i2); + rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_4_1_fe_add(&r->y, &t); rustsecp256k1_v0_4_1_fe_mul(&r->y, &r->y, &i); + rustsecp256k1_v0_4_1_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_4_1_fe_negate(&h3, &h3, 1); + rustsecp256k1_v0_4_1_fe_add(&r->y, &h3); } -static void rustsecp256k1_v0_4_0_gej_add_ge(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_ge *b) { +static void rustsecp256k1_v0_4_1_gej_add_ge(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_ge *b) { /* Operations: 7 mul, 5 sqr, 4 normalize, 21 mul_int/add/negate/cmov */ - static const rustsecp256k1_v0_4_0_fe fe_1 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); - rustsecp256k1_v0_4_0_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr; - rustsecp256k1_v0_4_0_fe m_alt, rr_alt; + static const rustsecp256k1_v0_4_1_fe fe_1 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); + rustsecp256k1_v0_4_1_fe zz, u1, 
u2, s1, s2, t, tt, m, n, q, rr; + rustsecp256k1_v0_4_1_fe m_alt, rr_alt; int infinity, degenerate; VERIFY_CHECK(!b->infinity); VERIFY_CHECK(a->infinity == 0 || a->infinity == 1); @@ -550,139 +545,125 @@ static void rustsecp256k1_v0_4_0_gej_add_ge(rustsecp256k1_v0_4_0_gej *r, const r * so this covers everything. */ - rustsecp256k1_v0_4_0_fe_sqr(&zz, &a->z); /* z = Z1^2 */ - u1 = a->x; rustsecp256k1_v0_4_0_fe_normalize_weak(&u1); /* u1 = U1 = X1*Z2^2 (1) */ - rustsecp256k1_v0_4_0_fe_mul(&u2, &b->x, &zz); /* u2 = U2 = X2*Z1^2 (1) */ - s1 = a->y; rustsecp256k1_v0_4_0_fe_normalize_weak(&s1); /* s1 = S1 = Y1*Z2^3 (1) */ - rustsecp256k1_v0_4_0_fe_mul(&s2, &b->y, &zz); /* s2 = Y2*Z1^2 (1) */ - rustsecp256k1_v0_4_0_fe_mul(&s2, &s2, &a->z); /* s2 = S2 = Y2*Z1^3 (1) */ - t = u1; rustsecp256k1_v0_4_0_fe_add(&t, &u2); /* t = T = U1+U2 (2) */ - m = s1; rustsecp256k1_v0_4_0_fe_add(&m, &s2); /* m = M = S1+S2 (2) */ - rustsecp256k1_v0_4_0_fe_sqr(&rr, &t); /* rr = T^2 (1) */ - rustsecp256k1_v0_4_0_fe_negate(&m_alt, &u2, 1); /* Malt = -X2*Z1^2 */ - rustsecp256k1_v0_4_0_fe_mul(&tt, &u1, &m_alt); /* tt = -U1*U2 (2) */ - rustsecp256k1_v0_4_0_fe_add(&rr, &tt); /* rr = R = T^2-U1*U2 (3) */ + rustsecp256k1_v0_4_1_fe_sqr(&zz, &a->z); /* z = Z1^2 */ + u1 = a->x; rustsecp256k1_v0_4_1_fe_normalize_weak(&u1); /* u1 = U1 = X1*Z2^2 (1) */ + rustsecp256k1_v0_4_1_fe_mul(&u2, &b->x, &zz); /* u2 = U2 = X2*Z1^2 (1) */ + s1 = a->y; rustsecp256k1_v0_4_1_fe_normalize_weak(&s1); /* s1 = S1 = Y1*Z2^3 (1) */ + rustsecp256k1_v0_4_1_fe_mul(&s2, &b->y, &zz); /* s2 = Y2*Z1^2 (1) */ + rustsecp256k1_v0_4_1_fe_mul(&s2, &s2, &a->z); /* s2 = S2 = Y2*Z1^3 (1) */ + t = u1; rustsecp256k1_v0_4_1_fe_add(&t, &u2); /* t = T = U1+U2 (2) */ + m = s1; rustsecp256k1_v0_4_1_fe_add(&m, &s2); /* m = M = S1+S2 (2) */ + rustsecp256k1_v0_4_1_fe_sqr(&rr, &t); /* rr = T^2 (1) */ + rustsecp256k1_v0_4_1_fe_negate(&m_alt, &u2, 1); /* Malt = -X2*Z1^2 */ + rustsecp256k1_v0_4_1_fe_mul(&tt, &u1, &m_alt); /* tt = -U1*U2 (2) */ + rustsecp256k1_v0_4_1_fe_add(&rr, &tt); /* rr = R = T^2-U1*U2 (3) */ /** If lambda = R/M = 0/0 we have a problem (except in the "trivial" * case that Z = z1z2 = 0, and this is special-cased later on). */ - degenerate = rustsecp256k1_v0_4_0_fe_normalizes_to_zero(&m) & - rustsecp256k1_v0_4_0_fe_normalizes_to_zero(&rr); + degenerate = rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&m) & + rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&rr); /* This only occurs when y1 == -y2 and x1^3 == x2^3, but x1 != x2. * This means either x1 == beta*x2 or beta*x1 == x2, where beta is * a nontrivial cube root of one. In either case, an alternate * non-indeterminate expression for lambda is (y1 - y2)/(x1 - x2), * so we set R/M equal to this. */ rr_alt = s1; - rustsecp256k1_v0_4_0_fe_mul_int(&rr_alt, 2); /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */ - rustsecp256k1_v0_4_0_fe_add(&m_alt, &u1); /* Malt = X1*Z2^2 - X2*Z1^2 */ + rustsecp256k1_v0_4_1_fe_mul_int(&rr_alt, 2); /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */ + rustsecp256k1_v0_4_1_fe_add(&m_alt, &u1); /* Malt = X1*Z2^2 - X2*Z1^2 */ - rustsecp256k1_v0_4_0_fe_cmov(&rr_alt, &rr, !degenerate); - rustsecp256k1_v0_4_0_fe_cmov(&m_alt, &m, !degenerate); + rustsecp256k1_v0_4_1_fe_cmov(&rr_alt, &rr, !degenerate); + rustsecp256k1_v0_4_1_fe_cmov(&m_alt, &m, !degenerate); /* Now Ralt / Malt = lambda and is guaranteed not to be 0/0. * From here on out Ralt and Malt represent the numerator * and denominator of lambda; R and M represent the explicit * expressions x1^2 + x2^2 + x1x2 and y1 + y2. 
*/ - rustsecp256k1_v0_4_0_fe_sqr(&n, &m_alt); /* n = Malt^2 (1) */ - rustsecp256k1_v0_4_0_fe_mul(&q, &n, &t); /* q = Q = T*Malt^2 (1) */ + rustsecp256k1_v0_4_1_fe_sqr(&n, &m_alt); /* n = Malt^2 (1) */ + rustsecp256k1_v0_4_1_fe_mul(&q, &n, &t); /* q = Q = T*Malt^2 (1) */ /* These two lines use the observation that either M == Malt or M == 0, * so M^3 * Malt is either Malt^4 (which is computed by squaring), or * zero (which is "computed" by cmov). So the cost is one squaring * versus two multiplications. */ - rustsecp256k1_v0_4_0_fe_sqr(&n, &n); - rustsecp256k1_v0_4_0_fe_cmov(&n, &m, degenerate); /* n = M^3 * Malt (2) */ - rustsecp256k1_v0_4_0_fe_sqr(&t, &rr_alt); /* t = Ralt^2 (1) */ - rustsecp256k1_v0_4_0_fe_mul(&r->z, &a->z, &m_alt); /* r->z = Malt*Z (1) */ - infinity = rustsecp256k1_v0_4_0_fe_normalizes_to_zero(&r->z) * (1 - a->infinity); - rustsecp256k1_v0_4_0_fe_mul_int(&r->z, 2); /* r->z = Z3 = 2*Malt*Z (2) */ - rustsecp256k1_v0_4_0_fe_negate(&q, &q, 1); /* q = -Q (2) */ - rustsecp256k1_v0_4_0_fe_add(&t, &q); /* t = Ralt^2-Q (3) */ - rustsecp256k1_v0_4_0_fe_normalize_weak(&t); + rustsecp256k1_v0_4_1_fe_sqr(&n, &n); + rustsecp256k1_v0_4_1_fe_cmov(&n, &m, degenerate); /* n = M^3 * Malt (2) */ + rustsecp256k1_v0_4_1_fe_sqr(&t, &rr_alt); /* t = Ralt^2 (1) */ + rustsecp256k1_v0_4_1_fe_mul(&r->z, &a->z, &m_alt); /* r->z = Malt*Z (1) */ + infinity = rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&r->z) & ~a->infinity; + rustsecp256k1_v0_4_1_fe_mul_int(&r->z, 2); /* r->z = Z3 = 2*Malt*Z (2) */ + rustsecp256k1_v0_4_1_fe_negate(&q, &q, 1); /* q = -Q (2) */ + rustsecp256k1_v0_4_1_fe_add(&t, &q); /* t = Ralt^2-Q (3) */ + rustsecp256k1_v0_4_1_fe_normalize_weak(&t); r->x = t; /* r->x = Ralt^2-Q (1) */ - rustsecp256k1_v0_4_0_fe_mul_int(&t, 2); /* t = 2*x3 (2) */ - rustsecp256k1_v0_4_0_fe_add(&t, &q); /* t = 2*x3 - Q: (4) */ - rustsecp256k1_v0_4_0_fe_mul(&t, &t, &rr_alt); /* t = Ralt*(2*x3 - Q) (1) */ - rustsecp256k1_v0_4_0_fe_add(&t, &n); /* t = Ralt*(2*x3 - Q) + M^3*Malt (3) */ - rustsecp256k1_v0_4_0_fe_negate(&r->y, &t, 3); /* r->y = Ralt*(Q - 2x3) - M^3*Malt (4) */ - rustsecp256k1_v0_4_0_fe_normalize_weak(&r->y); - rustsecp256k1_v0_4_0_fe_mul_int(&r->x, 4); /* r->x = X3 = 4*(Ralt^2-Q) */ - rustsecp256k1_v0_4_0_fe_mul_int(&r->y, 4); /* r->y = Y3 = 4*Ralt*(Q - 2x3) - 4*M^3*Malt (4) */ + rustsecp256k1_v0_4_1_fe_mul_int(&t, 2); /* t = 2*x3 (2) */ + rustsecp256k1_v0_4_1_fe_add(&t, &q); /* t = 2*x3 - Q: (4) */ + rustsecp256k1_v0_4_1_fe_mul(&t, &t, &rr_alt); /* t = Ralt*(2*x3 - Q) (1) */ + rustsecp256k1_v0_4_1_fe_add(&t, &n); /* t = Ralt*(2*x3 - Q) + M^3*Malt (3) */ + rustsecp256k1_v0_4_1_fe_negate(&r->y, &t, 3); /* r->y = Ralt*(Q - 2x3) - M^3*Malt (4) */ + rustsecp256k1_v0_4_1_fe_normalize_weak(&r->y); + rustsecp256k1_v0_4_1_fe_mul_int(&r->x, 4); /* r->x = X3 = 4*(Ralt^2-Q) */ + rustsecp256k1_v0_4_1_fe_mul_int(&r->y, 4); /* r->y = Y3 = 4*Ralt*(Q - 2x3) - 4*M^3*Malt (4) */ /** In case a->infinity == 1, replace r with (b->x, b->y, 1). 
*/ - rustsecp256k1_v0_4_0_fe_cmov(&r->x, &b->x, a->infinity); - rustsecp256k1_v0_4_0_fe_cmov(&r->y, &b->y, a->infinity); - rustsecp256k1_v0_4_0_fe_cmov(&r->z, &fe_1, a->infinity); + rustsecp256k1_v0_4_1_fe_cmov(&r->x, &b->x, a->infinity); + rustsecp256k1_v0_4_1_fe_cmov(&r->y, &b->y, a->infinity); + rustsecp256k1_v0_4_1_fe_cmov(&r->z, &fe_1, a->infinity); r->infinity = infinity; } -static void rustsecp256k1_v0_4_0_gej_rescale(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_fe *s) { +static void rustsecp256k1_v0_4_1_gej_rescale(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_fe *s) { /* Operations: 4 mul, 1 sqr */ - rustsecp256k1_v0_4_0_fe zz; - VERIFY_CHECK(!rustsecp256k1_v0_4_0_fe_is_zero(s)); - rustsecp256k1_v0_4_0_fe_sqr(&zz, s); - rustsecp256k1_v0_4_0_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */ - rustsecp256k1_v0_4_0_fe_mul(&r->y, &r->y, &zz); - rustsecp256k1_v0_4_0_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */ - rustsecp256k1_v0_4_0_fe_mul(&r->z, &r->z, s); /* r->z *= s */ + rustsecp256k1_v0_4_1_fe zz; + VERIFY_CHECK(!rustsecp256k1_v0_4_1_fe_is_zero(s)); + rustsecp256k1_v0_4_1_fe_sqr(&zz, s); + rustsecp256k1_v0_4_1_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */ + rustsecp256k1_v0_4_1_fe_mul(&r->y, &r->y, &zz); + rustsecp256k1_v0_4_1_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */ + rustsecp256k1_v0_4_1_fe_mul(&r->z, &r->z, s); /* r->z *= s */ } -static void rustsecp256k1_v0_4_0_ge_to_storage(rustsecp256k1_v0_4_0_ge_storage *r, const rustsecp256k1_v0_4_0_ge *a) { - rustsecp256k1_v0_4_0_fe x, y; +static void rustsecp256k1_v0_4_1_ge_to_storage(rustsecp256k1_v0_4_1_ge_storage *r, const rustsecp256k1_v0_4_1_ge *a) { + rustsecp256k1_v0_4_1_fe x, y; VERIFY_CHECK(!a->infinity); x = a->x; - rustsecp256k1_v0_4_0_fe_normalize(&x); + rustsecp256k1_v0_4_1_fe_normalize(&x); y = a->y; - rustsecp256k1_v0_4_0_fe_normalize(&y); - rustsecp256k1_v0_4_0_fe_to_storage(&r->x, &x); - rustsecp256k1_v0_4_0_fe_to_storage(&r->y, &y); + rustsecp256k1_v0_4_1_fe_normalize(&y); + rustsecp256k1_v0_4_1_fe_to_storage(&r->x, &x); + rustsecp256k1_v0_4_1_fe_to_storage(&r->y, &y); } -static void rustsecp256k1_v0_4_0_ge_from_storage(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_ge_storage *a) { - rustsecp256k1_v0_4_0_fe_from_storage(&r->x, &a->x); - rustsecp256k1_v0_4_0_fe_from_storage(&r->y, &a->y); +static void rustsecp256k1_v0_4_1_ge_from_storage(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_ge_storage *a) { + rustsecp256k1_v0_4_1_fe_from_storage(&r->x, &a->x); + rustsecp256k1_v0_4_1_fe_from_storage(&r->y, &a->y); r->infinity = 0; } -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_ge_storage_cmov(rustsecp256k1_v0_4_0_ge_storage *r, const rustsecp256k1_v0_4_0_ge_storage *a, int flag) { - rustsecp256k1_v0_4_0_fe_storage_cmov(&r->x, &a->x, flag); - rustsecp256k1_v0_4_0_fe_storage_cmov(&r->y, &a->y, flag); +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_ge_storage_cmov(rustsecp256k1_v0_4_1_ge_storage *r, const rustsecp256k1_v0_4_1_ge_storage *a, int flag) { + rustsecp256k1_v0_4_1_fe_storage_cmov(&r->x, &a->x, flag); + rustsecp256k1_v0_4_1_fe_storage_cmov(&r->y, &a->y, flag); } -static void rustsecp256k1_v0_4_0_ge_mul_lambda(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_ge *a) { - static const rustsecp256k1_v0_4_0_fe beta = SECP256K1_FE_CONST( +static void rustsecp256k1_v0_4_1_ge_mul_lambda(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_ge *a) { + static const rustsecp256k1_v0_4_1_fe beta = SECP256K1_FE_CONST( 0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul, 0x9cf04975ul, 
0x12f58995ul, 0xc1396c28ul, 0x719501eeul ); *r = *a; - rustsecp256k1_v0_4_0_fe_mul(&r->x, &r->x, &beta); + rustsecp256k1_v0_4_1_fe_mul(&r->x, &r->x, &beta); } -static int rustsecp256k1_v0_4_0_gej_has_quad_y_var(const rustsecp256k1_v0_4_0_gej *a) { - rustsecp256k1_v0_4_0_fe yz; - - if (a->infinity) { - return 0; - } - - /* We rely on the fact that the Jacobi symbol of 1 / a->z^3 is the same as - * that of a->z. Thus a->y / a->z^3 is a quadratic residue iff a->y * a->z - is */ - rustsecp256k1_v0_4_0_fe_mul(&yz, &a->y, &a->z); - return rustsecp256k1_v0_4_0_fe_is_quad_var(&yz); -} - -static int rustsecp256k1_v0_4_0_ge_is_in_correct_subgroup(const rustsecp256k1_v0_4_0_ge* ge) { +static int rustsecp256k1_v0_4_1_ge_is_in_correct_subgroup(const rustsecp256k1_v0_4_1_ge* ge) { #ifdef EXHAUSTIVE_TEST_ORDER - rustsecp256k1_v0_4_0_gej out; + rustsecp256k1_v0_4_1_gej out; int i; /* A very simple EC multiplication ladder that avoids a dependency on ecmult. */ - rustsecp256k1_v0_4_0_gej_set_infinity(&out); + rustsecp256k1_v0_4_1_gej_set_infinity(&out); for (i = 0; i < 32; ++i) { - rustsecp256k1_v0_4_0_gej_double_var(&out, &out, NULL); + rustsecp256k1_v0_4_1_gej_double_var(&out, &out, NULL); if ((((uint32_t)EXHAUSTIVE_TEST_ORDER) >> (31 - i)) & 1) { - rustsecp256k1_v0_4_0_gej_add_ge_var(&out, &out, ge, NULL); + rustsecp256k1_v0_4_1_gej_add_ge_var(&out, &out, ge, NULL); } } - return rustsecp256k1_v0_4_0_gej_is_infinity(&out); + return rustsecp256k1_v0_4_1_gej_is_infinity(&out); #else (void)ge; /* The real secp256k1 group has cofactor 1, so the subgroup is the entire curve. */ diff --git a/secp256k1-sys/depend/secp256k1/src/hash.h b/secp256k1-sys/depend/secp256k1/src/hash.h index 4d018c2..9897a55 100644 --- a/secp256k1-sys/depend/secp256k1/src/hash.h +++ b/secp256k1-sys/depend/secp256k1/src/hash.h @@ -14,28 +14,28 @@ typedef struct { uint32_t s[8]; uint32_t buf[16]; /* In big endian */ size_t bytes; -} rustsecp256k1_v0_4_0_sha256; +} rustsecp256k1_v0_4_1_sha256; -static void rustsecp256k1_v0_4_0_sha256_initialize(rustsecp256k1_v0_4_0_sha256 *hash); -static void rustsecp256k1_v0_4_0_sha256_write(rustsecp256k1_v0_4_0_sha256 *hash, const unsigned char *data, size_t size); -static void rustsecp256k1_v0_4_0_sha256_finalize(rustsecp256k1_v0_4_0_sha256 *hash, unsigned char *out32); +static void rustsecp256k1_v0_4_1_sha256_initialize(rustsecp256k1_v0_4_1_sha256 *hash); +static void rustsecp256k1_v0_4_1_sha256_write(rustsecp256k1_v0_4_1_sha256 *hash, const unsigned char *data, size_t size); +static void rustsecp256k1_v0_4_1_sha256_finalize(rustsecp256k1_v0_4_1_sha256 *hash, unsigned char *out32); typedef struct { - rustsecp256k1_v0_4_0_sha256 inner, outer; -} rustsecp256k1_v0_4_0_hmac_sha256; + rustsecp256k1_v0_4_1_sha256 inner, outer; +} rustsecp256k1_v0_4_1_hmac_sha256; -static void rustsecp256k1_v0_4_0_hmac_sha256_initialize(rustsecp256k1_v0_4_0_hmac_sha256 *hash, const unsigned char *key, size_t size); -static void rustsecp256k1_v0_4_0_hmac_sha256_write(rustsecp256k1_v0_4_0_hmac_sha256 *hash, const unsigned char *data, size_t size); -static void rustsecp256k1_v0_4_0_hmac_sha256_finalize(rustsecp256k1_v0_4_0_hmac_sha256 *hash, unsigned char *out32); +static void rustsecp256k1_v0_4_1_hmac_sha256_initialize(rustsecp256k1_v0_4_1_hmac_sha256 *hash, const unsigned char *key, size_t size); +static void rustsecp256k1_v0_4_1_hmac_sha256_write(rustsecp256k1_v0_4_1_hmac_sha256 *hash, const unsigned char *data, size_t size); +static void rustsecp256k1_v0_4_1_hmac_sha256_finalize(rustsecp256k1_v0_4_1_hmac_sha256 *hash, 
unsigned char *out32); typedef struct { unsigned char v[32]; unsigned char k[32]; int retry; -} rustsecp256k1_v0_4_0_rfc6979_hmac_sha256; +} rustsecp256k1_v0_4_1_rfc6979_hmac_sha256; -static void rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen); -static void rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen); -static void rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 *rng); +static void rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen); +static void rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen); +static void rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 *rng); #endif /* SECP256K1_HASH_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/hash_impl.h b/secp256k1-sys/depend/secp256k1/src/hash_impl.h index 4406992..83d12eb 100644 --- a/secp256k1-sys/depend/secp256k1/src/hash_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/hash_impl.h @@ -34,7 +34,7 @@ #define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24)) #endif -static void rustsecp256k1_v0_4_0_sha256_initialize(rustsecp256k1_v0_4_0_sha256 *hash) { +static void rustsecp256k1_v0_4_1_sha256_initialize(rustsecp256k1_v0_4_1_sha256 *hash) { hash->s[0] = 0x6a09e667ul; hash->s[1] = 0xbb67ae85ul; hash->s[2] = 0x3c6ef372ul; @@ -47,7 +47,7 @@ static void rustsecp256k1_v0_4_0_sha256_initialize(rustsecp256k1_v0_4_0_sha256 * } /** Perform one SHA-256 transformation, processing 16 big endian 32-bit words. 
*/ -static void rustsecp256k1_v0_4_0_sha256_transform(uint32_t* s, const uint32_t* chunk) { +static void rustsecp256k1_v0_4_1_sha256_transform(uint32_t* s, const uint32_t* chunk) { uint32_t a = s[0], b = s[1], c = s[2], d = s[3], e = s[4], f = s[5], g = s[6], h = s[7]; uint32_t w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15; @@ -129,7 +129,7 @@ static void rustsecp256k1_v0_4_0_sha256_transform(uint32_t* s, const uint32_t* c s[7] += h; } -static void rustsecp256k1_v0_4_0_sha256_write(rustsecp256k1_v0_4_0_sha256 *hash, const unsigned char *data, size_t len) { +static void rustsecp256k1_v0_4_1_sha256_write(rustsecp256k1_v0_4_1_sha256 *hash, const unsigned char *data, size_t len) { size_t bufsize = hash->bytes & 0x3F; hash->bytes += len; VERIFY_CHECK(hash->bytes >= len); @@ -139,7 +139,7 @@ static void rustsecp256k1_v0_4_0_sha256_write(rustsecp256k1_v0_4_0_sha256 *hash, memcpy(((unsigned char*)hash->buf) + bufsize, data, chunk_len); data += chunk_len; len -= chunk_len; - rustsecp256k1_v0_4_0_sha256_transform(hash->s, hash->buf); + rustsecp256k1_v0_4_1_sha256_transform(hash->s, hash->buf); bufsize = 0; } if (len) { @@ -148,15 +148,15 @@ static void rustsecp256k1_v0_4_0_sha256_write(rustsecp256k1_v0_4_0_sha256 *hash, } } -static void rustsecp256k1_v0_4_0_sha256_finalize(rustsecp256k1_v0_4_0_sha256 *hash, unsigned char *out32) { +static void rustsecp256k1_v0_4_1_sha256_finalize(rustsecp256k1_v0_4_1_sha256 *hash, unsigned char *out32) { static const unsigned char pad[64] = {0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; uint32_t sizedesc[2]; uint32_t out[8]; int i = 0; sizedesc[0] = BE32(hash->bytes >> 29); sizedesc[1] = BE32(hash->bytes << 3); - rustsecp256k1_v0_4_0_sha256_write(hash, pad, 1 + ((119 - (hash->bytes % 64)) % 64)); - rustsecp256k1_v0_4_0_sha256_write(hash, (const unsigned char*)sizedesc, 8); + rustsecp256k1_v0_4_1_sha256_write(hash, pad, 1 + ((119 - (hash->bytes % 64)) % 64)); + rustsecp256k1_v0_4_1_sha256_write(hash, (const unsigned char*)sizedesc, 8); for (i = 0; i < 8; i++) { out[i] = BE32(hash->s[i]); hash->s[i] = 0; @@ -166,60 +166,60 @@ static void rustsecp256k1_v0_4_0_sha256_finalize(rustsecp256k1_v0_4_0_sha256 *ha /* Initializes a sha256 struct and writes the 64 byte string * SHA256(tag)||SHA256(tag) into it. 
*/ -static void rustsecp256k1_v0_4_0_sha256_initialize_tagged(rustsecp256k1_v0_4_0_sha256 *hash, const unsigned char *tag, size_t taglen) { +static void rustsecp256k1_v0_4_1_sha256_initialize_tagged(rustsecp256k1_v0_4_1_sha256 *hash, const unsigned char *tag, size_t taglen) { unsigned char buf[32]; - rustsecp256k1_v0_4_0_sha256_initialize(hash); - rustsecp256k1_v0_4_0_sha256_write(hash, tag, taglen); - rustsecp256k1_v0_4_0_sha256_finalize(hash, buf); + rustsecp256k1_v0_4_1_sha256_initialize(hash); + rustsecp256k1_v0_4_1_sha256_write(hash, tag, taglen); + rustsecp256k1_v0_4_1_sha256_finalize(hash, buf); - rustsecp256k1_v0_4_0_sha256_initialize(hash); - rustsecp256k1_v0_4_0_sha256_write(hash, buf, 32); - rustsecp256k1_v0_4_0_sha256_write(hash, buf, 32); + rustsecp256k1_v0_4_1_sha256_initialize(hash); + rustsecp256k1_v0_4_1_sha256_write(hash, buf, 32); + rustsecp256k1_v0_4_1_sha256_write(hash, buf, 32); } -static void rustsecp256k1_v0_4_0_hmac_sha256_initialize(rustsecp256k1_v0_4_0_hmac_sha256 *hash, const unsigned char *key, size_t keylen) { +static void rustsecp256k1_v0_4_1_hmac_sha256_initialize(rustsecp256k1_v0_4_1_hmac_sha256 *hash, const unsigned char *key, size_t keylen) { size_t n; unsigned char rkey[64]; if (keylen <= sizeof(rkey)) { memcpy(rkey, key, keylen); memset(rkey + keylen, 0, sizeof(rkey) - keylen); } else { - rustsecp256k1_v0_4_0_sha256 sha256; - rustsecp256k1_v0_4_0_sha256_initialize(&sha256); - rustsecp256k1_v0_4_0_sha256_write(&sha256, key, keylen); - rustsecp256k1_v0_4_0_sha256_finalize(&sha256, rkey); + rustsecp256k1_v0_4_1_sha256 sha256; + rustsecp256k1_v0_4_1_sha256_initialize(&sha256); + rustsecp256k1_v0_4_1_sha256_write(&sha256, key, keylen); + rustsecp256k1_v0_4_1_sha256_finalize(&sha256, rkey); memset(rkey + 32, 0, 32); } - rustsecp256k1_v0_4_0_sha256_initialize(&hash->outer); + rustsecp256k1_v0_4_1_sha256_initialize(&hash->outer); for (n = 0; n < sizeof(rkey); n++) { rkey[n] ^= 0x5c; } - rustsecp256k1_v0_4_0_sha256_write(&hash->outer, rkey, sizeof(rkey)); + rustsecp256k1_v0_4_1_sha256_write(&hash->outer, rkey, sizeof(rkey)); - rustsecp256k1_v0_4_0_sha256_initialize(&hash->inner); + rustsecp256k1_v0_4_1_sha256_initialize(&hash->inner); for (n = 0; n < sizeof(rkey); n++) { rkey[n] ^= 0x5c ^ 0x36; } - rustsecp256k1_v0_4_0_sha256_write(&hash->inner, rkey, sizeof(rkey)); + rustsecp256k1_v0_4_1_sha256_write(&hash->inner, rkey, sizeof(rkey)); memset(rkey, 0, sizeof(rkey)); } -static void rustsecp256k1_v0_4_0_hmac_sha256_write(rustsecp256k1_v0_4_0_hmac_sha256 *hash, const unsigned char *data, size_t size) { - rustsecp256k1_v0_4_0_sha256_write(&hash->inner, data, size); +static void rustsecp256k1_v0_4_1_hmac_sha256_write(rustsecp256k1_v0_4_1_hmac_sha256 *hash, const unsigned char *data, size_t size) { + rustsecp256k1_v0_4_1_sha256_write(&hash->inner, data, size); } -static void rustsecp256k1_v0_4_0_hmac_sha256_finalize(rustsecp256k1_v0_4_0_hmac_sha256 *hash, unsigned char *out32) { +static void rustsecp256k1_v0_4_1_hmac_sha256_finalize(rustsecp256k1_v0_4_1_hmac_sha256 *hash, unsigned char *out32) { unsigned char temp[32]; - rustsecp256k1_v0_4_0_sha256_finalize(&hash->inner, temp); - rustsecp256k1_v0_4_0_sha256_write(&hash->outer, temp, 32); + rustsecp256k1_v0_4_1_sha256_finalize(&hash->inner, temp); + rustsecp256k1_v0_4_1_sha256_write(&hash->outer, temp, 32); memset(temp, 0, 32); - rustsecp256k1_v0_4_0_sha256_finalize(&hash->outer, out32); + rustsecp256k1_v0_4_1_sha256_finalize(&hash->outer, out32); } -static void 
rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen) { - rustsecp256k1_v0_4_0_hmac_sha256 hmac; +static void rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen) { + rustsecp256k1_v0_4_1_hmac_sha256 hmac; static const unsigned char zero[1] = {0x00}; static const unsigned char one[1] = {0x01}; @@ -227,47 +227,47 @@ static void rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0 memset(rng->k, 0x00, 32); /* RFC6979 3.2.c. */ /* RFC6979 3.2.d. */ - rustsecp256k1_v0_4_0_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, zero, 1); - rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, key, keylen); - rustsecp256k1_v0_4_0_hmac_sha256_finalize(&hmac, rng->k); - rustsecp256k1_v0_4_0_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_4_0_hmac_sha256_finalize(&hmac, rng->v); + rustsecp256k1_v0_4_1_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, zero, 1); + rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, key, keylen); + rustsecp256k1_v0_4_1_hmac_sha256_finalize(&hmac, rng->k); + rustsecp256k1_v0_4_1_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_4_1_hmac_sha256_finalize(&hmac, rng->v); /* RFC6979 3.2.f. */ - rustsecp256k1_v0_4_0_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, one, 1); - rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, key, keylen); - rustsecp256k1_v0_4_0_hmac_sha256_finalize(&hmac, rng->k); - rustsecp256k1_v0_4_0_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_4_0_hmac_sha256_finalize(&hmac, rng->v); + rustsecp256k1_v0_4_1_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, one, 1); + rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, key, keylen); + rustsecp256k1_v0_4_1_hmac_sha256_finalize(&hmac, rng->k); + rustsecp256k1_v0_4_1_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_4_1_hmac_sha256_finalize(&hmac, rng->v); rng->retry = 0; } -static void rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen) { +static void rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen) { /* RFC6979 3.2.h. 
*/ static const unsigned char zero[1] = {0x00}; if (rng->retry) { - rustsecp256k1_v0_4_0_hmac_sha256 hmac; - rustsecp256k1_v0_4_0_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, zero, 1); - rustsecp256k1_v0_4_0_hmac_sha256_finalize(&hmac, rng->k); - rustsecp256k1_v0_4_0_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_4_0_hmac_sha256_finalize(&hmac, rng->v); + rustsecp256k1_v0_4_1_hmac_sha256 hmac; + rustsecp256k1_v0_4_1_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, zero, 1); + rustsecp256k1_v0_4_1_hmac_sha256_finalize(&hmac, rng->k); + rustsecp256k1_v0_4_1_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_4_1_hmac_sha256_finalize(&hmac, rng->v); } while (outlen > 0) { - rustsecp256k1_v0_4_0_hmac_sha256 hmac; + rustsecp256k1_v0_4_1_hmac_sha256 hmac; int now = outlen; - rustsecp256k1_v0_4_0_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_4_0_hmac_sha256_finalize(&hmac, rng->v); + rustsecp256k1_v0_4_1_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_4_1_hmac_sha256_finalize(&hmac, rng->v); if (now > 32) { now = 32; } @@ -279,7 +279,7 @@ static void rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_4 rng->retry = 1; } -static void rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 *rng) { +static void rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 *rng) { memset(rng->k, 0, 32); memset(rng->v, 0, 32); rng->retry = 0; diff --git a/secp256k1-sys/depend/secp256k1/src/modinv32.h b/secp256k1-sys/depend/secp256k1/src/modinv32.h new file mode 100644 index 0000000..03863e9 --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/src/modinv32.h @@ -0,0 +1,42 @@ +/*********************************************************************** + * Copyright (c) 2020 Peter Dettman * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or https://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef SECP256K1_MODINV32_H +#define SECP256K1_MODINV32_H + +#if defined HAVE_CONFIG_H +#include "libsecp256k1-config.h" +#endif + +#include "util.h" + +/* A signed 30-bit limb representation of integers. + * + * Its value is sum(v[i] * 2^(30*i), i=0..8). */ +typedef struct { + int32_t v[9]; +} rustsecp256k1_v0_4_1_modinv32_signed30; + +typedef struct { + /* The modulus in signed30 notation, must be odd and in [3, 2^256]. */ + rustsecp256k1_v0_4_1_modinv32_signed30 modulus; + + /* modulus^{-1} mod 2^30 */ + uint32_t modulus_inv30; +} rustsecp256k1_v0_4_1_modinv32_modinfo; + +/* Replace x with its modular inverse mod modinfo->modulus. x must be in range [0, modulus). + * If x is zero, the result will be zero as well. If not, the inverse must exist (i.e., the gcd of + * x and modulus must be 1). These rules are automatically satisfied if the modulus is prime. + * + * On output, all of x's limbs will be in [0, 2^30). 
+ */ +static void rustsecp256k1_v0_4_1_modinv32_var(rustsecp256k1_v0_4_1_modinv32_signed30 *x, const rustsecp256k1_v0_4_1_modinv32_modinfo *modinfo); + +/* Same as rustsecp256k1_v0_4_1_modinv32_var, but constant time in x (not in the modulus). */ +static void rustsecp256k1_v0_4_1_modinv32(rustsecp256k1_v0_4_1_modinv32_signed30 *x, const rustsecp256k1_v0_4_1_modinv32_modinfo *modinfo); + +#endif /* SECP256K1_MODINV32_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/modinv32_impl.h b/secp256k1-sys/depend/secp256k1/src/modinv32_impl.h new file mode 100644 index 0000000..cfbe56c --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/src/modinv32_impl.h @@ -0,0 +1,587 @@ +/*********************************************************************** + * Copyright (c) 2020 Peter Dettman * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or https://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef SECP256K1_MODINV32_IMPL_H +#define SECP256K1_MODINV32_IMPL_H + +#include "modinv32.h" + +#include "util.h" + +#include + +/* This file implements modular inversion based on the paper "Fast constant-time gcd computation and + * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang. + * + * For an explanation of the algorithm, see doc/safegcd_implementation.md. This file contains an + * implementation for N=30, using 30-bit signed limbs represented as int32_t. + */ + +#ifdef VERIFY +static const rustsecp256k1_v0_4_1_modinv32_signed30 SECP256K1_SIGNED30_ONE = {{1}}; + +/* Compute a*factor and put it in r. All but the top limb in r will be in range [0,2^30). */ +static void rustsecp256k1_v0_4_1_modinv32_mul_30(rustsecp256k1_v0_4_1_modinv32_signed30 *r, const rustsecp256k1_v0_4_1_modinv32_signed30 *a, int alen, int32_t factor) { + const int32_t M30 = (int32_t)(UINT32_MAX >> 2); + int64_t c = 0; + int i; + for (i = 0; i < 8; ++i) { + if (i < alen) c += (int64_t)a->v[i] * factor; + r->v[i] = (int32_t)c & M30; c >>= 30; + } + if (8 < alen) c += (int64_t)a->v[8] * factor; + VERIFY_CHECK(c == (int32_t)c); + r->v[8] = (int32_t)c; +} + +/* Return -1 for ab*factor. A consists of alen limbs; b has 9. */ +static int rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(const rustsecp256k1_v0_4_1_modinv32_signed30 *a, int alen, const rustsecp256k1_v0_4_1_modinv32_signed30 *b, int32_t factor) { + int i; + rustsecp256k1_v0_4_1_modinv32_signed30 am, bm; + rustsecp256k1_v0_4_1_modinv32_mul_30(&am, a, alen, 1); /* Normalize all but the top limb of a. */ + rustsecp256k1_v0_4_1_modinv32_mul_30(&bm, b, 9, factor); + for (i = 0; i < 8; ++i) { + /* Verify that all but the top limb of a and b are normalized. */ + VERIFY_CHECK(am.v[i] >> 30 == 0); + VERIFY_CHECK(bm.v[i] >> 30 == 0); + } + for (i = 8; i >= 0; --i) { + if (am.v[i] < bm.v[i]) return -1; + if (am.v[i] > bm.v[i]) return 1; + } + return 0; +} +#endif + +/* Take as input a signed30 number in range (-2*modulus,modulus), and add a multiple of the modulus + * to it to bring it to range [0,modulus). If sign < 0, the input will also be negated in the + * process. The input must have limbs in range (-2^30,2^30). The output will have limbs in range + * [0,2^30). 
*/ +static void rustsecp256k1_v0_4_1_modinv32_normalize_30(rustsecp256k1_v0_4_1_modinv32_signed30 *r, int32_t sign, const rustsecp256k1_v0_4_1_modinv32_modinfo *modinfo) { + const int32_t M30 = (int32_t)(UINT32_MAX >> 2); + int32_t r0 = r->v[0], r1 = r->v[1], r2 = r->v[2], r3 = r->v[3], r4 = r->v[4], + r5 = r->v[5], r6 = r->v[6], r7 = r->v[7], r8 = r->v[8]; + int32_t cond_add, cond_negate; + +#ifdef VERIFY + /* Verify that all limbs are in range (-2^30,2^30). */ + int i; + for (i = 0; i < 9; ++i) { + VERIFY_CHECK(r->v[i] >= -M30); + VERIFY_CHECK(r->v[i] <= M30); + } + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */ +#endif + + /* In a first step, add the modulus if the input is negative, and then negate if requested. + * This brings r from range (-2*modulus,modulus) to range (-modulus,modulus). As all input + * limbs are in range (-2^30,2^30), this cannot overflow an int32_t. Note that the right + * shifts below are signed sign-extending shifts (see assumptions.h for tests that that is + * indeed the behavior of the right shift operator). */ + cond_add = r8 >> 31; + r0 += modinfo->modulus.v[0] & cond_add; + r1 += modinfo->modulus.v[1] & cond_add; + r2 += modinfo->modulus.v[2] & cond_add; + r3 += modinfo->modulus.v[3] & cond_add; + r4 += modinfo->modulus.v[4] & cond_add; + r5 += modinfo->modulus.v[5] & cond_add; + r6 += modinfo->modulus.v[6] & cond_add; + r7 += modinfo->modulus.v[7] & cond_add; + r8 += modinfo->modulus.v[8] & cond_add; + cond_negate = sign >> 31; + r0 = (r0 ^ cond_negate) - cond_negate; + r1 = (r1 ^ cond_negate) - cond_negate; + r2 = (r2 ^ cond_negate) - cond_negate; + r3 = (r3 ^ cond_negate) - cond_negate; + r4 = (r4 ^ cond_negate) - cond_negate; + r5 = (r5 ^ cond_negate) - cond_negate; + r6 = (r6 ^ cond_negate) - cond_negate; + r7 = (r7 ^ cond_negate) - cond_negate; + r8 = (r8 ^ cond_negate) - cond_negate; + /* Propagate the top bits, to bring limbs back to range (-2^30,2^30). */ + r1 += r0 >> 30; r0 &= M30; + r2 += r1 >> 30; r1 &= M30; + r3 += r2 >> 30; r2 &= M30; + r4 += r3 >> 30; r3 &= M30; + r5 += r4 >> 30; r4 &= M30; + r6 += r5 >> 30; r5 &= M30; + r7 += r6 >> 30; r6 &= M30; + r8 += r7 >> 30; r7 &= M30; + + /* In a second step add the modulus again if the result is still negative, bringing r to range + * [0,modulus). */ + cond_add = r8 >> 31; + r0 += modinfo->modulus.v[0] & cond_add; + r1 += modinfo->modulus.v[1] & cond_add; + r2 += modinfo->modulus.v[2] & cond_add; + r3 += modinfo->modulus.v[3] & cond_add; + r4 += modinfo->modulus.v[4] & cond_add; + r5 += modinfo->modulus.v[5] & cond_add; + r6 += modinfo->modulus.v[6] & cond_add; + r7 += modinfo->modulus.v[7] & cond_add; + r8 += modinfo->modulus.v[8] & cond_add; + /* And propagate again. 
*/ + r1 += r0 >> 30; r0 &= M30; + r2 += r1 >> 30; r1 &= M30; + r3 += r2 >> 30; r2 &= M30; + r4 += r3 >> 30; r3 &= M30; + r5 += r4 >> 30; r4 &= M30; + r6 += r5 >> 30; r5 &= M30; + r7 += r6 >> 30; r6 &= M30; + r8 += r7 >> 30; r7 &= M30; + + r->v[0] = r0; + r->v[1] = r1; + r->v[2] = r2; + r->v[3] = r3; + r->v[4] = r4; + r->v[5] = r5; + r->v[6] = r6; + r->v[7] = r7; + r->v[8] = r8; + +#ifdef VERIFY + VERIFY_CHECK(r0 >> 30 == 0); + VERIFY_CHECK(r1 >> 30 == 0); + VERIFY_CHECK(r2 >> 30 == 0); + VERIFY_CHECK(r3 >> 30 == 0); + VERIFY_CHECK(r4 >> 30 == 0); + VERIFY_CHECK(r5 >> 30 == 0); + VERIFY_CHECK(r6 >> 30 == 0); + VERIFY_CHECK(r7 >> 30 == 0); + VERIFY_CHECK(r8 >> 30 == 0); + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 0) >= 0); /* r >= 0 */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */ +#endif +} + +/* Data type for transition matrices (see section 3 of explanation). + * + * t = [ u v ] + * [ q r ] + */ +typedef struct { + int32_t u, v, q, r; +} rustsecp256k1_v0_4_1_modinv32_trans2x2; + +/* Compute the transition matrix and zeta for 30 divsteps. + * + * Input: zeta: initial zeta + * f0: bottom limb of initial f + * g0: bottom limb of initial g + * Output: t: transition matrix + * Return: final zeta + * + * Implements the divsteps_n_matrix function from the explanation. + */ +static int32_t rustsecp256k1_v0_4_1_modinv32_divsteps_30(int32_t zeta, uint32_t f0, uint32_t g0, rustsecp256k1_v0_4_1_modinv32_trans2x2 *t) { + /* u,v,q,r are the elements of the transformation matrix being built up, + * starting with the identity matrix. Semantically they are signed integers + * in range [-2^30,2^30], but here represented as unsigned mod 2^32. This + * permits left shifting (which is UB for negative numbers). The range + * being inside [-2^31,2^31) means that casting to signed works correctly. + */ + uint32_t u = 1, v = 0, q = 0, r = 1; + uint32_t c1, c2, f = f0, g = g0, x, y, z; + int i; + + for (i = 0; i < 30; ++i) { + VERIFY_CHECK((f & 1) == 1); /* f must always be odd */ + VERIFY_CHECK((u * f0 + v * g0) == f << i); + VERIFY_CHECK((q * f0 + r * g0) == g << i); + /* Compute conditional masks for (zeta < 0) and for (g & 1). */ + c1 = zeta >> 31; + c2 = -(g & 1); + /* Compute x,y,z, conditionally negated versions of f,u,v. */ + x = (f ^ c1) - c1; + y = (u ^ c1) - c1; + z = (v ^ c1) - c1; + /* Conditionally add x,y,z to g,q,r. */ + g += x & c2; + q += y & c2; + r += z & c2; + /* In what follows, c1 is a condition mask for (zeta < 0) and (g & 1). */ + c1 &= c2; + /* Conditionally change zeta into -zeta-2 or zeta-1. */ + zeta = (zeta ^ c1) - 1; + /* Conditionally add g,q,r to f,u,v. */ + f += g & c1; + u += q & c1; + v += r & c1; + /* Shifts */ + g >>= 1; + u <<= 1; + v <<= 1; + /* Bounds on zeta that follow from the bounds on iteration count (max 20*30 divsteps). */ + VERIFY_CHECK(zeta >= -601 && zeta <= 601); + } + /* Return data in t and return value. */ + t->u = (int32_t)u; + t->v = (int32_t)v; + t->q = (int32_t)q; + t->r = (int32_t)r; + /* The determinant of t must be a power of two. This guarantees that multiplication with t + * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which + * will be divided out again). As each divstep's individual matrix has determinant 2, the + * aggregate of 30 of them will have determinant 2^30. 
*/ + VERIFY_CHECK((int64_t)t->u * t->r - (int64_t)t->v * t->q == ((int64_t)1) << 30); + return zeta; +} + +/* Compute the transition matrix and eta for 30 divsteps (variable time). + * + * Input: eta: initial eta + * f0: bottom limb of initial f + * g0: bottom limb of initial g + * Output: t: transition matrix + * Return: final eta + * + * Implements the divsteps_n_matrix_var function from the explanation. + */ +static int32_t rustsecp256k1_v0_4_1_modinv32_divsteps_30_var(int32_t eta, uint32_t f0, uint32_t g0, rustsecp256k1_v0_4_1_modinv32_trans2x2 *t) { + /* inv256[i] = -(2*i+1)^-1 (mod 256) */ + static const uint8_t inv256[128] = { + 0xFF, 0x55, 0x33, 0x49, 0xC7, 0x5D, 0x3B, 0x11, 0x0F, 0xE5, 0xC3, 0x59, + 0xD7, 0xED, 0xCB, 0x21, 0x1F, 0x75, 0x53, 0x69, 0xE7, 0x7D, 0x5B, 0x31, + 0x2F, 0x05, 0xE3, 0x79, 0xF7, 0x0D, 0xEB, 0x41, 0x3F, 0x95, 0x73, 0x89, + 0x07, 0x9D, 0x7B, 0x51, 0x4F, 0x25, 0x03, 0x99, 0x17, 0x2D, 0x0B, 0x61, + 0x5F, 0xB5, 0x93, 0xA9, 0x27, 0xBD, 0x9B, 0x71, 0x6F, 0x45, 0x23, 0xB9, + 0x37, 0x4D, 0x2B, 0x81, 0x7F, 0xD5, 0xB3, 0xC9, 0x47, 0xDD, 0xBB, 0x91, + 0x8F, 0x65, 0x43, 0xD9, 0x57, 0x6D, 0x4B, 0xA1, 0x9F, 0xF5, 0xD3, 0xE9, + 0x67, 0xFD, 0xDB, 0xB1, 0xAF, 0x85, 0x63, 0xF9, 0x77, 0x8D, 0x6B, 0xC1, + 0xBF, 0x15, 0xF3, 0x09, 0x87, 0x1D, 0xFB, 0xD1, 0xCF, 0xA5, 0x83, 0x19, + 0x97, 0xAD, 0x8B, 0xE1, 0xDF, 0x35, 0x13, 0x29, 0xA7, 0x3D, 0x1B, 0xF1, + 0xEF, 0xC5, 0xA3, 0x39, 0xB7, 0xCD, 0xAB, 0x01 + }; + + /* Transformation matrix; see comments in rustsecp256k1_v0_4_1_modinv32_divsteps_30. */ + uint32_t u = 1, v = 0, q = 0, r = 1; + uint32_t f = f0, g = g0, m; + uint16_t w; + int i = 30, limit, zeros; + + for (;;) { + /* Use a sentinel bit to count zeros only up to i. */ + zeros = rustsecp256k1_v0_4_1_ctz32_var(g | (UINT32_MAX << i)); + /* Perform zeros divsteps at once; they all just divide g by two. */ + g >>= zeros; + u <<= zeros; + v <<= zeros; + eta -= zeros; + i -= zeros; + /* We're done once we've done 30 divsteps. */ + if (i == 0) break; + VERIFY_CHECK((f & 1) == 1); + VERIFY_CHECK((g & 1) == 1); + VERIFY_CHECK((u * f0 + v * g0) == f << (30 - i)); + VERIFY_CHECK((q * f0 + r * g0) == g << (30 - i)); + /* Bounds on eta that follow from the bounds on iteration count (max 25*30 divsteps). */ + VERIFY_CHECK(eta >= -751 && eta <= 751); + /* If eta is negative, negate it and replace f,g with g,-f. */ + if (eta < 0) { + uint32_t tmp; + eta = -eta; + tmp = f; f = g; g = -tmp; + tmp = u; u = q; q = -tmp; + tmp = v; v = r; r = -tmp; + } + /* eta is now >= 0. In what follows we're going to cancel out the bottom bits of g. No more + * than i can be cancelled out (as we'd be done before that point), and no more than eta+1 + * can be done as its sign will flip once that happens. */ + limit = ((int)eta + 1) > i ? i : ((int)eta + 1); + /* m is a mask for the bottom min(limit, 8) bits (our table only supports 8 bits). */ + VERIFY_CHECK(limit > 0 && limit <= 30); + m = (UINT32_MAX >> (32 - limit)) & 255U; + /* Find what multiple of f must be added to g to cancel its bottom min(limit, 8) bits. */ + w = (g * inv256[(f >> 1) & 127]) & m; + /* Do so. */ + g += f * w; + q += u * w; + r += v * w; + VERIFY_CHECK((g & m) == 0); + } + /* Return data in t and return value. */ + t->u = (int32_t)u; + t->v = (int32_t)v; + t->q = (int32_t)q; + t->r = (int32_t)r; + /* The determinant of t must be a power of two. This guarantees that multiplication with t + * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which + * will be divided out again). 
As each divstep's individual matrix has determinant 2, the + * aggregate of 30 of them will have determinant 2^30. */ + VERIFY_CHECK((int64_t)t->u * t->r - (int64_t)t->v * t->q == ((int64_t)1) << 30); + return eta; +} + +/* Compute (t/2^30) * [d, e] mod modulus, where t is a transition matrix for 30 divsteps. + * + * On input and output, d and e are in range (-2*modulus,modulus). All output limbs will be in range + * (-2^30,2^30). + * + * This implements the update_de function from the explanation. + */ +static void rustsecp256k1_v0_4_1_modinv32_update_de_30(rustsecp256k1_v0_4_1_modinv32_signed30 *d, rustsecp256k1_v0_4_1_modinv32_signed30 *e, const rustsecp256k1_v0_4_1_modinv32_trans2x2 *t, const rustsecp256k1_v0_4_1_modinv32_modinfo* modinfo) { + const int32_t M30 = (int32_t)(UINT32_MAX >> 2); + const int32_t u = t->u, v = t->v, q = t->q, r = t->r; + int32_t di, ei, md, me, sd, se; + int64_t cd, ce; + int i; +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */ + VERIFY_CHECK((labs(u) + labs(v)) >= 0); /* |u|+|v| doesn't overflow */ + VERIFY_CHECK((labs(q) + labs(r)) >= 0); /* |q|+|r| doesn't overflow */ + VERIFY_CHECK((labs(u) + labs(v)) <= M30 + 1); /* |u|+|v| <= 2^30 */ + VERIFY_CHECK((labs(q) + labs(r)) <= M30 + 1); /* |q|+|r| <= 2^30 */ +#endif + /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */ + sd = d->v[8] >> 31; + se = e->v[8] >> 31; + md = (u & sd) + (v & se); + me = (q & sd) + (r & se); + /* Begin computing t*[d,e]. */ + di = d->v[0]; + ei = e->v[0]; + cd = (int64_t)u * di + (int64_t)v * ei; + ce = (int64_t)q * di + (int64_t)r * ei; + /* Correct md,me so that t*[d,e]+modulus*[md,me] has 30 zero bottom bits. */ + md -= (modinfo->modulus_inv30 * (uint32_t)cd + md) & M30; + me -= (modinfo->modulus_inv30 * (uint32_t)ce + me) & M30; + /* Update the beginning of computation for t*[d,e]+modulus*[md,me] now md,me are known. */ + cd += (int64_t)modinfo->modulus.v[0] * md; + ce += (int64_t)modinfo->modulus.v[0] * me; + /* Verify that the low 30 bits of the computation are indeed zero, and then throw them away. */ + VERIFY_CHECK(((int32_t)cd & M30) == 0); cd >>= 30; + VERIFY_CHECK(((int32_t)ce & M30) == 0); ce >>= 30; + /* Now iteratively compute limb i=1..8 of t*[d,e]+modulus*[md,me], and store them in output + * limb i-1 (shifting down by 30 bits). */ + for (i = 1; i < 9; ++i) { + di = d->v[i]; + ei = e->v[i]; + cd += (int64_t)u * di + (int64_t)v * ei; + ce += (int64_t)q * di + (int64_t)r * ei; + cd += (int64_t)modinfo->modulus.v[i] * md; + ce += (int64_t)modinfo->modulus.v[i] * me; + d->v[i - 1] = (int32_t)cd & M30; cd >>= 30; + e->v[i - 1] = (int32_t)ce & M30; ce >>= 30; + } + /* What remains is limb 9 of t*[d,e]+modulus*[md,me]; store it as output limb 8. 
*/ + d->v[8] = (int32_t)cd; + e->v[8] = (int32_t)ce; +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */ +#endif +} + +/* Compute (t/2^30) * [f, g], where t is a transition matrix for 30 divsteps. + * + * This implements the update_fg function from the explanation. + */ +static void rustsecp256k1_v0_4_1_modinv32_update_fg_30(rustsecp256k1_v0_4_1_modinv32_signed30 *f, rustsecp256k1_v0_4_1_modinv32_signed30 *g, const rustsecp256k1_v0_4_1_modinv32_trans2x2 *t) { + const int32_t M30 = (int32_t)(UINT32_MAX >> 2); + const int32_t u = t->u, v = t->v, q = t->q, r = t->r; + int32_t fi, gi; + int64_t cf, cg; + int i; + /* Start computing t*[f,g]. */ + fi = f->v[0]; + gi = g->v[0]; + cf = (int64_t)u * fi + (int64_t)v * gi; + cg = (int64_t)q * fi + (int64_t)r * gi; + /* Verify that the bottom 30 bits of the result are zero, and then throw them away. */ + VERIFY_CHECK(((int32_t)cf & M30) == 0); cf >>= 30; + VERIFY_CHECK(((int32_t)cg & M30) == 0); cg >>= 30; + /* Now iteratively compute limb i=1..8 of t*[f,g], and store them in output limb i-1 (shifting + * down by 30 bits). */ + for (i = 1; i < 9; ++i) { + fi = f->v[i]; + gi = g->v[i]; + cf += (int64_t)u * fi + (int64_t)v * gi; + cg += (int64_t)q * fi + (int64_t)r * gi; + f->v[i - 1] = (int32_t)cf & M30; cf >>= 30; + g->v[i - 1] = (int32_t)cg & M30; cg >>= 30; + } + /* What remains is limb 9 of t*[f,g]; store it as output limb 8. */ + f->v[8] = (int32_t)cf; + g->v[8] = (int32_t)cg; +} + +/* Compute (t/2^30) * [f, g], where t is a transition matrix for 30 divsteps. + * + * Version that operates on a variable number of limbs in f and g. + * + * This implements the update_fg function from the explanation in modinv64_impl.h. + */ +static void rustsecp256k1_v0_4_1_modinv32_update_fg_30_var(int len, rustsecp256k1_v0_4_1_modinv32_signed30 *f, rustsecp256k1_v0_4_1_modinv32_signed30 *g, const rustsecp256k1_v0_4_1_modinv32_trans2x2 *t) { + const int32_t M30 = (int32_t)(UINT32_MAX >> 2); + const int32_t u = t->u, v = t->v, q = t->q, r = t->r; + int32_t fi, gi; + int64_t cf, cg; + int i; + VERIFY_CHECK(len > 0); + /* Start computing t*[f,g]. */ + fi = f->v[0]; + gi = g->v[0]; + cf = (int64_t)u * fi + (int64_t)v * gi; + cg = (int64_t)q * fi + (int64_t)r * gi; + /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */ + VERIFY_CHECK(((int32_t)cf & M30) == 0); cf >>= 30; + VERIFY_CHECK(((int32_t)cg & M30) == 0); cg >>= 30; + /* Now iteratively compute limb i=1..len of t*[f,g], and store them in output limb i-1 (shifting + * down by 30 bits). */ + for (i = 1; i < len; ++i) { + fi = f->v[i]; + gi = g->v[i]; + cf += (int64_t)u * fi + (int64_t)v * gi; + cg += (int64_t)q * fi + (int64_t)r * gi; + f->v[i - 1] = (int32_t)cf & M30; cf >>= 30; + g->v[i - 1] = (int32_t)cg & M30; cg >>= 30; + } + /* What remains is limb (len) of t*[f,g]; store it as output limb (len-1). */ + f->v[len - 1] = (int32_t)cf; + g->v[len - 1] = (int32_t)cg; +} + +/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (constant time in x). 
*/ +static void rustsecp256k1_v0_4_1_modinv32(rustsecp256k1_v0_4_1_modinv32_signed30 *x, const rustsecp256k1_v0_4_1_modinv32_modinfo *modinfo) { + /* Start with d=0, e=1, f=modulus, g=x, zeta=-1. */ + rustsecp256k1_v0_4_1_modinv32_signed30 d = {{0}}; + rustsecp256k1_v0_4_1_modinv32_signed30 e = {{1}}; + rustsecp256k1_v0_4_1_modinv32_signed30 f = modinfo->modulus; + rustsecp256k1_v0_4_1_modinv32_signed30 g = *x; + int i; + int32_t zeta = -1; /* zeta = -(delta+1/2); delta is initially 1/2. */ + + /* Do 20 iterations of 30 divsteps each = 600 divsteps. 590 suffices for 256-bit inputs. */ + for (i = 0; i < 20; ++i) { + /* Compute transition matrix and new zeta after 30 divsteps. */ + rustsecp256k1_v0_4_1_modinv32_trans2x2 t; + zeta = rustsecp256k1_v0_4_1_modinv32_divsteps_30(zeta, f.v[0], g.v[0], &t); + /* Update d,e using that transition matrix. */ + rustsecp256k1_v0_4_1_modinv32_update_de_30(&d, &e, &t, modinfo); + /* Update f,g using that transition matrix. */ +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */ +#endif + rustsecp256k1_v0_4_1_modinv32_update_fg_30(&f, &g, &t); +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */ +#endif + } + + /* At this point sufficient iterations have been performed that g must have reached 0 + * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g + * values i.e. +/- 1, and d now contains +/- the modular inverse. */ +#ifdef VERIFY + /* g == 0 */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, 9, &SECP256K1_SIGNED30_ONE, 0) == 0); + /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, -1) == 0 || + rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, 1) == 0 || + (rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + (rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) == 0 || + rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) == 0))); +#endif + + /* Optionally negate d, normalize to [0,modulus), and return it. */ + rustsecp256k1_v0_4_1_modinv32_normalize_30(&d, f.v[8], modinfo); + *x = d; +} + +/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (variable time). */ +static void rustsecp256k1_v0_4_1_modinv32_var(rustsecp256k1_v0_4_1_modinv32_signed30 *x, const rustsecp256k1_v0_4_1_modinv32_modinfo *modinfo) { + /* Start with d=0, e=1, f=modulus, g=x, eta=-1. 
*/ + rustsecp256k1_v0_4_1_modinv32_signed30 d = {{0, 0, 0, 0, 0, 0, 0, 0, 0}}; + rustsecp256k1_v0_4_1_modinv32_signed30 e = {{1, 0, 0, 0, 0, 0, 0, 0, 0}}; + rustsecp256k1_v0_4_1_modinv32_signed30 f = modinfo->modulus; + rustsecp256k1_v0_4_1_modinv32_signed30 g = *x; +#ifdef VERIFY + int i = 0; +#endif + int j, len = 9; + int32_t eta = -1; /* eta = -delta; delta is initially 1 (faster for the variable-time code) */ + int32_t cond, fn, gn; + + /* Do iterations of 30 divsteps each until g=0. */ + while (1) { + /* Compute transition matrix and new eta after 30 divsteps. */ + rustsecp256k1_v0_4_1_modinv32_trans2x2 t; + eta = rustsecp256k1_v0_4_1_modinv32_divsteps_30_var(eta, f.v[0], g.v[0], &t); + /* Update d,e using that transition matrix. */ + rustsecp256k1_v0_4_1_modinv32_update_de_30(&d, &e, &t, modinfo); + /* Update f,g using that transition matrix. */ +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ +#endif + rustsecp256k1_v0_4_1_modinv32_update_fg_30_var(len, &f, &g, &t); + /* If the bottom limb of g is 0, there is a chance g=0. */ + if (g.v[0] == 0) { + cond = 0; + /* Check if all other limbs are also 0. */ + for (j = 1; j < len; ++j) { + cond |= g.v[j]; + } + /* If so, we're done. */ + if (cond == 0) break; + } + + /* Determine if len>1 and limb (len-1) of both f and g is 0 or -1. */ + fn = f.v[len - 1]; + gn = g.v[len - 1]; + cond = ((int32_t)len - 2) >> 31; + cond |= fn ^ (fn >> 31); + cond |= gn ^ (gn >> 31); + /* If so, reduce length, propagating the sign of f and g's top limb into the one below. */ + if (cond == 0) { + f.v[len - 2] |= (uint32_t)fn << 30; + g.v[len - 2] |= (uint32_t)gn << 30; + --len; + } +#ifdef VERIFY + VERIFY_CHECK(++i < 25); /* We should never need more than 25*30 = 750 divsteps */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ +#endif + } + + /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of + * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. 
*/ +#ifdef VERIFY + /* g == 0 */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, len, &SECP256K1_SIGNED30_ONE, 0) == 0); + /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, -1) == 0 || + rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, 1) == 0 || + (rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + (rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) == 0 || + rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) == 0))); +#endif + + /* Optionally negate d, normalize to [0,modulus), and return it. */ + rustsecp256k1_v0_4_1_modinv32_normalize_30(&d, f.v[len - 1], modinfo); + *x = d; +} + +#endif /* SECP256K1_MODINV32_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/modinv64.h b/secp256k1-sys/depend/secp256k1/src/modinv64.h new file mode 100644 index 0000000..5eb9f3d --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/src/modinv64.h @@ -0,0 +1,46 @@ +/*********************************************************************** + * Copyright (c) 2020 Peter Dettman * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or https://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef SECP256K1_MODINV64_H +#define SECP256K1_MODINV64_H + +#if defined HAVE_CONFIG_H +#include "libsecp256k1-config.h" +#endif + +#include "util.h" + +#ifndef SECP256K1_WIDEMUL_INT128 +#error "modinv64 requires 128-bit wide multiplication support" +#endif + +/* A signed 62-bit limb representation of integers. + * + * Its value is sum(v[i] * 2^(62*i), i=0..4). */ +typedef struct { + int64_t v[5]; +} rustsecp256k1_v0_4_1_modinv64_signed62; + +typedef struct { + /* The modulus in signed62 notation, must be odd and in [3, 2^256]. */ + rustsecp256k1_v0_4_1_modinv64_signed62 modulus; + + /* modulus^{-1} mod 2^62 */ + uint64_t modulus_inv62; +} rustsecp256k1_v0_4_1_modinv64_modinfo; + +/* Replace x with its modular inverse mod modinfo->modulus. x must be in range [0, modulus). + * If x is zero, the result will be zero as well. If not, the inverse must exist (i.e., the gcd of + * x and modulus must be 1). These rules are automatically satisfied if the modulus is prime. + * + * On output, all of x's limbs will be in [0, 2^62). + */ +static void rustsecp256k1_v0_4_1_modinv64_var(rustsecp256k1_v0_4_1_modinv64_signed62 *x, const rustsecp256k1_v0_4_1_modinv64_modinfo *modinfo); + +/* Same as rustsecp256k1_v0_4_1_modinv64_var, but constant time in x (not in the modulus). 
*/ +static void rustsecp256k1_v0_4_1_modinv64(rustsecp256k1_v0_4_1_modinv64_signed62 *x, const rustsecp256k1_v0_4_1_modinv64_modinfo *modinfo); + +#endif /* SECP256K1_MODINV64_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/modinv64_impl.h b/secp256k1-sys/depend/secp256k1/src/modinv64_impl.h new file mode 100644 index 0000000..9bcf89c --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/src/modinv64_impl.h @@ -0,0 +1,593 @@ +/*********************************************************************** + * Copyright (c) 2020 Peter Dettman * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or https://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef SECP256K1_MODINV64_IMPL_H +#define SECP256K1_MODINV64_IMPL_H + +#include "modinv64.h" + +#include "util.h" + +/* This file implements modular inversion based on the paper "Fast constant-time gcd computation and + * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang. + * + * For an explanation of the algorithm, see doc/safegcd_implementation.md. This file contains an + * implementation for N=62, using 62-bit signed limbs represented as int64_t. + */ + +#ifdef VERIFY +/* Helper function to compute the absolute value of an int64_t. + * (we don't use abs/labs/llabs as it depends on the int sizes). */ +static int64_t rustsecp256k1_v0_4_1_modinv64_abs(int64_t v) { + VERIFY_CHECK(v > INT64_MIN); + if (v < 0) return -v; + return v; +} + +static const rustsecp256k1_v0_4_1_modinv64_signed62 SECP256K1_SIGNED62_ONE = {{1}}; + +/* Compute a*factor and put it in r. All but the top limb in r will be in range [0,2^62). */ +static void rustsecp256k1_v0_4_1_modinv64_mul_62(rustsecp256k1_v0_4_1_modinv64_signed62 *r, const rustsecp256k1_v0_4_1_modinv64_signed62 *a, int alen, int64_t factor) { + const int64_t M62 = (int64_t)(UINT64_MAX >> 2); + int128_t c = 0; + int i; + for (i = 0; i < 4; ++i) { + if (i < alen) c += (int128_t)a->v[i] * factor; + r->v[i] = (int64_t)c & M62; c >>= 62; + } + if (4 < alen) c += (int128_t)a->v[4] * factor; + VERIFY_CHECK(c == (int64_t)c); + r->v[4] = (int64_t)c; +} + +/* Return -1 for ab*factor. A has alen limbs; b has 5. */ +static int rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(const rustsecp256k1_v0_4_1_modinv64_signed62 *a, int alen, const rustsecp256k1_v0_4_1_modinv64_signed62 *b, int64_t factor) { + int i; + rustsecp256k1_v0_4_1_modinv64_signed62 am, bm; + rustsecp256k1_v0_4_1_modinv64_mul_62(&am, a, alen, 1); /* Normalize all but the top limb of a. */ + rustsecp256k1_v0_4_1_modinv64_mul_62(&bm, b, 5, factor); + for (i = 0; i < 4; ++i) { + /* Verify that all but the top limb of a and b are normalized. */ + VERIFY_CHECK(am.v[i] >> 62 == 0); + VERIFY_CHECK(bm.v[i] >> 62 == 0); + } + for (i = 4; i >= 0; --i) { + if (am.v[i] < bm.v[i]) return -1; + if (am.v[i] > bm.v[i]) return 1; + } + return 0; +} +#endif + +/* Take as input a signed62 number in range (-2*modulus,modulus), and add a multiple of the modulus + * to it to bring it to range [0,modulus). If sign < 0, the input will also be negated in the + * process. The input must have limbs in range (-2^62,2^62). The output will have limbs in range + * [0,2^62). 
+ */
+static void rustsecp256k1_v0_4_1_modinv64_normalize_62(rustsecp256k1_v0_4_1_modinv64_signed62 *r, int64_t sign, const rustsecp256k1_v0_4_1_modinv64_modinfo *modinfo) {
+    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
+    int64_t r0 = r->v[0], r1 = r->v[1], r2 = r->v[2], r3 = r->v[3], r4 = r->v[4];
+    int64_t cond_add, cond_negate;
+
+#ifdef VERIFY
+    /* Verify that all limbs are in range (-2^62,2^62). */
+    int i;
+    for (i = 0; i < 5; ++i) {
+        VERIFY_CHECK(r->v[i] >= -M62);
+        VERIFY_CHECK(r->v[i] <= M62);
+    }
+    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, -2) > 0); /* r > -2*modulus */
+    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */
+#endif
+
+    /* In a first step, add the modulus if the input is negative, and then negate if requested.
+     * This brings r from range (-2*modulus,modulus) to range (-modulus,modulus). As all input
+     * limbs are in range (-2^62,2^62), this cannot overflow an int64_t. Note that the right
+     * shifts below are signed sign-extending shifts (see assumptions.h for tests that that is
+     * indeed the behavior of the right shift operator). */
+    cond_add = r4 >> 63;
+    r0 += modinfo->modulus.v[0] & cond_add;
+    r1 += modinfo->modulus.v[1] & cond_add;
+    r2 += modinfo->modulus.v[2] & cond_add;
+    r3 += modinfo->modulus.v[3] & cond_add;
+    r4 += modinfo->modulus.v[4] & cond_add;
+    cond_negate = sign >> 63;
+    r0 = (r0 ^ cond_negate) - cond_negate;
+    r1 = (r1 ^ cond_negate) - cond_negate;
+    r2 = (r2 ^ cond_negate) - cond_negate;
+    r3 = (r3 ^ cond_negate) - cond_negate;
+    r4 = (r4 ^ cond_negate) - cond_negate;
+    /* Propagate the top bits, to bring limbs back to range (-2^62,2^62). */
+    r1 += r0 >> 62; r0 &= M62;
+    r2 += r1 >> 62; r1 &= M62;
+    r3 += r2 >> 62; r2 &= M62;
+    r4 += r3 >> 62; r3 &= M62;
+
+    /* In a second step add the modulus again if the result is still negative, bringing
+     * r to range [0,modulus). */
+    cond_add = r4 >> 63;
+    r0 += modinfo->modulus.v[0] & cond_add;
+    r1 += modinfo->modulus.v[1] & cond_add;
+    r2 += modinfo->modulus.v[2] & cond_add;
+    r3 += modinfo->modulus.v[3] & cond_add;
+    r4 += modinfo->modulus.v[4] & cond_add;
+    /* And propagate again. */
+    r1 += r0 >> 62; r0 &= M62;
+    r2 += r1 >> 62; r1 &= M62;
+    r3 += r2 >> 62; r2 &= M62;
+    r4 += r3 >> 62; r3 &= M62;
+
+    r->v[0] = r0;
+    r->v[1] = r1;
+    r->v[2] = r2;
+    r->v[3] = r3;
+    r->v[4] = r4;
+
+#ifdef VERIFY
+    VERIFY_CHECK(r0 >> 62 == 0);
+    VERIFY_CHECK(r1 >> 62 == 0);
+    VERIFY_CHECK(r2 >> 62 == 0);
+    VERIFY_CHECK(r3 >> 62 == 0);
+    VERIFY_CHECK(r4 >> 62 == 0);
+    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */
+    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */
+#endif
+}
+
+/* Data type for transition matrices (see section 3 of explanation).
+ *
+ * t = [ u v ]
+ *     [ q r ]
+ */
+typedef struct {
+    int64_t u, v, q, r;
+} rustsecp256k1_v0_4_1_modinv64_trans2x2;
+
+/* Compute the transition matrix and zeta for 59 divsteps (where zeta=-(delta+1/2)).
+ * Note that the transformation matrix is scaled by 2^62 and not 2^59.
+ *
+ * Input:  zeta: initial zeta
+ *         f0:   bottom limb of initial f
+ *         g0:   bottom limb of initial g
+ * Output: t: transition matrix
+ * Return: final zeta
+ *
+ * Implements the divsteps_n_matrix function from the explanation.
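+ *
+ * For intuition, here is a single unscaled divstep in the delta formulation of
+ * the paper (an editorial illustration, not upstream code; the function below
+ * batches 59 such steps, tracked via zeta = -(delta+1/2), into one matrix t):
+ *
+ *   static void divstep(int64_t *delta, int64_t *f, int64_t *g) {
+ *       if (*delta > 0 && (*g & 1)) {
+ *           int64_t tmp = *g;
+ *           *g = (*g - *f) >> 1;     (g - f is even, so this is exact)
+ *           *f = tmp;
+ *           *delta = 1 - *delta;
+ *       } else {
+ *           if (*g & 1) *g += *f;    (make g even; f is always odd)
+ *           *g >>= 1;
+ *           *delta = 1 + *delta;
+ *       }
+ *   }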
+ */
+static int64_t rustsecp256k1_v0_4_1_modinv64_divsteps_59(int64_t zeta, uint64_t f0, uint64_t g0, rustsecp256k1_v0_4_1_modinv64_trans2x2 *t) {
+    /* u,v,q,r are the elements of the transformation matrix being built up,
+     * starting with the identity matrix times 8 (because the caller expects
+     * a result scaled by 2^62). Semantically they are signed integers
+     * in range [-2^62,2^62], but here represented as unsigned mod 2^64. This
+     * permits left shifting (which is UB for negative numbers). The range
+     * being inside [-2^63,2^63) means that casting to signed works correctly.
+     */
+    uint64_t u = 8, v = 0, q = 0, r = 8;
+    uint64_t c1, c2, f = f0, g = g0, x, y, z;
+    int i;
+
+    for (i = 3; i < 62; ++i) {
+        VERIFY_CHECK((f & 1) == 1); /* f must always be odd */
+        VERIFY_CHECK((u * f0 + v * g0) == f << i);
+        VERIFY_CHECK((q * f0 + r * g0) == g << i);
+        /* Compute conditional masks for (zeta < 0) and for (g & 1). */
+        c1 = zeta >> 63;
+        c2 = -(g & 1);
+        /* Compute x,y,z, conditionally negated versions of f,u,v. */
+        x = (f ^ c1) - c1;
+        y = (u ^ c1) - c1;
+        z = (v ^ c1) - c1;
+        /* Conditionally add x,y,z to g,q,r. */
+        g += x & c2;
+        q += y & c2;
+        r += z & c2;
+        /* In what follows, c1 is a condition mask for (zeta < 0) and (g & 1). */
+        c1 &= c2;
+        /* Conditionally change zeta into -zeta-2 or zeta-1. */
+        zeta = (zeta ^ c1) - 1;
+        /* Conditionally add g,q,r to f,u,v. */
+        f += g & c1;
+        u += q & c1;
+        v += r & c1;
+        /* Shifts */
+        g >>= 1;
+        u <<= 1;
+        v <<= 1;
+        /* Bounds on zeta that follow from the bounds on iteration count (max 10*59 divsteps). */
+        VERIFY_CHECK(zeta >= -591 && zeta <= 591);
+    }
+    /* Return data in t and return value. */
+    t->u = (int64_t)u;
+    t->v = (int64_t)v;
+    t->q = (int64_t)q;
+    t->r = (int64_t)r;
+    /* The determinant of t must be a power of two. This guarantees that multiplication with t
+     * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
+     * will be divided out again). As each divstep's individual matrix has determinant 2, the
+     * aggregate of 59 of them will have determinant 2^59. Multiplying with the initial
+     * 8*identity (which has determinant 2^6) means the overall output has determinant
+     * 2^65. */
+    VERIFY_CHECK((int128_t)t->u * t->r - (int128_t)t->v * t->q == ((int128_t)1) << 65);
+    return zeta;
+}
+
+/* Compute the transition matrix and eta for 62 divsteps (variable time, eta=-delta).
+ *
+ * Input:  eta: initial eta
+ *         f0:  bottom limb of initial f
+ *         g0:  bottom limb of initial g
+ * Output: t: transition matrix
+ * Return: final eta
+ *
+ * Implements the divsteps_n_matrix_var function from the explanation.
+ */
+static int64_t rustsecp256k1_v0_4_1_modinv64_divsteps_62_var(int64_t eta, uint64_t f0, uint64_t g0, rustsecp256k1_v0_4_1_modinv64_trans2x2 *t) {
+    /* Transformation matrix; see comments in rustsecp256k1_v0_4_1_modinv64_divsteps_59. */
+    uint64_t u = 1, v = 0, q = 0, r = 1;
+    uint64_t f = f0, g = g0, m;
+    uint32_t w;
+    int i = 62, limit, zeros;
+
+    for (;;) {
+        /* Use a sentinel bit to count zeros only up to i. */
+        zeros = rustsecp256k1_v0_4_1_ctz64_var(g | (UINT64_MAX << i));
+        /* Perform zeros divsteps at once; they all just divide g by two. */
+        g >>= zeros;
+        u <<= zeros;
+        v <<= zeros;
+        eta -= zeros;
+        i -= zeros;
+        /* We're done once we've done 62 divsteps. */
+        if (i == 0) break;
+        VERIFY_CHECK((f & 1) == 1);
+        VERIFY_CHECK((g & 1) == 1);
+        VERIFY_CHECK((u * f0 + v * g0) == f << (62 - i));
+        VERIFY_CHECK((q * f0 + r * g0) == g << (62 - i));
+        /* Bounds on eta that follow from the bounds on iteration count (max 12*62 divsteps). */
+        VERIFY_CHECK(eta >= -745 && eta <= 745);
+        /* If eta is negative, negate it and replace f,g with g,-f. */
+        if (eta < 0) {
+            uint64_t tmp;
+            eta = -eta;
+            tmp = f; f = g; g = -tmp;
+            tmp = u; u = q; q = -tmp;
+            tmp = v; v = r; r = -tmp;
+            /* Use a formula to cancel out up to 6 bits of g. Also, no more than i can be cancelled
+             * out (as we'd be done before that point), and no more than eta+1 can be done as its
+             * sign will flip again once that happens. */
+            limit = ((int)eta + 1) > i ? i : ((int)eta + 1);
+            VERIFY_CHECK(limit > 0 && limit <= 62);
+            /* m is a mask for the bottom min(limit, 6) bits. */
+            m = (UINT64_MAX >> (64 - limit)) & 63U;
+            /* Find what multiple of f must be added to g to cancel its bottom min(limit, 6)
+             * bits. */
+            w = (f * g * (f * f - 2)) & m;
+        } else {
+            /* In this branch, use a simpler formula that only lets us cancel up to 4 bits of g, as
+             * eta tends to be smaller here. */
+            limit = ((int)eta + 1) > i ? i : ((int)eta + 1);
+            VERIFY_CHECK(limit > 0 && limit <= 62);
+            /* m is a mask for the bottom min(limit, 4) bits. */
+            m = (UINT64_MAX >> (64 - limit)) & 15U;
+            /* Find what multiple of f must be added to g to cancel its bottom min(limit, 4)
+             * bits. */
+            w = f + (((f + 1) & 4) << 1);
+            w = (-w * g) & m;
+        }
+        g += f * w;
+        q += u * w;
+        r += v * w;
+        VERIFY_CHECK((g & m) == 0);
+    }
+    /* Return data in t and return value. */
+    t->u = (int64_t)u;
+    t->v = (int64_t)v;
+    t->q = (int64_t)q;
+    t->r = (int64_t)r;
+    /* The determinant of t must be a power of two. This guarantees that multiplication with t
+     * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
+     * will be divided out again). As each divstep's individual matrix has determinant 2, the
+     * aggregate of 62 of them will have determinant 2^62. */
+    VERIFY_CHECK((int128_t)t->u * t->r - (int128_t)t->v * t->q == ((int128_t)1) << 62);
+    return eta;
+}
+
+/* Compute (t/2^62) * [d, e] mod modulus, where t is a transition matrix scaled by 2^62.
+ *
+ * On input and output, d and e are in range (-2*modulus,modulus). All output limbs will be in range
+ * (-2^62,2^62).
+ *
+ * This implements the update_de function from the explanation.
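+ *
+ * The exactness trick, sketched (editorial note, ignoring the initial
+ * sign-correction terms for negative d and e): exact division by 2^62 works
+ * because a multiple md of the modulus M is added so the low 62 bits cancel:
+ *
+ *   md = -(modulus_inv62 * (u*d + v*e)) mod 2^62
+ *   d' = (u*d + v*e + md*M) / 2^62
+ *
+ * which is exact, and congruent to (u*d + v*e) / 2^62 modulo M.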
+ */ +static void rustsecp256k1_v0_4_1_modinv64_update_de_62(rustsecp256k1_v0_4_1_modinv64_signed62 *d, rustsecp256k1_v0_4_1_modinv64_signed62 *e, const rustsecp256k1_v0_4_1_modinv64_trans2x2 *t, const rustsecp256k1_v0_4_1_modinv64_modinfo* modinfo) { + const int64_t M62 = (int64_t)(UINT64_MAX >> 2); + const int64_t d0 = d->v[0], d1 = d->v[1], d2 = d->v[2], d3 = d->v[3], d4 = d->v[4]; + const int64_t e0 = e->v[0], e1 = e->v[1], e2 = e->v[2], e3 = e->v[3], e4 = e->v[4]; + const int64_t u = t->u, v = t->v, q = t->q, r = t->r; + int64_t md, me, sd, se; + int128_t cd, ce; +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */ + VERIFY_CHECK((rustsecp256k1_v0_4_1_modinv64_abs(u) + rustsecp256k1_v0_4_1_modinv64_abs(v)) >= 0); /* |u|+|v| doesn't overflow */ + VERIFY_CHECK((rustsecp256k1_v0_4_1_modinv64_abs(q) + rustsecp256k1_v0_4_1_modinv64_abs(r)) >= 0); /* |q|+|r| doesn't overflow */ + VERIFY_CHECK((rustsecp256k1_v0_4_1_modinv64_abs(u) + rustsecp256k1_v0_4_1_modinv64_abs(v)) <= M62 + 1); /* |u|+|v| <= 2^62 */ + VERIFY_CHECK((rustsecp256k1_v0_4_1_modinv64_abs(q) + rustsecp256k1_v0_4_1_modinv64_abs(r)) <= M62 + 1); /* |q|+|r| <= 2^62 */ +#endif + /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */ + sd = d4 >> 63; + se = e4 >> 63; + md = (u & sd) + (v & se); + me = (q & sd) + (r & se); + /* Begin computing t*[d,e]. */ + cd = (int128_t)u * d0 + (int128_t)v * e0; + ce = (int128_t)q * d0 + (int128_t)r * e0; + /* Correct md,me so that t*[d,e]+modulus*[md,me] has 62 zero bottom bits. */ + md -= (modinfo->modulus_inv62 * (uint64_t)cd + md) & M62; + me -= (modinfo->modulus_inv62 * (uint64_t)ce + me) & M62; + /* Update the beginning of computation for t*[d,e]+modulus*[md,me] now md,me are known. */ + cd += (int128_t)modinfo->modulus.v[0] * md; + ce += (int128_t)modinfo->modulus.v[0] * me; + /* Verify that the low 62 bits of the computation are indeed zero, and then throw them away. */ + VERIFY_CHECK(((int64_t)cd & M62) == 0); cd >>= 62; + VERIFY_CHECK(((int64_t)ce & M62) == 0); ce >>= 62; + /* Compute limb 1 of t*[d,e]+modulus*[md,me], and store it as output limb 0 (= down shift). */ + cd += (int128_t)u * d1 + (int128_t)v * e1; + ce += (int128_t)q * d1 + (int128_t)r * e1; + if (modinfo->modulus.v[1]) { /* Optimize for the case where limb of modulus is zero. */ + cd += (int128_t)modinfo->modulus.v[1] * md; + ce += (int128_t)modinfo->modulus.v[1] * me; + } + d->v[0] = (int64_t)cd & M62; cd >>= 62; + e->v[0] = (int64_t)ce & M62; ce >>= 62; + /* Compute limb 2 of t*[d,e]+modulus*[md,me], and store it as output limb 1. */ + cd += (int128_t)u * d2 + (int128_t)v * e2; + ce += (int128_t)q * d2 + (int128_t)r * e2; + if (modinfo->modulus.v[2]) { /* Optimize for the case where limb of modulus is zero. */ + cd += (int128_t)modinfo->modulus.v[2] * md; + ce += (int128_t)modinfo->modulus.v[2] * me; + } + d->v[1] = (int64_t)cd & M62; cd >>= 62; + e->v[1] = (int64_t)ce & M62; ce >>= 62; + /* Compute limb 3 of t*[d,e]+modulus*[md,me], and store it as output limb 2. 
*/ + cd += (int128_t)u * d3 + (int128_t)v * e3; + ce += (int128_t)q * d3 + (int128_t)r * e3; + if (modinfo->modulus.v[3]) { /* Optimize for the case where limb of modulus is zero. */ + cd += (int128_t)modinfo->modulus.v[3] * md; + ce += (int128_t)modinfo->modulus.v[3] * me; + } + d->v[2] = (int64_t)cd & M62; cd >>= 62; + e->v[2] = (int64_t)ce & M62; ce >>= 62; + /* Compute limb 4 of t*[d,e]+modulus*[md,me], and store it as output limb 3. */ + cd += (int128_t)u * d4 + (int128_t)v * e4; + ce += (int128_t)q * d4 + (int128_t)r * e4; + cd += (int128_t)modinfo->modulus.v[4] * md; + ce += (int128_t)modinfo->modulus.v[4] * me; + d->v[3] = (int64_t)cd & M62; cd >>= 62; + e->v[3] = (int64_t)ce & M62; ce >>= 62; + /* What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4. */ + d->v[4] = (int64_t)cd; + e->v[4] = (int64_t)ce; +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */ +#endif +} + +/* Compute (t/2^62) * [f, g], where t is a transition matrix scaled by 2^62. + * + * This implements the update_fg function from the explanation. + */ +static void rustsecp256k1_v0_4_1_modinv64_update_fg_62(rustsecp256k1_v0_4_1_modinv64_signed62 *f, rustsecp256k1_v0_4_1_modinv64_signed62 *g, const rustsecp256k1_v0_4_1_modinv64_trans2x2 *t) { + const int64_t M62 = (int64_t)(UINT64_MAX >> 2); + const int64_t f0 = f->v[0], f1 = f->v[1], f2 = f->v[2], f3 = f->v[3], f4 = f->v[4]; + const int64_t g0 = g->v[0], g1 = g->v[1], g2 = g->v[2], g3 = g->v[3], g4 = g->v[4]; + const int64_t u = t->u, v = t->v, q = t->q, r = t->r; + int128_t cf, cg; + /* Start computing t*[f,g]. */ + cf = (int128_t)u * f0 + (int128_t)v * g0; + cg = (int128_t)q * f0 + (int128_t)r * g0; + /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */ + VERIFY_CHECK(((int64_t)cf & M62) == 0); cf >>= 62; + VERIFY_CHECK(((int64_t)cg & M62) == 0); cg >>= 62; + /* Compute limb 1 of t*[f,g], and store it as output limb 0 (= down shift). */ + cf += (int128_t)u * f1 + (int128_t)v * g1; + cg += (int128_t)q * f1 + (int128_t)r * g1; + f->v[0] = (int64_t)cf & M62; cf >>= 62; + g->v[0] = (int64_t)cg & M62; cg >>= 62; + /* Compute limb 2 of t*[f,g], and store it as output limb 1. */ + cf += (int128_t)u * f2 + (int128_t)v * g2; + cg += (int128_t)q * f2 + (int128_t)r * g2; + f->v[1] = (int64_t)cf & M62; cf >>= 62; + g->v[1] = (int64_t)cg & M62; cg >>= 62; + /* Compute limb 3 of t*[f,g], and store it as output limb 2. */ + cf += (int128_t)u * f3 + (int128_t)v * g3; + cg += (int128_t)q * f3 + (int128_t)r * g3; + f->v[2] = (int64_t)cf & M62; cf >>= 62; + g->v[2] = (int64_t)cg & M62; cg >>= 62; + /* Compute limb 4 of t*[f,g], and store it as output limb 3. */ + cf += (int128_t)u * f4 + (int128_t)v * g4; + cg += (int128_t)q * f4 + (int128_t)r * g4; + f->v[3] = (int64_t)cf & M62; cf >>= 62; + g->v[3] = (int64_t)cg & M62; cg >>= 62; + /* What remains is limb 5 of t*[f,g]; store it as output limb 4. */ + f->v[4] = (int64_t)cf; + g->v[4] = (int64_t)cg; +} + +/* Compute (t/2^62) * [f, g], where t is a transition matrix for 62 divsteps. + * + * Version that operates on a variable number of limbs in f and g. 
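+ *
+ * In scalar terms (editorial illustration), the function computes, carrying
+ * 62 bits at a time,
+ *
+ *   [ f' ]             [ u v ]   [ f ]
+ *   [ g' ] = 2^(-62) * [ q r ] * [ g ]
+ *
+ * where the bottom 62 bits of the first partial sums u*f[0]+v*g[0] and
+ * q*f[0]+r*g[0] are zero by construction of t, so the shift discards nothing.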
+ * + * This implements the update_fg function from the explanation. + */ +static void rustsecp256k1_v0_4_1_modinv64_update_fg_62_var(int len, rustsecp256k1_v0_4_1_modinv64_signed62 *f, rustsecp256k1_v0_4_1_modinv64_signed62 *g, const rustsecp256k1_v0_4_1_modinv64_trans2x2 *t) { + const int64_t M62 = (int64_t)(UINT64_MAX >> 2); + const int64_t u = t->u, v = t->v, q = t->q, r = t->r; + int64_t fi, gi; + int128_t cf, cg; + int i; + VERIFY_CHECK(len > 0); + /* Start computing t*[f,g]. */ + fi = f->v[0]; + gi = g->v[0]; + cf = (int128_t)u * fi + (int128_t)v * gi; + cg = (int128_t)q * fi + (int128_t)r * gi; + /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */ + VERIFY_CHECK(((int64_t)cf & M62) == 0); cf >>= 62; + VERIFY_CHECK(((int64_t)cg & M62) == 0); cg >>= 62; + /* Now iteratively compute limb i=1..len of t*[f,g], and store them in output limb i-1 (shifting + * down by 62 bits). */ + for (i = 1; i < len; ++i) { + fi = f->v[i]; + gi = g->v[i]; + cf += (int128_t)u * fi + (int128_t)v * gi; + cg += (int128_t)q * fi + (int128_t)r * gi; + f->v[i - 1] = (int64_t)cf & M62; cf >>= 62; + g->v[i - 1] = (int64_t)cg & M62; cg >>= 62; + } + /* What remains is limb (len) of t*[f,g]; store it as output limb (len-1). */ + f->v[len - 1] = (int64_t)cf; + g->v[len - 1] = (int64_t)cg; +} + +/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (constant time in x). */ +static void rustsecp256k1_v0_4_1_modinv64(rustsecp256k1_v0_4_1_modinv64_signed62 *x, const rustsecp256k1_v0_4_1_modinv64_modinfo *modinfo) { + /* Start with d=0, e=1, f=modulus, g=x, zeta=-1. */ + rustsecp256k1_v0_4_1_modinv64_signed62 d = {{0, 0, 0, 0, 0}}; + rustsecp256k1_v0_4_1_modinv64_signed62 e = {{1, 0, 0, 0, 0}}; + rustsecp256k1_v0_4_1_modinv64_signed62 f = modinfo->modulus; + rustsecp256k1_v0_4_1_modinv64_signed62 g = *x; + int i; + int64_t zeta = -1; /* zeta = -(delta+1/2); delta starts at 1/2. */ + + /* Do 10 iterations of 59 divsteps each = 590 divsteps. This suffices for 256-bit inputs. */ + for (i = 0; i < 10; ++i) { + /* Compute transition matrix and new zeta after 59 divsteps. */ + rustsecp256k1_v0_4_1_modinv64_trans2x2 t; + zeta = rustsecp256k1_v0_4_1_modinv64_divsteps_59(zeta, f.v[0], g.v[0], &t); + /* Update d,e using that transition matrix. */ + rustsecp256k1_v0_4_1_modinv64_update_de_62(&d, &e, &t, modinfo); + /* Update f,g using that transition matrix. 
*/ +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */ +#endif + rustsecp256k1_v0_4_1_modinv64_update_fg_62(&f, &g, &t); +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */ +#endif + } + + /* At this point sufficient iterations have been performed that g must have reached 0 + * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g + * values i.e. +/- 1, and d now contains +/- the modular inverse. */ +#ifdef VERIFY + /* g == 0 */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0); + /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, -1) == 0 || + rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, 1) == 0 || + (rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && + rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && + (rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0 || + rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) == 0))); +#endif + + /* Optionally negate d, normalize to [0,modulus), and return it. */ + rustsecp256k1_v0_4_1_modinv64_normalize_62(&d, f.v[4], modinfo); + *x = d; +} + +/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (variable time). */ +static void rustsecp256k1_v0_4_1_modinv64_var(rustsecp256k1_v0_4_1_modinv64_signed62 *x, const rustsecp256k1_v0_4_1_modinv64_modinfo *modinfo) { + /* Start with d=0, e=1, f=modulus, g=x, eta=-1. */ + rustsecp256k1_v0_4_1_modinv64_signed62 d = {{0, 0, 0, 0, 0}}; + rustsecp256k1_v0_4_1_modinv64_signed62 e = {{1, 0, 0, 0, 0}}; + rustsecp256k1_v0_4_1_modinv64_signed62 f = modinfo->modulus; + rustsecp256k1_v0_4_1_modinv64_signed62 g = *x; +#ifdef VERIFY + int i = 0; +#endif + int j, len = 5; + int64_t eta = -1; /* eta = -delta; delta is initially 1 */ + int64_t cond, fn, gn; + + /* Do iterations of 62 divsteps each until g=0. */ + while (1) { + /* Compute transition matrix and new eta after 62 divsteps. */ + rustsecp256k1_v0_4_1_modinv64_trans2x2 t; + eta = rustsecp256k1_v0_4_1_modinv64_divsteps_62_var(eta, f.v[0], g.v[0], &t); + /* Update d,e using that transition matrix. */ + rustsecp256k1_v0_4_1_modinv64_update_de_62(&d, &e, &t, modinfo); + /* Update f,g using that transition matrix. 
*/ +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ +#endif + rustsecp256k1_v0_4_1_modinv64_update_fg_62_var(len, &f, &g, &t); + /* If the bottom limb of g is zero, there is a chance that g=0. */ + if (g.v[0] == 0) { + cond = 0; + /* Check if the other limbs are also 0. */ + for (j = 1; j < len; ++j) { + cond |= g.v[j]; + } + /* If so, we're done. */ + if (cond == 0) break; + } + + /* Determine if len>1 and limb (len-1) of both f and g is 0 or -1. */ + fn = f.v[len - 1]; + gn = g.v[len - 1]; + cond = ((int64_t)len - 2) >> 63; + cond |= fn ^ (fn >> 63); + cond |= gn ^ (gn >> 63); + /* If so, reduce length, propagating the sign of f and g's top limb into the one below. */ + if (cond == 0) { + f.v[len - 2] |= (uint64_t)fn << 62; + g.v[len - 2] |= (uint64_t)gn << 62; + --len; + } +#ifdef VERIFY + VERIFY_CHECK(++i < 12); /* We should never need more than 12*62 = 744 divsteps */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ +#endif + } + + /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of + * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */ +#ifdef VERIFY + /* g == 0 */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0); + /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ + VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, -1) == 0 || + rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, 1) == 0 || + (rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && + rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && + (rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0 || + rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) == 0))); +#endif + + /* Optionally negate d, normalize to [0,modulus), and return it. 
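+     * As f may have converged to -1 rather than +1 (or to +/-modulus when x
+     * was zero), its top limb is passed as the sign argument so that
+     * normalize_62 negates d exactly when f ended up negative.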
*/ + rustsecp256k1_v0_4_1_modinv64_normalize_62(&d, f.v[len - 1], modinfo); + *x = d; +} + +#endif /* SECP256K1_MODINV64_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/Makefile.am.include b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/Makefile.am.include index 84a6ba6..b2c82ad 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/Makefile.am.include +++ b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/Makefile.am.include @@ -1,4 +1,4 @@ -include_HEADERS += include/rustsecp256k1_v0_4_0_ecdh.h +include_HEADERS += include/rustsecp256k1_v0_4_1_ecdh.h noinst_HEADERS += src/modules/ecdh/main_impl.h noinst_HEADERS += src/modules/ecdh/tests_impl.h if USE_BENCHMARK diff --git a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/main_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/main_impl.h index 39ff5df..aa78a16 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/main_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/main_impl.h @@ -7,31 +7,31 @@ #ifndef SECP256K1_MODULE_ECDH_MAIN_H #define SECP256K1_MODULE_ECDH_MAIN_H -#include "include/secp256k1_ecdh.h" -#include "ecmult_const_impl.h" +#include "../../../include/secp256k1_ecdh.h" +#include "../../ecmult_const_impl.h" static int ecdh_hash_function_sha256(unsigned char *output, const unsigned char *x32, const unsigned char *y32, void *data) { unsigned char version = (y32[31] & 0x01) | 0x02; - rustsecp256k1_v0_4_0_sha256 sha; + rustsecp256k1_v0_4_1_sha256 sha; (void)data; - rustsecp256k1_v0_4_0_sha256_initialize(&sha); - rustsecp256k1_v0_4_0_sha256_write(&sha, &version, 1); - rustsecp256k1_v0_4_0_sha256_write(&sha, x32, 32); - rustsecp256k1_v0_4_0_sha256_finalize(&sha, output); + rustsecp256k1_v0_4_1_sha256_initialize(&sha); + rustsecp256k1_v0_4_1_sha256_write(&sha, &version, 1); + rustsecp256k1_v0_4_1_sha256_write(&sha, x32, 32); + rustsecp256k1_v0_4_1_sha256_finalize(&sha, output); return 1; } -const rustsecp256k1_v0_4_0_ecdh_hash_function rustsecp256k1_v0_4_0_ecdh_hash_function_sha256 = ecdh_hash_function_sha256; -const rustsecp256k1_v0_4_0_ecdh_hash_function rustsecp256k1_v0_4_0_ecdh_hash_function_default = ecdh_hash_function_sha256; +const rustsecp256k1_v0_4_1_ecdh_hash_function rustsecp256k1_v0_4_1_ecdh_hash_function_sha256 = ecdh_hash_function_sha256; +const rustsecp256k1_v0_4_1_ecdh_hash_function rustsecp256k1_v0_4_1_ecdh_hash_function_default = ecdh_hash_function_sha256; -int rustsecp256k1_v0_4_0_ecdh(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *output, const rustsecp256k1_v0_4_0_pubkey *point, const unsigned char *scalar, rustsecp256k1_v0_4_0_ecdh_hash_function hashfp, void *data) { +int rustsecp256k1_v0_4_1_ecdh(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output, const rustsecp256k1_v0_4_1_pubkey *point, const unsigned char *scalar, rustsecp256k1_v0_4_1_ecdh_hash_function hashfp, void *data) { int ret = 0; int overflow = 0; - rustsecp256k1_v0_4_0_gej res; - rustsecp256k1_v0_4_0_ge pt; - rustsecp256k1_v0_4_0_scalar s; + rustsecp256k1_v0_4_1_gej res; + rustsecp256k1_v0_4_1_ge pt; + rustsecp256k1_v0_4_1_scalar s; unsigned char x[32]; unsigned char y[32]; @@ -41,29 +41,29 @@ int rustsecp256k1_v0_4_0_ecdh(const rustsecp256k1_v0_4_0_context* ctx, unsigned ARG_CHECK(scalar != NULL); if (hashfp == NULL) { - hashfp = rustsecp256k1_v0_4_0_ecdh_hash_function_default; + hashfp = rustsecp256k1_v0_4_1_ecdh_hash_function_default; } - rustsecp256k1_v0_4_0_pubkey_load(ctx, &pt, point); - rustsecp256k1_v0_4_0_scalar_set_b32(&s, scalar, &overflow); + 
rustsecp256k1_v0_4_1_pubkey_load(ctx, &pt, point); + rustsecp256k1_v0_4_1_scalar_set_b32(&s, scalar, &overflow); - overflow |= rustsecp256k1_v0_4_0_scalar_is_zero(&s); - rustsecp256k1_v0_4_0_scalar_cmov(&s, &rustsecp256k1_v0_4_0_scalar_one, overflow); + overflow |= rustsecp256k1_v0_4_1_scalar_is_zero(&s); + rustsecp256k1_v0_4_1_scalar_cmov(&s, &rustsecp256k1_v0_4_1_scalar_one, overflow); - rustsecp256k1_v0_4_0_ecmult_const(&res, &pt, &s, 256); - rustsecp256k1_v0_4_0_ge_set_gej(&pt, &res); + rustsecp256k1_v0_4_1_ecmult_const(&res, &pt, &s, 256); + rustsecp256k1_v0_4_1_ge_set_gej(&pt, &res); /* Compute a hash of the point */ - rustsecp256k1_v0_4_0_fe_normalize(&pt.x); - rustsecp256k1_v0_4_0_fe_normalize(&pt.y); - rustsecp256k1_v0_4_0_fe_get_b32(x, &pt.x); - rustsecp256k1_v0_4_0_fe_get_b32(y, &pt.y); + rustsecp256k1_v0_4_1_fe_normalize(&pt.x); + rustsecp256k1_v0_4_1_fe_normalize(&pt.y); + rustsecp256k1_v0_4_1_fe_get_b32(x, &pt.x); + rustsecp256k1_v0_4_1_fe_get_b32(y, &pt.y); ret = hashfp(output, x, y, data); memset(x, 0, 32); memset(y, 0, 32); - rustsecp256k1_v0_4_0_scalar_clear(&s); + rustsecp256k1_v0_4_1_scalar_clear(&s); return !!ret & !overflow; } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/tests_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/tests_impl.h index b33d9d2..a89d992 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/tests_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/tests_impl.h @@ -26,71 +26,71 @@ int ecdh_hash_function_custom(unsigned char *output, const unsigned char *x, con void test_ecdh_api(void) { /* Setup context that just counts errors */ - rustsecp256k1_v0_4_0_context *tctx = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN); - rustsecp256k1_v0_4_0_pubkey point; + rustsecp256k1_v0_4_1_context *tctx = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN); + rustsecp256k1_v0_4_1_pubkey point; unsigned char res[32]; unsigned char s_one[32] = { 0 }; int32_t ecount = 0; s_one[31] = 1; - rustsecp256k1_v0_4_0_context_set_error_callback(tctx, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_4_0_context_set_illegal_callback(tctx, counting_illegal_callback_fn, &ecount); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(tctx, &point, s_one) == 1); + rustsecp256k1_v0_4_1_context_set_error_callback(tctx, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_illegal_callback(tctx, counting_illegal_callback_fn, &ecount); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(tctx, &point, s_one) == 1); /* Check all NULLs are detected */ - CHECK(rustsecp256k1_v0_4_0_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_4_0_ecdh(tctx, NULL, &point, s_one, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdh(tctx, NULL, &point, s_one, NULL, NULL) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_ecdh(tctx, res, NULL, s_one, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdh(tctx, res, NULL, s_one, NULL, NULL) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_ecdh(tctx, res, &point, NULL, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdh(tctx, res, &point, NULL, NULL, NULL) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_4_0_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1); CHECK(ecount == 3); /* Cleanup */ - rustsecp256k1_v0_4_0_context_destroy(tctx); + 
rustsecp256k1_v0_4_1_context_destroy(tctx); } void test_ecdh_generator_basepoint(void) { unsigned char s_one[32] = { 0 }; - rustsecp256k1_v0_4_0_pubkey point[2]; + rustsecp256k1_v0_4_1_pubkey point[2]; int i; s_one[31] = 1; /* Check against pubkey creation when the basepoint is the generator */ for (i = 0; i < 100; ++i) { - rustsecp256k1_v0_4_0_sha256 sha; + rustsecp256k1_v0_4_1_sha256 sha; unsigned char s_b32[32]; unsigned char output_ecdh[65]; unsigned char output_ser[32]; unsigned char point_ser[65]; size_t point_ser_len = sizeof(point_ser); - rustsecp256k1_v0_4_0_scalar s; + rustsecp256k1_v0_4_1_scalar s; random_scalar_order(&s); - rustsecp256k1_v0_4_0_scalar_get_b32(s_b32, &s); + rustsecp256k1_v0_4_1_scalar_get_b32(s_b32, &s); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &point[0], s_one) == 1); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &point[1], s_b32) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &point[0], s_one) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &point[1], s_b32) == 1); /* compute using ECDH function with custom hash function */ - CHECK(rustsecp256k1_v0_4_0_ecdh(ctx, output_ecdh, &point[0], s_b32, ecdh_hash_function_custom, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdh(ctx, output_ecdh, &point[0], s_b32, ecdh_hash_function_custom, NULL) == 1); /* compute "explicitly" */ - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_UNCOMPRESSED) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_UNCOMPRESSED) == 1); /* compare */ - CHECK(rustsecp256k1_v0_4_0_memcmp_var(output_ecdh, point_ser, 65) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(output_ecdh, point_ser, 65) == 0); /* compute using ECDH function with default hash function */ - CHECK(rustsecp256k1_v0_4_0_ecdh(ctx, output_ecdh, &point[0], s_b32, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdh(ctx, output_ecdh, &point[0], s_b32, NULL, NULL) == 1); /* compute "explicitly" */ - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_COMPRESSED) == 1); - rustsecp256k1_v0_4_0_sha256_initialize(&sha); - rustsecp256k1_v0_4_0_sha256_write(&sha, point_ser, point_ser_len); - rustsecp256k1_v0_4_0_sha256_finalize(&sha, output_ser); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_COMPRESSED) == 1); + rustsecp256k1_v0_4_1_sha256_initialize(&sha); + rustsecp256k1_v0_4_1_sha256_write(&sha, point_ser, point_ser_len); + rustsecp256k1_v0_4_1_sha256_finalize(&sha, output_ser); /* compare */ - CHECK(rustsecp256k1_v0_4_0_memcmp_var(output_ecdh, output_ser, 32) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(output_ecdh, output_ser, 32) == 0); } } @@ -104,23 +104,23 @@ void test_bad_scalar(void) { }; unsigned char s_rand[32] = { 0 }; unsigned char output[32]; - rustsecp256k1_v0_4_0_scalar rand; - rustsecp256k1_v0_4_0_pubkey point; + rustsecp256k1_v0_4_1_scalar rand; + rustsecp256k1_v0_4_1_pubkey point; /* Create random point */ random_scalar_order(&rand); - rustsecp256k1_v0_4_0_scalar_get_b32(s_rand, &rand); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &point, s_rand) == 1); + rustsecp256k1_v0_4_1_scalar_get_b32(s_rand, &rand); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &point, s_rand) == 1); /* Try to multiply it by bad values */ - CHECK(rustsecp256k1_v0_4_0_ecdh(ctx, output, &point, s_zero, NULL, NULL) == 0); - CHECK(rustsecp256k1_v0_4_0_ecdh(ctx, output, &point, 
s_overflow, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdh(ctx, output, &point, s_zero, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdh(ctx, output, &point, s_overflow, NULL, NULL) == 0); /* ...and a good one */ s_overflow[31] -= 1; - CHECK(rustsecp256k1_v0_4_0_ecdh(ctx, output, &point, s_overflow, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdh(ctx, output, &point, s_overflow, NULL, NULL) == 1); /* Hash function failure results in ecdh failure */ - CHECK(rustsecp256k1_v0_4_0_ecdh(ctx, output, &point, s_overflow, ecdh_hash_function_test_fail, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdh(ctx, output, &point, s_overflow, ecdh_hash_function_test_fail, NULL) == 0); } void run_ecdh_tests(void) { diff --git a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/Makefile.am.include b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/Makefile.am.include index be2dbec..90e236c 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/Makefile.am.include +++ b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/Makefile.am.include @@ -1,4 +1,4 @@ -include_HEADERS += include/rustsecp256k1_v0_4_0_extrakeys.h +include_HEADERS += include/rustsecp256k1_v0_4_1_extrakeys.h noinst_HEADERS += src/modules/extrakeys/tests_impl.h noinst_HEADERS += src/modules/extrakeys/tests_exhaustive_impl.h noinst_HEADERS += src/modules/extrakeys/main_impl.h diff --git a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/main_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/main_impl.h index 80a7184..e1ada81 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/main_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/main_impl.h @@ -4,143 +4,169 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -#ifndef _SECP256K1_MODULE_EXTRAKEYS_MAIN_ -#define _SECP256K1_MODULE_EXTRAKEYS_MAIN_ +#ifndef SECP256K1_MODULE_EXTRAKEYS_MAIN_H +#define SECP256K1_MODULE_EXTRAKEYS_MAIN_H -#include "include/secp256k1.h" -#include "include/secp256k1_extrakeys.h" +#include "../../../include/secp256k1.h" +#include "../../../include/secp256k1_extrakeys.h" -static SECP256K1_INLINE int rustsecp256k1_v0_4_0_xonly_pubkey_load(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ge *ge, const rustsecp256k1_v0_4_0_xonly_pubkey *pubkey) { - return rustsecp256k1_v0_4_0_pubkey_load(ctx, ge, (const rustsecp256k1_v0_4_0_pubkey *) pubkey); +static SECP256K1_INLINE int rustsecp256k1_v0_4_1_xonly_pubkey_load(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ge *ge, const rustsecp256k1_v0_4_1_xonly_pubkey *pubkey) { + return rustsecp256k1_v0_4_1_pubkey_load(ctx, ge, (const rustsecp256k1_v0_4_1_pubkey *) pubkey); } -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_xonly_pubkey_save(rustsecp256k1_v0_4_0_xonly_pubkey *pubkey, rustsecp256k1_v0_4_0_ge *ge) { - rustsecp256k1_v0_4_0_pubkey_save((rustsecp256k1_v0_4_0_pubkey *) pubkey, ge); +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_xonly_pubkey_save(rustsecp256k1_v0_4_1_xonly_pubkey *pubkey, rustsecp256k1_v0_4_1_ge *ge) { + rustsecp256k1_v0_4_1_pubkey_save((rustsecp256k1_v0_4_1_pubkey *) pubkey, ge); } -int rustsecp256k1_v0_4_0_xonly_pubkey_parse(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_xonly_pubkey *pubkey, const unsigned char *input32) { - rustsecp256k1_v0_4_0_ge pk; - rustsecp256k1_v0_4_0_fe x; +int rustsecp256k1_v0_4_1_xonly_pubkey_parse(const rustsecp256k1_v0_4_1_context* ctx, 
rustsecp256k1_v0_4_1_xonly_pubkey *pubkey, const unsigned char *input32) { + rustsecp256k1_v0_4_1_ge pk; + rustsecp256k1_v0_4_1_fe x; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); memset(pubkey, 0, sizeof(*pubkey)); ARG_CHECK(input32 != NULL); - if (!rustsecp256k1_v0_4_0_fe_set_b32(&x, input32)) { + if (!rustsecp256k1_v0_4_1_fe_set_b32(&x, input32)) { return 0; } - if (!rustsecp256k1_v0_4_0_ge_set_xo_var(&pk, &x, 0)) { + if (!rustsecp256k1_v0_4_1_ge_set_xo_var(&pk, &x, 0)) { return 0; } - if (!rustsecp256k1_v0_4_0_ge_is_in_correct_subgroup(&pk)) { + if (!rustsecp256k1_v0_4_1_ge_is_in_correct_subgroup(&pk)) { return 0; } - rustsecp256k1_v0_4_0_xonly_pubkey_save(pubkey, &pk); + rustsecp256k1_v0_4_1_xonly_pubkey_save(pubkey, &pk); return 1; } -int rustsecp256k1_v0_4_0_xonly_pubkey_serialize(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *output32, const rustsecp256k1_v0_4_0_xonly_pubkey *pubkey) { - rustsecp256k1_v0_4_0_ge pk; +int rustsecp256k1_v0_4_1_xonly_pubkey_serialize(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output32, const rustsecp256k1_v0_4_1_xonly_pubkey *pubkey) { + rustsecp256k1_v0_4_1_ge pk; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output32 != NULL); memset(output32, 0, 32); ARG_CHECK(pubkey != NULL); - if (!rustsecp256k1_v0_4_0_xonly_pubkey_load(ctx, &pk, pubkey)) { + if (!rustsecp256k1_v0_4_1_xonly_pubkey_load(ctx, &pk, pubkey)) { return 0; } - rustsecp256k1_v0_4_0_fe_get_b32(output32, &pk.x); + rustsecp256k1_v0_4_1_fe_get_b32(output32, &pk.x); return 1; } +int rustsecp256k1_v0_4_1_xonly_pubkey_cmp(const rustsecp256k1_v0_4_1_context* ctx, const rustsecp256k1_v0_4_1_xonly_pubkey* pk0, const rustsecp256k1_v0_4_1_xonly_pubkey* pk1) { + unsigned char out[2][32]; + const rustsecp256k1_v0_4_1_xonly_pubkey* pk[2]; + int i; + + VERIFY_CHECK(ctx != NULL); + pk[0] = pk0; pk[1] = pk1; + for (i = 0; i < 2; i++) { + /* If the public key is NULL or invalid, xonly_pubkey_serialize will + * call the illegal_callback and return 0. In that case we will + * serialize the key as all zeros which is less than any valid public + * key. This results in consistent comparisons even if NULL or invalid + * pubkeys are involved and prevents edge cases such as sorting + * algorithms that use this function and do not terminate as a + * result. */ + if (!rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, out[i], pk[i])) { + /* Note that xonly_pubkey_serialize should already set the output to + * zero in that case, but it's not guaranteed by the API, we can't + * test it and writing a VERIFY_CHECK is more complex than + * explicitly memsetting (again). */ + memset(out[i], 0, sizeof(out[i])); + } + } + return rustsecp256k1_v0_4_1_memcmp_var(out[0], out[1], sizeof(out[1])); +} + /** Keeps a group element as is if it has an even Y and otherwise negates it. * y_parity is set to 0 in the former case and to 1 in the latter case. * Requires that the coordinates of r are normalized. 
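+ *
+ * Usage sketch (editorial example, assuming a normalized, non-infinity group
+ * element ge):
+ *
+ *   int parity = rustsecp256k1_v0_4_1_extrakeys_ge_even_y(&ge);
+ *   (afterwards ge.y is even; parity is 1 iff ge was negated)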
*/ -static int rustsecp256k1_v0_4_0_extrakeys_ge_even_y(rustsecp256k1_v0_4_0_ge *r) { +static int rustsecp256k1_v0_4_1_extrakeys_ge_even_y(rustsecp256k1_v0_4_1_ge *r) { int y_parity = 0; - VERIFY_CHECK(!rustsecp256k1_v0_4_0_ge_is_infinity(r)); + VERIFY_CHECK(!rustsecp256k1_v0_4_1_ge_is_infinity(r)); - if (rustsecp256k1_v0_4_0_fe_is_odd(&r->y)) { - rustsecp256k1_v0_4_0_fe_negate(&r->y, &r->y, 1); + if (rustsecp256k1_v0_4_1_fe_is_odd(&r->y)) { + rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->y, 1); y_parity = 1; } return y_parity; } -int rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_xonly_pubkey *xonly_pubkey, int *pk_parity, const rustsecp256k1_v0_4_0_pubkey *pubkey) { - rustsecp256k1_v0_4_0_ge pk; +int rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_xonly_pubkey *xonly_pubkey, int *pk_parity, const rustsecp256k1_v0_4_1_pubkey *pubkey) { + rustsecp256k1_v0_4_1_ge pk; int tmp; VERIFY_CHECK(ctx != NULL); ARG_CHECK(xonly_pubkey != NULL); ARG_CHECK(pubkey != NULL); - if (!rustsecp256k1_v0_4_0_pubkey_load(ctx, &pk, pubkey)) { + if (!rustsecp256k1_v0_4_1_pubkey_load(ctx, &pk, pubkey)) { return 0; } - tmp = rustsecp256k1_v0_4_0_extrakeys_ge_even_y(&pk); + tmp = rustsecp256k1_v0_4_1_extrakeys_ge_even_y(&pk); if (pk_parity != NULL) { *pk_parity = tmp; } - rustsecp256k1_v0_4_0_xonly_pubkey_save(xonly_pubkey, &pk); + rustsecp256k1_v0_4_1_xonly_pubkey_save(xonly_pubkey, &pk); return 1; } -int rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *output_pubkey, const rustsecp256k1_v0_4_0_xonly_pubkey *internal_pubkey, const unsigned char *tweak32) { - rustsecp256k1_v0_4_0_ge pk; +int rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *output_pubkey, const rustsecp256k1_v0_4_1_xonly_pubkey *internal_pubkey, const unsigned char *tweak32) { + rustsecp256k1_v0_4_1_ge pk; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output_pubkey != NULL); memset(output_pubkey, 0, sizeof(*output_pubkey)); - ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)); + ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx)); ARG_CHECK(internal_pubkey != NULL); ARG_CHECK(tweak32 != NULL); - if (!rustsecp256k1_v0_4_0_xonly_pubkey_load(ctx, &pk, internal_pubkey) - || !rustsecp256k1_v0_4_0_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &pk, tweak32)) { + if (!rustsecp256k1_v0_4_1_xonly_pubkey_load(ctx, &pk, internal_pubkey) + || !rustsecp256k1_v0_4_1_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &pk, tweak32)) { return 0; } - rustsecp256k1_v0_4_0_pubkey_save(output_pubkey, &pk); + rustsecp256k1_v0_4_1_pubkey_save(output_pubkey, &pk); return 1; } -int rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(const rustsecp256k1_v0_4_0_context* ctx, const unsigned char *tweaked_pubkey32, int tweaked_pk_parity, const rustsecp256k1_v0_4_0_xonly_pubkey *internal_pubkey, const unsigned char *tweak32) { - rustsecp256k1_v0_4_0_ge pk; +int rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(const rustsecp256k1_v0_4_1_context* ctx, const unsigned char *tweaked_pubkey32, int tweaked_pk_parity, const rustsecp256k1_v0_4_1_xonly_pubkey *internal_pubkey, const unsigned char *tweak32) { + rustsecp256k1_v0_4_1_ge pk; unsigned char pk_expected32[32]; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)); + 
ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx)); ARG_CHECK(internal_pubkey != NULL); ARG_CHECK(tweaked_pubkey32 != NULL); ARG_CHECK(tweak32 != NULL); - if (!rustsecp256k1_v0_4_0_xonly_pubkey_load(ctx, &pk, internal_pubkey) - || !rustsecp256k1_v0_4_0_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &pk, tweak32)) { + if (!rustsecp256k1_v0_4_1_xonly_pubkey_load(ctx, &pk, internal_pubkey) + || !rustsecp256k1_v0_4_1_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &pk, tweak32)) { return 0; } - rustsecp256k1_v0_4_0_fe_normalize_var(&pk.x); - rustsecp256k1_v0_4_0_fe_normalize_var(&pk.y); - rustsecp256k1_v0_4_0_fe_get_b32(pk_expected32, &pk.x); + rustsecp256k1_v0_4_1_fe_normalize_var(&pk.x); + rustsecp256k1_v0_4_1_fe_normalize_var(&pk.y); + rustsecp256k1_v0_4_1_fe_get_b32(pk_expected32, &pk.x); - return rustsecp256k1_v0_4_0_memcmp_var(&pk_expected32, tweaked_pubkey32, 32) == 0 - && rustsecp256k1_v0_4_0_fe_is_odd(&pk.y) == tweaked_pk_parity; + return rustsecp256k1_v0_4_1_memcmp_var(&pk_expected32, tweaked_pubkey32, 32) == 0 + && rustsecp256k1_v0_4_1_fe_is_odd(&pk.y) == tweaked_pk_parity; } -static void rustsecp256k1_v0_4_0_keypair_save(rustsecp256k1_v0_4_0_keypair *keypair, const rustsecp256k1_v0_4_0_scalar *sk, rustsecp256k1_v0_4_0_ge *pk) { - rustsecp256k1_v0_4_0_scalar_get_b32(&keypair->data[0], sk); - rustsecp256k1_v0_4_0_pubkey_save((rustsecp256k1_v0_4_0_pubkey *)&keypair->data[32], pk); +static void rustsecp256k1_v0_4_1_keypair_save(rustsecp256k1_v0_4_1_keypair *keypair, const rustsecp256k1_v0_4_1_scalar *sk, rustsecp256k1_v0_4_1_ge *pk) { + rustsecp256k1_v0_4_1_scalar_get_b32(&keypair->data[0], sk); + rustsecp256k1_v0_4_1_pubkey_save((rustsecp256k1_v0_4_1_pubkey *)&keypair->data[32], pk); } -static int rustsecp256k1_v0_4_0_keypair_seckey_load(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_scalar *sk, const rustsecp256k1_v0_4_0_keypair *keypair) { +static int rustsecp256k1_v0_4_1_keypair_seckey_load(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_scalar *sk, const rustsecp256k1_v0_4_1_keypair *keypair) { int ret; - ret = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(sk, &keypair->data[0]); + ret = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(sk, &keypair->data[0]); /* We can declassify ret here because sk is only zero if a keypair function * failed (which zeroes the keypair) and its return value is ignored. */ - rustsecp256k1_v0_4_0_declassify(ctx, &ret, sizeof(ret)); + rustsecp256k1_v0_4_1_declassify(ctx, &ret, sizeof(ret)); ARG_CHECK(ret); return ret; } @@ -148,45 +174,55 @@ static int rustsecp256k1_v0_4_0_keypair_seckey_load(const rustsecp256k1_v0_4_0_c /* Load a keypair into pk and sk (if non-NULL). This function declassifies pk * and ARG_CHECKs that the keypair is not invalid. It always initializes sk and * pk with dummy values. */ -static int rustsecp256k1_v0_4_0_keypair_load(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_scalar *sk, rustsecp256k1_v0_4_0_ge *pk, const rustsecp256k1_v0_4_0_keypair *keypair) { +static int rustsecp256k1_v0_4_1_keypair_load(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_scalar *sk, rustsecp256k1_v0_4_1_ge *pk, const rustsecp256k1_v0_4_1_keypair *keypair) { int ret; - const rustsecp256k1_v0_4_0_pubkey *pubkey = (const rustsecp256k1_v0_4_0_pubkey *)&keypair->data[32]; + const rustsecp256k1_v0_4_1_pubkey *pubkey = (const rustsecp256k1_v0_4_1_pubkey *)&keypair->data[32]; /* Need to declassify the pubkey because pubkey_load ARG_CHECKs if it's * invalid. 
*/ - rustsecp256k1_v0_4_0_declassify(ctx, pubkey, sizeof(*pubkey)); - ret = rustsecp256k1_v0_4_0_pubkey_load(ctx, pk, pubkey); + rustsecp256k1_v0_4_1_declassify(ctx, pubkey, sizeof(*pubkey)); + ret = rustsecp256k1_v0_4_1_pubkey_load(ctx, pk, pubkey); if (sk != NULL) { - ret = ret && rustsecp256k1_v0_4_0_keypair_seckey_load(ctx, sk, keypair); + ret = ret && rustsecp256k1_v0_4_1_keypair_seckey_load(ctx, sk, keypair); } if (!ret) { - *pk = rustsecp256k1_v0_4_0_ge_const_g; + *pk = rustsecp256k1_v0_4_1_ge_const_g; if (sk != NULL) { - *sk = rustsecp256k1_v0_4_0_scalar_one; + *sk = rustsecp256k1_v0_4_1_scalar_one; } } return ret; } -int rustsecp256k1_v0_4_0_keypair_create(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_keypair *keypair, const unsigned char *seckey32) { - rustsecp256k1_v0_4_0_scalar sk; - rustsecp256k1_v0_4_0_ge pk; +int rustsecp256k1_v0_4_1_keypair_create(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_keypair *keypair, const unsigned char *seckey32) { + rustsecp256k1_v0_4_1_scalar sk; + rustsecp256k1_v0_4_1_ge pk; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(keypair != NULL); memset(keypair, 0, sizeof(*keypair)); - ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(seckey32 != NULL); - ret = rustsecp256k1_v0_4_0_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &sk, &pk, seckey32); - rustsecp256k1_v0_4_0_keypair_save(keypair, &sk, &pk); - rustsecp256k1_v0_4_0_memczero(keypair, sizeof(*keypair), !ret); + ret = rustsecp256k1_v0_4_1_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &sk, &pk, seckey32); + rustsecp256k1_v0_4_1_keypair_save(keypair, &sk, &pk); + rustsecp256k1_v0_4_1_memczero(keypair, sizeof(*keypair), !ret); - rustsecp256k1_v0_4_0_scalar_clear(&sk); + rustsecp256k1_v0_4_1_scalar_clear(&sk); return ret; } -int rustsecp256k1_v0_4_0_keypair_pub(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *pubkey, const rustsecp256k1_v0_4_0_keypair *keypair) { +int rustsecp256k1_v0_4_1_keypair_sec(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const rustsecp256k1_v0_4_1_keypair *keypair) { + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(seckey != NULL); + memset(seckey, 0, 32); + ARG_CHECK(keypair != NULL); + + memcpy(seckey, &keypair->data[0], 32); + return 1; +} + +int rustsecp256k1_v0_4_1_keypair_pub(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *pubkey, const rustsecp256k1_v0_4_1_keypair *keypair) { VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); memset(pubkey, 0, sizeof(*pubkey)); @@ -196,8 +232,8 @@ int rustsecp256k1_v0_4_0_keypair_pub(const rustsecp256k1_v0_4_0_context* ctx, ru return 1; } -int rustsecp256k1_v0_4_0_keypair_xonly_pub(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_xonly_pubkey *pubkey, int *pk_parity, const rustsecp256k1_v0_4_0_keypair *keypair) { - rustsecp256k1_v0_4_0_ge pk; +int rustsecp256k1_v0_4_1_keypair_xonly_pub(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_xonly_pubkey *pubkey, int *pk_parity, const rustsecp256k1_v0_4_1_keypair *keypair) { + rustsecp256k1_v0_4_1_ge pk; int tmp; VERIFY_CHECK(ctx != NULL); @@ -205,46 +241,46 @@ int rustsecp256k1_v0_4_0_keypair_xonly_pub(const rustsecp256k1_v0_4_0_context* c memset(pubkey, 0, sizeof(*pubkey)); ARG_CHECK(keypair != NULL); - if (!rustsecp256k1_v0_4_0_keypair_load(ctx, NULL, &pk, keypair)) { + if (!rustsecp256k1_v0_4_1_keypair_load(ctx, NULL, &pk, keypair)) { return 0; } 
- tmp = rustsecp256k1_v0_4_0_extrakeys_ge_even_y(&pk); + tmp = rustsecp256k1_v0_4_1_extrakeys_ge_even_y(&pk); if (pk_parity != NULL) { *pk_parity = tmp; } - rustsecp256k1_v0_4_0_xonly_pubkey_save(pubkey, &pk); + rustsecp256k1_v0_4_1_xonly_pubkey_save(pubkey, &pk); return 1; } -int rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_keypair *keypair, const unsigned char *tweak32) { - rustsecp256k1_v0_4_0_ge pk; - rustsecp256k1_v0_4_0_scalar sk; +int rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_keypair *keypair, const unsigned char *tweak32) { + rustsecp256k1_v0_4_1_ge pk; + rustsecp256k1_v0_4_1_scalar sk; int y_parity; int ret; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)); + ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx)); ARG_CHECK(keypair != NULL); ARG_CHECK(tweak32 != NULL); - ret = rustsecp256k1_v0_4_0_keypair_load(ctx, &sk, &pk, keypair); + ret = rustsecp256k1_v0_4_1_keypair_load(ctx, &sk, &pk, keypair); memset(keypair, 0, sizeof(*keypair)); - y_parity = rustsecp256k1_v0_4_0_extrakeys_ge_even_y(&pk); + y_parity = rustsecp256k1_v0_4_1_extrakeys_ge_even_y(&pk); if (y_parity == 1) { - rustsecp256k1_v0_4_0_scalar_negate(&sk, &sk); + rustsecp256k1_v0_4_1_scalar_negate(&sk, &sk); } - ret &= rustsecp256k1_v0_4_0_ec_seckey_tweak_add_helper(&sk, tweak32); - ret &= rustsecp256k1_v0_4_0_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &pk, tweak32); + ret &= rustsecp256k1_v0_4_1_ec_seckey_tweak_add_helper(&sk, tweak32); + ret &= rustsecp256k1_v0_4_1_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &pk, tweak32); - rustsecp256k1_v0_4_0_declassify(ctx, &ret, sizeof(ret)); + rustsecp256k1_v0_4_1_declassify(ctx, &ret, sizeof(ret)); if (ret) { - rustsecp256k1_v0_4_0_keypair_save(keypair, &sk, &pk); + rustsecp256k1_v0_4_1_keypair_save(keypair, &sk, &pk); } - rustsecp256k1_v0_4_0_scalar_clear(&sk); + rustsecp256k1_v0_4_1_scalar_clear(&sk); return ret; } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h index f137086..0d00075 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h @@ -4,60 +4,60 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -#ifndef _SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_ -#define _SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_ +#ifndef SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_H +#define SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_H #include "src/modules/extrakeys/main_impl.h" -#include "include/secp256k1_extrakeys.h" +#include "../../../include/secp256k1_extrakeys.h" -static void test_exhaustive_extrakeys(const rustsecp256k1_v0_4_0_context *ctx, const rustsecp256k1_v0_4_0_ge* group) { - rustsecp256k1_v0_4_0_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1]; - rustsecp256k1_v0_4_0_pubkey pubkey[EXHAUSTIVE_TEST_ORDER - 1]; - rustsecp256k1_v0_4_0_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1]; +static void test_exhaustive_extrakeys(const rustsecp256k1_v0_4_1_context *ctx, const rustsecp256k1_v0_4_1_ge* group) { + rustsecp256k1_v0_4_1_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1]; + rustsecp256k1_v0_4_1_pubkey pubkey[EXHAUSTIVE_TEST_ORDER - 1]; + 
rustsecp256k1_v0_4_1_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1]; int parities[EXHAUSTIVE_TEST_ORDER - 1]; unsigned char xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - 1][32]; int i; for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_4_0_fe fe; - rustsecp256k1_v0_4_0_scalar scalar_i; + rustsecp256k1_v0_4_1_fe fe; + rustsecp256k1_v0_4_1_scalar scalar_i; unsigned char buf[33]; int parity; - rustsecp256k1_v0_4_0_scalar_set_int(&scalar_i, i); - rustsecp256k1_v0_4_0_scalar_get_b32(buf, &scalar_i); + rustsecp256k1_v0_4_1_scalar_set_int(&scalar_i, i); + rustsecp256k1_v0_4_1_scalar_get_b32(buf, &scalar_i); /* Construct pubkey and keypair. */ - CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair[i - 1], buf)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey[i - 1], buf)); + CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair[i - 1], buf)); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey[i - 1], buf)); /* Construct serialized xonly_pubkey from keypair. */ - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parities[i - 1], &keypair[i - 1])); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1])); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parities[i - 1], &keypair[i - 1])); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1])); /* Parse the xonly_pubkey back and verify it matches the previously serialized value. */ - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &xonly_pubkey[i - 1], xonly_pubkey_bytes[i - 1])); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1])); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &xonly_pubkey[i - 1], xonly_pubkey_bytes[i - 1])); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1])); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0); /* Construct the xonly_pubkey from the pubkey, and verify it matches the same. */ - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(ctx, &xonly_pubkey[i - 1], &parity, &pubkey[i - 1])); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(ctx, &xonly_pubkey[i - 1], &parity, &pubkey[i - 1])); CHECK(parity == parities[i - 1]); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1])); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1])); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0); /* Compare the xonly_pubkey bytes against the precomputed group. */ - rustsecp256k1_v0_4_0_fe_set_b32(&fe, xonly_pubkey_bytes[i - 1]); - CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&fe, &group[i].x)); + rustsecp256k1_v0_4_1_fe_set_b32(&fe, xonly_pubkey_bytes[i - 1]); + CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&fe, &group[i].x)); /* Check the parity against the precomputed group. */ fe = group[i].y; - rustsecp256k1_v0_4_0_fe_normalize_var(&fe); - CHECK(rustsecp256k1_v0_4_0_fe_is_odd(&fe) == parities[i - 1]); + rustsecp256k1_v0_4_1_fe_normalize_var(&fe); + CHECK(rustsecp256k1_v0_4_1_fe_is_odd(&fe) == parities[i - 1]); /* Verify that the higher half is identical to the lower half mirrored. 
*/ if (i > EXHAUSTIVE_TEST_ORDER / 2) { - CHECK(rustsecp256k1_v0_4_0_memcmp_var(xonly_pubkey_bytes[i - 1], xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - i - 1], 32) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(xonly_pubkey_bytes[i - 1], xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - i - 1], 32) == 0); CHECK(parities[i - 1] == 1 - parities[EXHAUSTIVE_TEST_ORDER - i - 1]); } } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_impl.h index ddbe5e3..1e56fda 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_impl.h @@ -4,24 +4,24 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -#ifndef _SECP256K1_MODULE_EXTRAKEYS_TESTS_ -#define _SECP256K1_MODULE_EXTRAKEYS_TESTS_ +#ifndef SECP256K1_MODULE_EXTRAKEYS_TESTS_H +#define SECP256K1_MODULE_EXTRAKEYS_TESTS_H -#include "secp256k1_extrakeys.h" +#include "../../../include/secp256k1_extrakeys.h" -static rustsecp256k1_v0_4_0_context* api_test_context(int flags, int *ecount) { - rustsecp256k1_v0_4_0_context *ctx0 = rustsecp256k1_v0_4_0_context_create(flags); - rustsecp256k1_v0_4_0_context_set_error_callback(ctx0, counting_illegal_callback_fn, ecount); - rustsecp256k1_v0_4_0_context_set_illegal_callback(ctx0, counting_illegal_callback_fn, ecount); +static rustsecp256k1_v0_4_1_context* api_test_context(int flags, int *ecount) { + rustsecp256k1_v0_4_1_context *ctx0 = rustsecp256k1_v0_4_1_context_create(flags); + rustsecp256k1_v0_4_1_context_set_error_callback(ctx0, counting_illegal_callback_fn, ecount); + rustsecp256k1_v0_4_1_context_set_illegal_callback(ctx0, counting_illegal_callback_fn, ecount); return ctx0; } void test_xonly_pubkey(void) { - rustsecp256k1_v0_4_0_pubkey pk; - rustsecp256k1_v0_4_0_xonly_pubkey xonly_pk, xonly_pk_tmp; - rustsecp256k1_v0_4_0_ge pk1; - rustsecp256k1_v0_4_0_ge pk2; - rustsecp256k1_v0_4_0_fe y; + rustsecp256k1_v0_4_1_pubkey pk; + rustsecp256k1_v0_4_1_xonly_pubkey xonly_pk, xonly_pk_tmp; + rustsecp256k1_v0_4_1_ge pk1; + rustsecp256k1_v0_4_1_ge pk2; + rustsecp256k1_v0_4_1_fe y; unsigned char sk[32]; unsigned char xy_sk[32]; unsigned char buf32[32]; @@ -31,249 +31,286 @@ void test_xonly_pubkey(void) { int i; int ecount; - rustsecp256k1_v0_4_0_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount); - rustsecp256k1_v0_4_0_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount); - rustsecp256k1_v0_4_0_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount); + rustsecp256k1_v0_4_1_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount); + rustsecp256k1_v0_4_1_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount); + rustsecp256k1_v0_4_1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount); - rustsecp256k1_v0_4_0_testrand256(sk); + rustsecp256k1_v0_4_1_testrand256(sk); memset(ones32, 0xFF, 32); - rustsecp256k1_v0_4_0_testrand256(xy_sk); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(sign, &pk, sk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1); + rustsecp256k1_v0_4_1_testrand256(xy_sk); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(sign, &pk, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1); /* Test xonly_pubkey_from_pubkey */ ecount = 0; - 
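
api_test_context above is the harness glue for the whole file: instead of letting an illegal argument abort the process, it reroutes the library's error and illegal-argument callbacks into a counter, so every ARG_CHECK failure can be asserted on via ecount. counting_illegal_callback_fn is defined elsewhere in tests.c; presumably it just increments the int it is handed, along these lines (a sketch, not the actual definition):

    #include "secp256k1.h"

    /* Count ARG_CHECK violations instead of aborting (illustrative body). */
    static void count_cb(const char *msg, void *data) {
        (void)msg;
        (*(int *)data)++;
    }

    static rustsecp256k1_v0_4_1_context *counting_context(unsigned int flags, int *ecount) {
        rustsecp256k1_v0_4_1_context *ctx = rustsecp256k1_v0_4_1_context_create(flags);
        rustsecp256k1_v0_4_1_context_set_error_callback(ctx, count_cb, ecount);
        rustsecp256k1_v0_4_1_context_set_illegal_callback(ctx, count_cb, ecount);
        return ctx;
    }
    /* Passing e.g. a NULL output to xonly_pubkey_serialize now returns 0 and
     * bumps *ecount rather than terminating the test binary. */
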
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(sign, &xonly_pk, &pk_parity, &pk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(verify, &xonly_pk, &pk_parity, &pk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, NULL, &pk_parity, &pk) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(sign, &xonly_pk, &pk_parity, &pk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(verify, &xonly_pk, &pk_parity, &pk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, NULL, &pk_parity, &pk) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, &xonly_pk, NULL, &pk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, &xonly_pk, NULL, &pk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, NULL) == 0); CHECK(ecount == 2); memset(&pk, 0, sizeof(pk)); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 0); CHECK(ecount == 3); /* Choose a secret key such that the resulting pubkey and xonly_pubkey match. */ memset(sk, 0, sizeof(sk)); sk[0] = 1; - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pk, sk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pk, &xonly_pk, sizeof(pk)) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pk, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pk, &xonly_pk, sizeof(pk)) == 0); CHECK(pk_parity == 0); /* Choose a secret key such that pubkey and xonly_pubkey are each others * negation. 
*/ sk[0] = 2; - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pk, sk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&xonly_pk, &pk, sizeof(xonly_pk)) != 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pk, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&xonly_pk, &pk, sizeof(xonly_pk)) != 0); CHECK(pk_parity == 1); - rustsecp256k1_v0_4_0_pubkey_load(ctx, &pk1, &pk); - rustsecp256k1_v0_4_0_pubkey_load(ctx, &pk2, (rustsecp256k1_v0_4_0_pubkey *) &xonly_pk); - CHECK(rustsecp256k1_v0_4_0_fe_equal(&pk1.x, &pk2.x) == 1); - rustsecp256k1_v0_4_0_fe_negate(&y, &pk2.y, 1); - CHECK(rustsecp256k1_v0_4_0_fe_equal(&pk1.y, &y) == 1); + rustsecp256k1_v0_4_1_pubkey_load(ctx, &pk1, &pk); + rustsecp256k1_v0_4_1_pubkey_load(ctx, &pk2, (rustsecp256k1_v0_4_1_pubkey *) &xonly_pk); + CHECK(rustsecp256k1_v0_4_1_fe_equal(&pk1.x, &pk2.x) == 1); + rustsecp256k1_v0_4_1_fe_negate(&y, &pk2.y, 1); + CHECK(rustsecp256k1_v0_4_1_fe_equal(&pk1.y, &y) == 1); /* Test xonly_pubkey_serialize and xonly_pubkey_parse */ ecount = 0; - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(none, NULL, &xonly_pk) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(none, NULL, &xonly_pk) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(none, buf32, NULL) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(buf32, zeros64, 32) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(none, buf32, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(buf32, zeros64, 32) == 0); CHECK(ecount == 2); { /* A pubkey filled with 0s will fail to serialize due to pubkey_load * special casing. 
*/ - rustsecp256k1_v0_4_0_xonly_pubkey pk_tmp; + rustsecp256k1_v0_4_1_xonly_pubkey pk_tmp; memset(&pk_tmp, 0, sizeof(pk_tmp)); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(none, buf32, &pk_tmp) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(none, buf32, &pk_tmp) == 0); } /* pubkey_load called illegal callback */ CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(none, buf32, &xonly_pk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(none, buf32, &xonly_pk) == 1); ecount = 0; - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(none, NULL, buf32) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(none, NULL, buf32) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(none, &xonly_pk, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(none, &xonly_pk, NULL) == 0); CHECK(ecount == 2); /* Serialization and parse roundtrip */ - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, &xonly_pk, NULL, &pk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, buf32, &xonly_pk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &xonly_pk_tmp, buf32) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(xonly_pk)) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, &xonly_pk, NULL, &pk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, buf32, &xonly_pk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &xonly_pk_tmp, buf32) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(xonly_pk)) == 0); /* Test parsing invalid field elements */ memset(&xonly_pk, 1, sizeof(xonly_pk)); /* Overflowing field element */ - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(none, &xonly_pk, ones32) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(none, &xonly_pk, ones32) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); memset(&xonly_pk, 1, sizeof(xonly_pk)); /* There's no point with x-coordinate 0 on secp256k1 */ - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(none, &xonly_pk, zeros64) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(none, &xonly_pk, zeros64) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); /* If a random 32-byte string can not be parsed with ec_pubkey_parse * (because interpreted as X coordinate it does not correspond to a point on * the curve) then xonly_pubkey_parse should fail as well. 
*/ for (i = 0; i < count; i++) { unsigned char rand33[33]; - rustsecp256k1_v0_4_0_testrand256(&rand33[1]); + rustsecp256k1_v0_4_1_testrand256(&rand33[1]); rand33[0] = SECP256K1_TAG_PUBKEY_EVEN; - if (!rustsecp256k1_v0_4_0_ec_pubkey_parse(ctx, &pk, rand33, 33)) { + if (!rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, &pk, rand33, 33)) { memset(&xonly_pk, 1, sizeof(xonly_pk)); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); } else { - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 1); } } CHECK(ecount == 2); - rustsecp256k1_v0_4_0_context_destroy(none); - rustsecp256k1_v0_4_0_context_destroy(sign); - rustsecp256k1_v0_4_0_context_destroy(verify); + rustsecp256k1_v0_4_1_context_destroy(none); + rustsecp256k1_v0_4_1_context_destroy(sign); + rustsecp256k1_v0_4_1_context_destroy(verify); +} + +void test_xonly_pubkey_comparison(void) { + unsigned char pk1_ser[32] = { + 0x58, 0x84, 0xb3, 0xa2, 0x4b, 0x97, 0x37, 0x88, 0x92, 0x38, 0xa6, 0x26, 0x62, 0x52, 0x35, 0x11, + 0xd0, 0x9a, 0xa1, 0x1b, 0x80, 0x0b, 0x5e, 0x93, 0x80, 0x26, 0x11, 0xef, 0x67, 0x4b, 0xd9, 0x23 + }; + const unsigned char pk2_ser[32] = { + 0xde, 0x36, 0x0e, 0x87, 0x59, 0x8f, 0x3c, 0x01, 0x36, 0x2a, 0x2a, 0xb8, 0xc6, 0xf4, 0x5e, 0x4d, + 0xb2, 0xc2, 0xd5, 0x03, 0xa7, 0xf9, 0xf1, 0x4f, 0xa8, 0xfa, 0x95, 0xa8, 0xe9, 0x69, 0x76, 0x1c + }; + rustsecp256k1_v0_4_1_xonly_pubkey pk1; + rustsecp256k1_v0_4_1_xonly_pubkey pk2; + int ecount = 0; + rustsecp256k1_v0_4_1_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount); + + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(none, &pk1, pk1_ser) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(none, &pk2, pk2_ser) == 1); + + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_cmp(none, NULL, &pk2) < 0); + CHECK(ecount == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_cmp(none, &pk1, NULL) > 0); + CHECK(ecount == 2); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_cmp(none, &pk1, &pk2) < 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_cmp(none, &pk2, &pk1) > 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_cmp(none, &pk1, &pk1) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_cmp(none, &pk2, &pk2) == 0); + CHECK(ecount == 2); + memset(&pk1, 0, sizeof(pk1)); /* illegal pubkey */ + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_cmp(none, &pk1, &pk2) < 0); + CHECK(ecount == 3); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_cmp(none, &pk1, &pk1) == 0); + CHECK(ecount == 5); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_cmp(none, &pk2, &pk1) > 0); + CHECK(ecount == 6); + + rustsecp256k1_v0_4_1_context_destroy(none); } void test_xonly_pubkey_tweak(void) { unsigned char zeros64[64] = { 0 }; unsigned char overflows[32]; unsigned char sk[32]; - rustsecp256k1_v0_4_0_pubkey internal_pk; - rustsecp256k1_v0_4_0_xonly_pubkey internal_xonly_pk; - rustsecp256k1_v0_4_0_pubkey output_pk; + rustsecp256k1_v0_4_1_pubkey internal_pk; + rustsecp256k1_v0_4_1_xonly_pubkey internal_xonly_pk; + rustsecp256k1_v0_4_1_pubkey output_pk; int pk_parity; unsigned char tweak[32]; int i; int ecount; - rustsecp256k1_v0_4_0_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount); - rustsecp256k1_v0_4_0_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, 
&ecount); - rustsecp256k1_v0_4_0_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount); + rustsecp256k1_v0_4_1_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount); + rustsecp256k1_v0_4_1_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount); + rustsecp256k1_v0_4_1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount); memset(overflows, 0xff, sizeof(overflows)); - rustsecp256k1_v0_4_0_testrand256(tweak); - rustsecp256k1_v0_4_0_testrand256(sk); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &internal_pk, sk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, &internal_xonly_pk, &pk_parity, &internal_pk) == 1); + rustsecp256k1_v0_4_1_testrand256(tweak); + rustsecp256k1_v0_4_1_testrand256(sk); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &internal_pk, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, &internal_xonly_pk, &pk_parity, &internal_pk) == 1); ecount = 0; - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(none, &output_pk, &internal_xonly_pk, tweak) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(none, &output_pk, &internal_xonly_pk, tweak) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(sign, &output_pk, &internal_xonly_pk, tweak) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(sign, &output_pk, &internal_xonly_pk, tweak) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, NULL, &internal_xonly_pk, tweak) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, NULL, &internal_xonly_pk, tweak) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, &output_pk, NULL, tweak) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, &output_pk, NULL, tweak) == 0); CHECK(ecount == 4); /* NULL internal_xonly_pk zeroes the output_pk */ - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, NULL) == 0); CHECK(ecount == 5); /* NULL tweak zeroes the output_pk */ - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); /* Invalid tweak zeroes the output_pk */ - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, overflows) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, overflows) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); /* A zero tweak is fine */ - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, zeros64) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, zeros64) == 1); /* Fails if the resulting key was infinity */ for (i = 0; i < count; i++) { - rustsecp256k1_v0_4_0_scalar scalar_tweak; + 
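
test_xonly_pubkey_comparison, added earlier in this hunk, exercises rustsecp256k1_v0_4_1_xonly_pubkey_cmp, which orders keys by their 32-byte x-only serialization (an unparseable key compares as all zeros and fires the illegal callback). Because the result is an ordinary negative/zero/positive int, it drops straight into qsort via a small adapter; the file-scope context below is an illustrative shim around qsort's two-argument comparator, not anything this patch ships:

    #include <stdlib.h>
    #include "secp256k1_extrakeys.h"

    /* Illustrative: sort an array of x-only pubkeys, e.g. for canonical ordering. */
    static const rustsecp256k1_v0_4_1_context *g_cmp_ctx;

    static int xonly_qsort_cmp(const void *a, const void *b) {
        return rustsecp256k1_v0_4_1_xonly_pubkey_cmp(g_cmp_ctx,
                (const rustsecp256k1_v0_4_1_xonly_pubkey *)a,
                (const rustsecp256k1_v0_4_1_xonly_pubkey *)b);
    }

    static void sort_keys(const rustsecp256k1_v0_4_1_context *ctx,
                          rustsecp256k1_v0_4_1_xonly_pubkey *keys, size_t n) {
        g_cmp_ctx = ctx;  /* must outlive the sort */
        qsort(keys, n, sizeof(keys[0]), xonly_qsort_cmp);
    }
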
rustsecp256k1_v0_4_1_scalar scalar_tweak; /* Because sk may be negated before adding, we need to try with tweak = * sk as well as tweak = -sk. */ - rustsecp256k1_v0_4_0_scalar_set_b32(&scalar_tweak, sk, NULL); - rustsecp256k1_v0_4_0_scalar_negate(&scalar_tweak, &scalar_tweak); - rustsecp256k1_v0_4_0_scalar_get_b32(tweak, &scalar_tweak); - CHECK((rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, sk) == 0) - || (rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 0)); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); + rustsecp256k1_v0_4_1_scalar_set_b32(&scalar_tweak, sk, NULL); + rustsecp256k1_v0_4_1_scalar_negate(&scalar_tweak, &scalar_tweak); + rustsecp256k1_v0_4_1_scalar_get_b32(tweak, &scalar_tweak); + CHECK((rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, sk) == 0) + || (rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 0)); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); } /* Invalid pk with a valid tweak */ memset(&internal_xonly_pk, 0, sizeof(internal_xonly_pk)); - rustsecp256k1_v0_4_0_testrand256(tweak); + rustsecp256k1_v0_4_1_testrand256(tweak); ecount = 0; - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); - rustsecp256k1_v0_4_0_context_destroy(none); - rustsecp256k1_v0_4_0_context_destroy(sign); - rustsecp256k1_v0_4_0_context_destroy(verify); + rustsecp256k1_v0_4_1_context_destroy(none); + rustsecp256k1_v0_4_1_context_destroy(sign); + rustsecp256k1_v0_4_1_context_destroy(verify); } void test_xonly_pubkey_tweak_check(void) { unsigned char zeros64[64] = { 0 }; unsigned char overflows[32]; unsigned char sk[32]; - rustsecp256k1_v0_4_0_pubkey internal_pk; - rustsecp256k1_v0_4_0_xonly_pubkey internal_xonly_pk; - rustsecp256k1_v0_4_0_pubkey output_pk; - rustsecp256k1_v0_4_0_xonly_pubkey output_xonly_pk; + rustsecp256k1_v0_4_1_pubkey internal_pk; + rustsecp256k1_v0_4_1_xonly_pubkey internal_xonly_pk; + rustsecp256k1_v0_4_1_pubkey output_pk; + rustsecp256k1_v0_4_1_xonly_pubkey output_xonly_pk; unsigned char output_pk32[32]; unsigned char buf32[32]; int pk_parity; unsigned char tweak[32]; int ecount; - rustsecp256k1_v0_4_0_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount); - rustsecp256k1_v0_4_0_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount); - rustsecp256k1_v0_4_0_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount); + rustsecp256k1_v0_4_1_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount); + rustsecp256k1_v0_4_1_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount); + rustsecp256k1_v0_4_1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount); memset(overflows, 0xff, sizeof(overflows)); - rustsecp256k1_v0_4_0_testrand256(tweak); - rustsecp256k1_v0_4_0_testrand256(sk); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &internal_pk, sk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, &internal_xonly_pk, &pk_parity, &internal_pk) == 1); + rustsecp256k1_v0_4_1_testrand256(tweak); + 
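
The tweak = ±sk loop just above follows from the negate-if-odd rule: depending on the internal key's parity, the tweak is added to either sk or n - sk, so exactly one of the candidates t = sk and t = n - sk drives the tweaked secret to zero and the public result to the point at infinity, which tweak_add must reject. A sketch of how the negated candidate is built, using the library's internal scalar helpers (the same calls the test makes), shown only to make the arithmetic concrete:

    /* tweak_out = n - sk (mod n): the additive inverse of the secret key. */
    static void negated_tweak(unsigned char tweak_out[32], const unsigned char sk32[32]) {
        rustsecp256k1_v0_4_1_scalar t;
        rustsecp256k1_v0_4_1_scalar_set_b32(&t, sk32, NULL); /* t = sk mod n */
        rustsecp256k1_v0_4_1_scalar_negate(&t, &t);          /* t = n - sk   */
        rustsecp256k1_v0_4_1_scalar_get_b32(tweak_out, &t);
    }
    /* If the keypair was not negated, sk + (n - sk) = 0 mod n, so this tweak
     * yields infinity; if it was negated, (n - sk) + sk = 0, so the plain
     * tweak = sk candidate is the one that must fail. Either way exactly one
     * of the two candidates is rejected, which is what the CHECK asserts. */
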
rustsecp256k1_v0_4_1_testrand256(sk); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &internal_pk, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, &internal_xonly_pk, &pk_parity, &internal_pk) == 1); ecount = 0; - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(verify, &output_xonly_pk, &pk_parity, &output_pk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, buf32, &output_xonly_pk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(none, buf32, pk_parity, &internal_xonly_pk, tweak) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(verify, &output_xonly_pk, &pk_parity, &output_pk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, buf32, &output_xonly_pk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(none, buf32, pk_parity, &internal_xonly_pk, tweak) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(sign, buf32, pk_parity, &internal_xonly_pk, tweak) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(sign, buf32, pk_parity, &internal_xonly_pk, tweak) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(verify, buf32, pk_parity, &internal_xonly_pk, tweak) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(verify, NULL, pk_parity, &internal_xonly_pk, tweak) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(verify, buf32, pk_parity, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(verify, NULL, pk_parity, &internal_xonly_pk, tweak) == 0); CHECK(ecount == 3); /* invalid pk_parity value */ - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(verify, buf32, 2, &internal_xonly_pk, tweak) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(verify, buf32, 2, &internal_xonly_pk, tweak) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(verify, buf32, pk_parity, NULL, tweak) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(verify, buf32, pk_parity, NULL, tweak) == 0); CHECK(ecount == 4); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(verify, buf32, pk_parity, &internal_xonly_pk, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(verify, buf32, pk_parity, &internal_xonly_pk, NULL) == 0); CHECK(ecount == 5); memset(tweak, 1, sizeof(tweak)); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(ctx, &internal_xonly_pk, NULL, &internal_pk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, tweak) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(ctx, &output_xonly_pk, &pk_parity, &output_pk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, output_pk32, &output_xonly_pk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(ctx, output_pk32, pk_parity, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(ctx, &internal_xonly_pk, NULL, &internal_pk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(ctx, &output_xonly_pk, &pk_parity, &output_pk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, output_pk32, 
&output_xonly_pk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(ctx, output_pk32, pk_parity, &internal_xonly_pk, tweak) == 1); /* Wrong pk_parity */ - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(ctx, output_pk32, !pk_parity, &internal_xonly_pk, tweak) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(ctx, output_pk32, !pk_parity, &internal_xonly_pk, tweak) == 0); /* Wrong public key */ - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, buf32, &internal_xonly_pk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(ctx, buf32, pk_parity, &internal_xonly_pk, tweak) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, buf32, &internal_xonly_pk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(ctx, buf32, pk_parity, &internal_xonly_pk, tweak) == 0); /* Overflowing tweak not allowed */ - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(ctx, output_pk32, pk_parity, &internal_xonly_pk, overflows) == 0); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, overflows) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(ctx, output_pk32, pk_parity, &internal_xonly_pk, overflows) == 0); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, overflows) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); CHECK(ecount == 5); - rustsecp256k1_v0_4_0_context_destroy(none); - rustsecp256k1_v0_4_0_context_destroy(sign); - rustsecp256k1_v0_4_0_context_destroy(verify); + rustsecp256k1_v0_4_1_context_destroy(none); + rustsecp256k1_v0_4_1_context_destroy(sign); + rustsecp256k1_v0_4_1_context_destroy(verify); } /* Starts with an initial pubkey and recursively creates N_PUBKEYS - 1 @@ -282,231 +319,256 @@ void test_xonly_pubkey_tweak_check(void) { #define N_PUBKEYS 32 void test_xonly_pubkey_tweak_recursive(void) { unsigned char sk[32]; - rustsecp256k1_v0_4_0_pubkey pk[N_PUBKEYS]; + rustsecp256k1_v0_4_1_pubkey pk[N_PUBKEYS]; unsigned char pk_serialized[32]; unsigned char tweak[N_PUBKEYS - 1][32]; int i; - rustsecp256k1_v0_4_0_testrand256(sk); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pk[0], sk) == 1); + rustsecp256k1_v0_4_1_testrand256(sk); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pk[0], sk) == 1); /* Add tweaks */ for (i = 0; i < N_PUBKEYS - 1; i++) { - rustsecp256k1_v0_4_0_xonly_pubkey xonly_pk; + rustsecp256k1_v0_4_1_xonly_pubkey xonly_pk; memset(tweak[i], i + 1, sizeof(tweak[i])); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, NULL, &pk[i]) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(ctx, &pk[i + 1], &xonly_pk, tweak[i]) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, NULL, &pk[i]) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(ctx, &pk[i + 1], &xonly_pk, tweak[i]) == 1); } /* Verify tweaks */ for (i = N_PUBKEYS - 1; i > 0; i--) { - rustsecp256k1_v0_4_0_xonly_pubkey xonly_pk; + rustsecp256k1_v0_4_1_xonly_pubkey xonly_pk; int pk_parity; - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk[i]) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, pk_serialized, &xonly_pk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, NULL, &pk[i - 1]) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(ctx, pk_serialized, pk_parity, 
&xonly_pk, tweak[i - 1]) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk[i]) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, pk_serialized, &xonly_pk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, NULL, &pk[i - 1]) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(ctx, pk_serialized, pk_parity, &xonly_pk, tweak[i - 1]) == 1); } } #undef N_PUBKEYS void test_keypair(void) { unsigned char sk[32]; + unsigned char sk_tmp[32]; unsigned char zeros96[96] = { 0 }; unsigned char overflows[32]; - rustsecp256k1_v0_4_0_keypair keypair; - rustsecp256k1_v0_4_0_pubkey pk, pk_tmp; - rustsecp256k1_v0_4_0_xonly_pubkey xonly_pk, xonly_pk_tmp; + rustsecp256k1_v0_4_1_keypair keypair; + rustsecp256k1_v0_4_1_pubkey pk, pk_tmp; + rustsecp256k1_v0_4_1_xonly_pubkey xonly_pk, xonly_pk_tmp; int pk_parity, pk_parity_tmp; int ecount; - rustsecp256k1_v0_4_0_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount); - rustsecp256k1_v0_4_0_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount); - rustsecp256k1_v0_4_0_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount); + rustsecp256k1_v0_4_1_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount); + rustsecp256k1_v0_4_1_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount); + rustsecp256k1_v0_4_1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount); CHECK(sizeof(zeros96) == sizeof(keypair)); memset(overflows, 0xFF, sizeof(overflows)); /* Test keypair_create */ ecount = 0; - rustsecp256k1_v0_4_0_testrand256(sk); - CHECK(rustsecp256k1_v0_4_0_keypair_create(none, &keypair, sk) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); + rustsecp256k1_v0_4_1_testrand256(sk); + CHECK(rustsecp256k1_v0_4_1_keypair_create(none, &keypair, sk) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_create(verify, &keypair, sk) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_create(verify, &keypair, sk) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_keypair_create(sign, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_create(sign, NULL, sk) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_create(sign, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_create(sign, NULL, sk) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_4_0_keypair_create(sign, &keypair, NULL) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_create(sign, &keypair, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); CHECK(ecount == 4); /* Invalid secret key */ - CHECK(rustsecp256k1_v0_4_0_keypair_create(sign, &keypair, zeros96) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); - CHECK(rustsecp256k1_v0_4_0_keypair_create(sign, &keypair, overflows) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_create(sign, &keypair, zeros96) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_create(sign, &keypair, overflows) == 0); + 
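
The invalid-secret-key cases around this point show that rustsecp256k1_v0_4_1_keypair_create rejects a zero key and any value at or above the group order, and wipes the keypair to all zeros on every failure path, so a caller only has to test the return value. A defensive usage sketch; fill_random is a hypothetical stand-in for the caller's CSPRNG, and in practice a uniformly random 32-byte key is invalid only with negligible probability:

    #include <stddef.h>
    #include "secp256k1_extrakeys.h"

    extern void fill_random(unsigned char *buf, size_t len); /* hypothetical CSPRNG */

    /* Illustrative: draw candidate keys until one is accepted. */
    static void new_keypair(const rustsecp256k1_v0_4_1_context *ctx, /* SIGN context */
                            rustsecp256k1_v0_4_1_keypair *keypair) {
        unsigned char seckey[32];
        do {
            fill_random(seckey, sizeof(seckey));
        } while (!rustsecp256k1_v0_4_1_keypair_create(ctx, keypair, seckey));
        /* On the failed iterations *keypair was zeroed, never half-written. */
    }
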
CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); /* Test keypair_pub */ ecount = 0; - rustsecp256k1_v0_4_0_testrand256(sk); - CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_pub(none, &pk, &keypair) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_pub(none, NULL, &keypair) == 0); + rustsecp256k1_v0_4_1_testrand256(sk); + CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_pub(none, &pk, &keypair) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_pub(none, NULL, &keypair) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_pub(none, &pk, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_pub(none, &pk, NULL) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros96, &pk, sizeof(pk)) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, &pk, sizeof(pk)) == 0); /* Using an invalid keypair is fine for keypair_pub */ memset(&keypair, 0, sizeof(keypair)); - CHECK(rustsecp256k1_v0_4_0_keypair_pub(none, &pk, &keypair) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros96, &pk, sizeof(pk)) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_pub(none, &pk, &keypair) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, &pk, sizeof(pk)) == 0); /* keypair holds the same pubkey as pubkey_create */ - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(sign, &pk, sk) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_create(sign, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_pub(none, &pk_tmp, &keypair) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pk, &pk_tmp, sizeof(pk)) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(sign, &pk, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_create(sign, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_pub(none, &pk_tmp, &keypair) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pk, &pk_tmp, sizeof(pk)) == 0); /** Test keypair_xonly_pub **/ ecount = 0; - rustsecp256k1_v0_4_0_testrand256(sk); - CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(none, NULL, &pk_parity, &keypair) == 0); + rustsecp256k1_v0_4_1_testrand256(sk); + CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(none, NULL, &pk_parity, &keypair) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(none, &xonly_pk, NULL, &keypair) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(none, &xonly_pk, &pk_parity, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(none, &xonly_pk, NULL, &keypair) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, NULL) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0); /* Using an invalid keypair will set the xonly_pk to 0 (first reset * xonly_pk). 
*/ - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 1); memset(&keypair, 0, sizeof(keypair)); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0); CHECK(ecount == 3); /** keypair holds the same xonly pubkey as pubkey_create **/ - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(sign, &pk, sk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_create(sign, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(none, &xonly_pk_tmp, &pk_parity_tmp, &keypair) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(pk)) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(sign, &pk, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_create(sign, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(none, &xonly_pk_tmp, &pk_parity_tmp, &keypair) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(pk)) == 0); CHECK(pk_parity == pk_parity_tmp); - rustsecp256k1_v0_4_0_context_destroy(none); - rustsecp256k1_v0_4_0_context_destroy(sign); - rustsecp256k1_v0_4_0_context_destroy(verify); + /* Test keypair_seckey */ + ecount = 0; + rustsecp256k1_v0_4_1_testrand256(sk); + CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_sec(none, sk_tmp, &keypair) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_sec(none, NULL, &keypair) == 0); + CHECK(ecount == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_sec(none, sk_tmp, NULL) == 0); + CHECK(ecount == 2); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, sk_tmp, sizeof(sk_tmp)) == 0); + + /* keypair returns the same seckey it got */ + CHECK(rustsecp256k1_v0_4_1_keypair_create(sign, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_sec(none, sk_tmp, &keypair) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(sk, sk_tmp, sizeof(sk_tmp)) == 0); + + + /* Using an invalid keypair is fine for keypair_seckey */ + memset(&keypair, 0, sizeof(keypair)); + CHECK(rustsecp256k1_v0_4_1_keypair_sec(none, sk_tmp, &keypair) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, sk_tmp, sizeof(sk_tmp)) == 0); + + rustsecp256k1_v0_4_1_context_destroy(none); + rustsecp256k1_v0_4_1_context_destroy(sign); + rustsecp256k1_v0_4_1_context_destroy(verify); } void test_keypair_add(void) { unsigned char sk[32]; - rustsecp256k1_v0_4_0_keypair keypair; + rustsecp256k1_v0_4_1_keypair keypair; unsigned char overflows[32]; unsigned char zeros96[96] = { 0 }; unsigned char tweak[32]; int i; int ecount = 0; - rustsecp256k1_v0_4_0_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount); - rustsecp256k1_v0_4_0_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount); - rustsecp256k1_v0_4_0_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount); + rustsecp256k1_v0_4_1_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount); + rustsecp256k1_v0_4_1_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount); + 
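
This update also adds rustsecp256k1_v0_4_1_keypair_sec, exercised just above alongside keypair_pub: it copies the 32-byte secret key back out of the opaque keypair, and for a zeroed (invalid) keypair it simply copies out zeros. A roundtrip sketch mirroring that test (SIGN context assumed):

    #include <assert.h>
    #include <string.h>
    #include "secp256k1_extrakeys.h"

    /* The secret returned by keypair_sec is byte-identical to the one the
     * keypair was created from. */
    static void seckey_roundtrip(const rustsecp256k1_v0_4_1_context *ctx,
                                 const unsigned char seckey[32]) {
        rustsecp256k1_v0_4_1_keypair keypair;
        unsigned char out[32];

        assert(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, seckey));
        assert(rustsecp256k1_v0_4_1_keypair_sec(ctx, out, &keypair));
        assert(memcmp(out, seckey, 32) == 0);
    }
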
rustsecp256k1_v0_4_1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount); CHECK(sizeof(zeros96) == sizeof(keypair)); - rustsecp256k1_v0_4_0_testrand256(sk); - rustsecp256k1_v0_4_0_testrand256(tweak); + rustsecp256k1_v0_4_1_testrand256(sk); + rustsecp256k1_v0_4_1_testrand256(tweak); memset(overflows, 0xFF, 32); - CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(none, &keypair, tweak) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(none, &keypair, tweak) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(sign, &keypair, tweak) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(sign, &keypair, tweak) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(verify, &keypair, tweak) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(verify, NULL, tweak) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(verify, &keypair, tweak) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(verify, NULL, tweak) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(verify, &keypair, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(verify, &keypair, NULL) == 0); CHECK(ecount == 4); /* This does not set the keypair to zeroes */ - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) != 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&keypair, zeros96, sizeof(keypair)) != 0); /* Invalid tweak zeroes the keypair */ - CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(ctx, &keypair, overflows) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(ctx, &keypair, overflows) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0); /* A zero tweak is fine */ - CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(ctx, &keypair, zeros96) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(ctx, &keypair, zeros96) == 1); /* Fails if the resulting keypair was (sk=0, pk=infinity) */ for (i = 0; i < count; i++) { - rustsecp256k1_v0_4_0_scalar scalar_tweak; - rustsecp256k1_v0_4_0_keypair keypair_tmp; - rustsecp256k1_v0_4_0_testrand256(sk); - CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1); + rustsecp256k1_v0_4_1_scalar scalar_tweak; + rustsecp256k1_v0_4_1_keypair keypair_tmp; + rustsecp256k1_v0_4_1_testrand256(sk); + CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1); memcpy(&keypair_tmp, &keypair, sizeof(keypair)); /* Because sk may be negated before adding, we need to try with tweak = * sk as well as tweak = -sk. 
*/ - rustsecp256k1_v0_4_0_scalar_set_b32(&scalar_tweak, sk, NULL); - rustsecp256k1_v0_4_0_scalar_negate(&scalar_tweak, &scalar_tweak); - rustsecp256k1_v0_4_0_scalar_get_b32(tweak, &scalar_tweak); - CHECK((rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(ctx, &keypair, sk) == 0) - || (rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(ctx, &keypair_tmp, tweak) == 0)); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0 - || rustsecp256k1_v0_4_0_memcmp_var(&keypair_tmp, zeros96, sizeof(keypair_tmp)) == 0); + rustsecp256k1_v0_4_1_scalar_set_b32(&scalar_tweak, sk, NULL); + rustsecp256k1_v0_4_1_scalar_negate(&scalar_tweak, &scalar_tweak); + rustsecp256k1_v0_4_1_scalar_get_b32(tweak, &scalar_tweak); + CHECK((rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(ctx, &keypair, sk) == 0) + || (rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(ctx, &keypair_tmp, tweak) == 0)); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0 + || rustsecp256k1_v0_4_1_memcmp_var(&keypair_tmp, zeros96, sizeof(keypair_tmp)) == 0); } /* Invalid keypair with a valid tweak */ memset(&keypair, 0, sizeof(keypair)); - rustsecp256k1_v0_4_0_testrand256(tweak); + rustsecp256k1_v0_4_1_testrand256(tweak); ecount = 0; - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0); /* Only seckey part of keypair invalid */ - CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1); memset(&keypair, 0, 32); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0); CHECK(ecount == 2); /* Only pubkey part of keypair invalid */ - CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1); memset(&keypair.data[32], 0, 64); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0); CHECK(ecount == 3); /* Check that the keypair_tweak_add implementation is correct */ - CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1); for (i = 0; i < count; i++) { - rustsecp256k1_v0_4_0_xonly_pubkey internal_pk; - rustsecp256k1_v0_4_0_xonly_pubkey output_pk; - rustsecp256k1_v0_4_0_pubkey output_pk_xy; - rustsecp256k1_v0_4_0_pubkey output_pk_expected; + rustsecp256k1_v0_4_1_xonly_pubkey internal_pk; + rustsecp256k1_v0_4_1_xonly_pubkey output_pk; + rustsecp256k1_v0_4_1_pubkey output_pk_xy; + rustsecp256k1_v0_4_1_pubkey output_pk_expected; unsigned char pk32[32]; + unsigned char sk32[32]; int pk_parity; - rustsecp256k1_v0_4_0_testrand256(tweak); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &output_pk, &pk_parity, &keypair) == 1); + rustsecp256k1_v0_4_1_testrand256(tweak); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1); + 
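
The correctness loop continuing below checks that the two tweaking paths agree: tweaking the keypair and then reading its public key must land on the same point as tweaking the x-only public key directly. Stated compactly as its own property check (a sketch assuming a SIGN | VERIFY context; comparing pubkey structs byte-wise mirrors the tests' memcmp_var convention):

    #include <assert.h>
    #include <string.h>
    #include "secp256k1_extrakeys.h"

    static void tweak_paths_agree(const rustsecp256k1_v0_4_1_context *ctx,
                                  rustsecp256k1_v0_4_1_keypair *keypair,
                                  const unsigned char tweak[32]) {
        rustsecp256k1_v0_4_1_xonly_pubkey internal_pk;
        rustsecp256k1_v0_4_1_pubkey via_keypair, via_pubkey;

        assert(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &internal_pk, NULL, keypair));
        /* Path 1: tweak the keypair, then extract its full public key. */
        assert(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(ctx, keypair, tweak));
        assert(rustsecp256k1_v0_4_1_keypair_pub(ctx, &via_keypair, keypair));
        /* Path 2: tweak the x-only public key alone. */
        assert(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(ctx, &via_pubkey, &internal_pk, tweak));
        assert(memcmp(&via_keypair, &via_pubkey, sizeof(via_keypair)) == 0);
    }
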
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &output_pk, &pk_parity, &keypair) == 1); /* Check that it passes xonly_pubkey_tweak_add_check */ - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, pk32, &output_pk) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(ctx, pk32, pk_parity, &internal_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, pk32, &output_pk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(ctx, pk32, pk_parity, &internal_pk, tweak) == 1); /* Check that the resulting pubkey matches xonly_pubkey_tweak_add */ - CHECK(rustsecp256k1_v0_4_0_keypair_pub(ctx, &output_pk_xy, &keypair) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(ctx, &output_pk_expected, &internal_pk, tweak) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_pub(ctx, &output_pk_xy, &keypair) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(ctx, &output_pk_expected, &internal_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0); /* Check that the secret key in the keypair is tweaked correctly */ - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &output_pk_expected, &keypair.data[0]) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_sec(none, sk32, &keypair) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &output_pk_expected, sk32) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0); } - rustsecp256k1_v0_4_0_context_destroy(none); - rustsecp256k1_v0_4_0_context_destroy(sign); - rustsecp256k1_v0_4_0_context_destroy(verify); + rustsecp256k1_v0_4_1_context_destroy(none); + rustsecp256k1_v0_4_1_context_destroy(sign); + rustsecp256k1_v0_4_1_context_destroy(verify); } void run_extrakeys_tests(void) { @@ -515,6 +577,7 @@ void run_extrakeys_tests(void) { test_xonly_pubkey_tweak(); test_xonly_pubkey_tweak_check(); test_xonly_pubkey_tweak_recursive(); + test_xonly_pubkey_comparison(); /* keypair tests */ test_keypair(); diff --git a/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include b/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include index 6af734c..e658974 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include +++ b/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include @@ -1,4 +1,4 @@ -include_HEADERS += include/rustsecp256k1_v0_4_0_recovery.h +include_HEADERS += include/rustsecp256k1_v0_4_1_recovery.h noinst_HEADERS += src/modules/recovery/main_impl.h noinst_HEADERS += src/modules/recovery/tests_impl.h noinst_HEADERS += src/modules/recovery/tests_exhaustive_impl.h diff --git a/secp256k1-sys/depend/secp256k1/src/modules/recovery/main_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/recovery/main_impl.h index 6ef74bc..cdfaf32 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/recovery/main_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/recovery/main_impl.h @@ -7,36 +7,36 @@ #ifndef SECP256K1_MODULE_RECOVERY_MAIN_H #define SECP256K1_MODULE_RECOVERY_MAIN_H -#include "include/secp256k1_recovery.h" +#include "../../../include/secp256k1_recovery.h" -static void rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_load(const 
rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_scalar* r, rustsecp256k1_v0_4_0_scalar* s, int* recid, const rustsecp256k1_v0_4_0_ecdsa_recoverable_signature* sig) {
+static void rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_load(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_scalar* r, rustsecp256k1_v0_4_1_scalar* s, int* recid, const rustsecp256k1_v0_4_1_ecdsa_recoverable_signature* sig) {
     (void)ctx;
-    if (sizeof(rustsecp256k1_v0_4_0_scalar) == 32) {
-        /* When the rustsecp256k1_v0_4_0_scalar type is exactly 32 byte, use its
-         * representation inside rustsecp256k1_v0_4_0_ecdsa_signature, as conversion is very fast.
-         * Note that rustsecp256k1_v0_4_0_ecdsa_signature_save must use the same representation. */
+    if (sizeof(rustsecp256k1_v0_4_1_scalar) == 32) {
+        /* When the rustsecp256k1_v0_4_1_scalar type is exactly 32 bytes, use its
+         * representation inside rustsecp256k1_v0_4_1_ecdsa_signature, as conversion is very fast.
+         * Note that rustsecp256k1_v0_4_1_ecdsa_signature_save must use the same representation. */
         memcpy(r, &sig->data[0], 32);
         memcpy(s, &sig->data[32], 32);
     } else {
-        rustsecp256k1_v0_4_0_scalar_set_b32(r, &sig->data[0], NULL);
-        rustsecp256k1_v0_4_0_scalar_set_b32(s, &sig->data[32], NULL);
+        rustsecp256k1_v0_4_1_scalar_set_b32(r, &sig->data[0], NULL);
+        rustsecp256k1_v0_4_1_scalar_set_b32(s, &sig->data[32], NULL);
     }
     *recid = sig->data[64];
 }

-static void rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_save(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature* sig, const rustsecp256k1_v0_4_0_scalar* r, const rustsecp256k1_v0_4_0_scalar* s, int recid) {
-    if (sizeof(rustsecp256k1_v0_4_0_scalar) == 32) {
+static void rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_save(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature* sig, const rustsecp256k1_v0_4_1_scalar* r, const rustsecp256k1_v0_4_1_scalar* s, int recid) {
+    if (sizeof(rustsecp256k1_v0_4_1_scalar) == 32) {
         memcpy(&sig->data[0], r, 32);
         memcpy(&sig->data[32], s, 32);
     } else {
-        rustsecp256k1_v0_4_0_scalar_get_b32(&sig->data[0], r);
-        rustsecp256k1_v0_4_0_scalar_get_b32(&sig->data[32], s);
+        rustsecp256k1_v0_4_1_scalar_get_b32(&sig->data[0], r);
+        rustsecp256k1_v0_4_1_scalar_get_b32(&sig->data[32], s);
     }
     sig->data[64] = recid;
 }

-int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid) {
-    rustsecp256k1_v0_4_0_scalar r, s;
+int rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid) {
+    rustsecp256k1_v0_4_1_scalar r, s;
     int ret = 1;
     int overflow = 0;

@@ -45,111 +45,111 @@ int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(const rustsec
     ARG_CHECK(input64 != NULL);
     ARG_CHECK(recid >= 0 && recid <= 3);

-    rustsecp256k1_v0_4_0_scalar_set_b32(&r, &input64[0], &overflow);
+    rustsecp256k1_v0_4_1_scalar_set_b32(&r, &input64[0], &overflow);
     ret &= !overflow;
-    rustsecp256k1_v0_4_0_scalar_set_b32(&s, &input64[32], &overflow);
+    rustsecp256k1_v0_4_1_scalar_set_b32(&s, &input64[32], &overflow);
     ret &= !overflow;
     if (ret) {
-        rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_save(sig, &r, &s, recid);
+        rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_save(sig, &r, &s, recid);
     } else {
         memset(sig, 0, sizeof(*sig));
     }
     return ret;
 }

-int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_compact(const
rustsecp256k1_v0_4_0_context* ctx, unsigned char *output64, int *recid, const rustsecp256k1_v0_4_0_ecdsa_recoverable_signature* sig) { - rustsecp256k1_v0_4_0_scalar r, s; +int rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output64, int *recid, const rustsecp256k1_v0_4_1_ecdsa_recoverable_signature* sig) { + rustsecp256k1_v0_4_1_scalar r, s; (void)ctx; ARG_CHECK(output64 != NULL); ARG_CHECK(sig != NULL); ARG_CHECK(recid != NULL); - rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_load(ctx, &r, &s, recid, sig); - rustsecp256k1_v0_4_0_scalar_get_b32(&output64[0], &r); - rustsecp256k1_v0_4_0_scalar_get_b32(&output64[32], &s); + rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_load(ctx, &r, &s, recid, sig); + rustsecp256k1_v0_4_1_scalar_get_b32(&output64[0], &r); + rustsecp256k1_v0_4_1_scalar_get_b32(&output64[32], &s); return 1; } -int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_signature* sig, const rustsecp256k1_v0_4_0_ecdsa_recoverable_signature* sigin) { - rustsecp256k1_v0_4_0_scalar r, s; +int rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_signature* sig, const rustsecp256k1_v0_4_1_ecdsa_recoverable_signature* sigin) { + rustsecp256k1_v0_4_1_scalar r, s; int recid; (void)ctx; ARG_CHECK(sig != NULL); ARG_CHECK(sigin != NULL); - rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, sigin); - rustsecp256k1_v0_4_0_ecdsa_signature_save(sig, &r, &s); + rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, sigin); + rustsecp256k1_v0_4_1_ecdsa_signature_save(sig, &r, &s); return 1; } -static int rustsecp256k1_v0_4_0_ecdsa_sig_recover(const rustsecp256k1_v0_4_0_ecmult_context *ctx, const rustsecp256k1_v0_4_0_scalar *sigr, const rustsecp256k1_v0_4_0_scalar* sigs, rustsecp256k1_v0_4_0_ge *pubkey, const rustsecp256k1_v0_4_0_scalar *message, int recid) { +static int rustsecp256k1_v0_4_1_ecdsa_sig_recover(const rustsecp256k1_v0_4_1_ecmult_context *ctx, const rustsecp256k1_v0_4_1_scalar *sigr, const rustsecp256k1_v0_4_1_scalar* sigs, rustsecp256k1_v0_4_1_ge *pubkey, const rustsecp256k1_v0_4_1_scalar *message, int recid) { unsigned char brx[32]; - rustsecp256k1_v0_4_0_fe fx; - rustsecp256k1_v0_4_0_ge x; - rustsecp256k1_v0_4_0_gej xj; - rustsecp256k1_v0_4_0_scalar rn, u1, u2; - rustsecp256k1_v0_4_0_gej qj; + rustsecp256k1_v0_4_1_fe fx; + rustsecp256k1_v0_4_1_ge x; + rustsecp256k1_v0_4_1_gej xj; + rustsecp256k1_v0_4_1_scalar rn, u1, u2; + rustsecp256k1_v0_4_1_gej qj; int r; - if (rustsecp256k1_v0_4_0_scalar_is_zero(sigr) || rustsecp256k1_v0_4_0_scalar_is_zero(sigs)) { + if (rustsecp256k1_v0_4_1_scalar_is_zero(sigr) || rustsecp256k1_v0_4_1_scalar_is_zero(sigs)) { return 0; } - rustsecp256k1_v0_4_0_scalar_get_b32(brx, sigr); - r = rustsecp256k1_v0_4_0_fe_set_b32(&fx, brx); + rustsecp256k1_v0_4_1_scalar_get_b32(brx, sigr); + r = rustsecp256k1_v0_4_1_fe_set_b32(&fx, brx); (void)r; VERIFY_CHECK(r); /* brx comes from a scalar, so is less than the order; certainly less than p */ if (recid & 2) { - if (rustsecp256k1_v0_4_0_fe_cmp_var(&fx, &rustsecp256k1_v0_4_0_ecdsa_const_p_minus_order) >= 0) { + if (rustsecp256k1_v0_4_1_fe_cmp_var(&fx, &rustsecp256k1_v0_4_1_ecdsa_const_p_minus_order) >= 0) { return 0; } - rustsecp256k1_v0_4_0_fe_add(&fx, &rustsecp256k1_v0_4_0_ecdsa_const_order_as_fe); + rustsecp256k1_v0_4_1_fe_add(&fx, 
&rustsecp256k1_v0_4_1_ecdsa_const_order_as_fe); } - if (!rustsecp256k1_v0_4_0_ge_set_xo_var(&x, &fx, recid & 1)) { + if (!rustsecp256k1_v0_4_1_ge_set_xo_var(&x, &fx, recid & 1)) { return 0; } - rustsecp256k1_v0_4_0_gej_set_ge(&xj, &x); - rustsecp256k1_v0_4_0_scalar_inverse_var(&rn, sigr); - rustsecp256k1_v0_4_0_scalar_mul(&u1, &rn, message); - rustsecp256k1_v0_4_0_scalar_negate(&u1, &u1); - rustsecp256k1_v0_4_0_scalar_mul(&u2, &rn, sigs); - rustsecp256k1_v0_4_0_ecmult(ctx, &qj, &xj, &u2, &u1); - rustsecp256k1_v0_4_0_ge_set_gej_var(pubkey, &qj); - return !rustsecp256k1_v0_4_0_gej_is_infinity(&qj); + rustsecp256k1_v0_4_1_gej_set_ge(&xj, &x); + rustsecp256k1_v0_4_1_scalar_inverse_var(&rn, sigr); + rustsecp256k1_v0_4_1_scalar_mul(&u1, &rn, message); + rustsecp256k1_v0_4_1_scalar_negate(&u1, &u1); + rustsecp256k1_v0_4_1_scalar_mul(&u2, &rn, sigs); + rustsecp256k1_v0_4_1_ecmult(ctx, &qj, &xj, &u2, &u1); + rustsecp256k1_v0_4_1_ge_set_gej_var(pubkey, &qj); + return !rustsecp256k1_v0_4_1_gej_is_infinity(&qj); } -int rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_recoverable_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_4_0_nonce_function noncefp, const void* noncedata) { - rustsecp256k1_v0_4_0_scalar r, s; +int rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_recoverable_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_4_1_nonce_function noncefp, const void* noncedata) { + rustsecp256k1_v0_4_1_scalar r, s; int ret, recid; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(msghash32 != NULL); ARG_CHECK(signature != NULL); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_4_0_ecdsa_sign_inner(ctx, &r, &s, &recid, msghash32, seckey, noncefp, noncedata); - rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_save(signature, &r, &s, recid); + ret = rustsecp256k1_v0_4_1_ecdsa_sign_inner(ctx, &r, &s, &recid, msghash32, seckey, noncefp, noncedata); + rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_save(signature, &r, &s, recid); return ret; } -int rustsecp256k1_v0_4_0_ecdsa_recover(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *pubkey, const rustsecp256k1_v0_4_0_ecdsa_recoverable_signature *signature, const unsigned char *msghash32) { - rustsecp256k1_v0_4_0_ge q; - rustsecp256k1_v0_4_0_scalar r, s; - rustsecp256k1_v0_4_0_scalar m; +int rustsecp256k1_v0_4_1_ecdsa_recover(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *pubkey, const rustsecp256k1_v0_4_1_ecdsa_recoverable_signature *signature, const unsigned char *msghash32) { + rustsecp256k1_v0_4_1_ge q; + rustsecp256k1_v0_4_1_scalar r, s; + rustsecp256k1_v0_4_1_scalar m; int recid; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)); + ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx)); ARG_CHECK(msghash32 != NULL); ARG_CHECK(signature != NULL); ARG_CHECK(pubkey != NULL); - rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, signature); + rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, signature); VERIFY_CHECK(recid >= 0 && recid < 4); /* should have been caught in parse_compact */ - 
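
rustsecp256k1_v0_4_1_ecdsa_sig_recover above inverts ECDSA verification: recid & 1 picks the parity of R's Y coordinate, recid & 2 flags the rare case where r overflowed the group order as an x coordinate, and the key is then Q = r^{-1}(s·R - m·G), which is exactly the u1 = -m/r, u2 = s/r multiplication in the code. End to end, a signer ships 64 bytes of r||s plus the 2-bit recovery id and the receiver reconstructs the public key. A roundtrip sketch (SIGN | VERIFY context assumed; comparing pubkey structs byte-wise follows the tests' memcmp_var convention):

    #include <assert.h>
    #include <string.h>
    #include "secp256k1.h"
    #include "secp256k1_recovery.h"

    static void recovery_roundtrip(const rustsecp256k1_v0_4_1_context *ctx,
                                   const unsigned char msghash32[32],
                                   const unsigned char seckey[32]) {
        rustsecp256k1_v0_4_1_ecdsa_recoverable_signature sig, sig2;
        rustsecp256k1_v0_4_1_pubkey recovered, expected;
        unsigned char compact64[64];
        int recid;

        /* NULL noncefp selects the library's default (RFC 6979) nonce function. */
        assert(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(ctx, &sig, msghash32, seckey, NULL, NULL));
        /* Wire format: 64 bytes of r||s, recovery id carried separately. */
        assert(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact(ctx, compact64, &recid, &sig));
        assert(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &sig2, compact64, recid));
        /* Receiver side: the public key comes back out of sig + msg alone. */
        assert(rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &recovered, &sig2, msghash32));
        assert(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &expected, seckey));
        assert(memcmp(&recovered, &expected, sizeof(recovered)) == 0);
    }
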
rustsecp256k1_v0_4_0_scalar_set_b32(&m, msghash32, NULL); - if (rustsecp256k1_v0_4_0_ecdsa_sig_recover(&ctx->ecmult_ctx, &r, &s, &q, &m, recid)) { - rustsecp256k1_v0_4_0_pubkey_save(pubkey, &q); + rustsecp256k1_v0_4_1_scalar_set_b32(&m, msghash32, NULL); + if (rustsecp256k1_v0_4_1_ecdsa_sig_recover(&ctx->ecmult_ctx, &r, &s, &q, &m, recid)) { + rustsecp256k1_v0_4_1_pubkey_save(pubkey, &q); return 1; } else { memset(pubkey, 0, sizeof(*pubkey)); diff --git a/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_exhaustive_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_exhaustive_impl.h index ab59973..f7366d4 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_exhaustive_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_exhaustive_impl.h @@ -8,9 +8,9 @@ #define SECP256K1_MODULE_RECOVERY_EXHAUSTIVE_TESTS_H #include "src/modules/recovery/main_impl.h" -#include "include/secp256k1_recovery.h" +#include "../../../include/secp256k1_recovery.h" -void test_exhaustive_recovery_sign(const rustsecp256k1_v0_4_0_context *ctx, const rustsecp256k1_v0_4_0_ge *group) { +void test_exhaustive_recovery_sign(const rustsecp256k1_v0_4_1_context *ctx, const rustsecp256k1_v0_4_1_ge *group) { int i, j, k; uint64_t iter = 0; @@ -20,23 +20,23 @@ void test_exhaustive_recovery_sign(const rustsecp256k1_v0_4_0_context *ctx, cons if (skip_section(&iter)) continue; for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) { /* nonce */ const int starting_k = k; - rustsecp256k1_v0_4_0_fe r_dot_y_normalized; - rustsecp256k1_v0_4_0_ecdsa_recoverable_signature rsig; - rustsecp256k1_v0_4_0_ecdsa_signature sig; - rustsecp256k1_v0_4_0_scalar sk, msg, r, s, expected_r; + rustsecp256k1_v0_4_1_fe r_dot_y_normalized; + rustsecp256k1_v0_4_1_ecdsa_recoverable_signature rsig; + rustsecp256k1_v0_4_1_ecdsa_signature sig; + rustsecp256k1_v0_4_1_scalar sk, msg, r, s, expected_r; unsigned char sk32[32], msg32[32]; int expected_recid; int recid; int overflow; - rustsecp256k1_v0_4_0_scalar_set_int(&msg, i); - rustsecp256k1_v0_4_0_scalar_set_int(&sk, j); - rustsecp256k1_v0_4_0_scalar_get_b32(sk32, &sk); - rustsecp256k1_v0_4_0_scalar_get_b32(msg32, &msg); + rustsecp256k1_v0_4_1_scalar_set_int(&msg, i); + rustsecp256k1_v0_4_1_scalar_set_int(&sk, j); + rustsecp256k1_v0_4_1_scalar_get_b32(sk32, &sk); + rustsecp256k1_v0_4_1_scalar_get_b32(msg32, &msg); - rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, rustsecp256k1_v0_4_0_nonce_function_smallint, &k); + rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, rustsecp256k1_v0_4_1_nonce_function_smallint, &k); /* Check directly */ - rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig); + rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig); r_from_k(&expected_r, group, k, &overflow); CHECK(r == expected_r); CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER || @@ -50,18 +50,18 @@ void test_exhaustive_recovery_sign(const rustsecp256k1_v0_4_0_context *ctx, cons * in the real group. */ expected_recid = overflow ? 
2 : 0; r_dot_y_normalized = group[k].y; - rustsecp256k1_v0_4_0_fe_normalize(&r_dot_y_normalized); + rustsecp256k1_v0_4_1_fe_normalize(&r_dot_y_normalized); /* Also the recovery id is flipped depending on whether we hit the low-s branch */ if ((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER) { - expected_recid |= rustsecp256k1_v0_4_0_fe_is_odd(&r_dot_y_normalized); + expected_recid |= rustsecp256k1_v0_4_1_fe_is_odd(&r_dot_y_normalized); } else { - expected_recid |= !rustsecp256k1_v0_4_0_fe_is_odd(&r_dot_y_normalized); + expected_recid |= !rustsecp256k1_v0_4_1_fe_is_odd(&r_dot_y_normalized); } CHECK(recid == expected_recid); /* Convert to a standard sig then check */ - rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig); - rustsecp256k1_v0_4_0_ecdsa_signature_load(ctx, &r, &s, &sig); + rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig); + rustsecp256k1_v0_4_1_ecdsa_signature_load(ctx, &r, &s, &sig); /* Note that we compute expected_r *after* signing -- this is important * because our nonce-computing function might change k during * signing. */ @@ -79,7 +79,7 @@ void test_exhaustive_recovery_sign(const rustsecp256k1_v0_4_0_context *ctx, cons } } -void test_exhaustive_recovery_verify(const rustsecp256k1_v0_4_0_context *ctx, const rustsecp256k1_v0_4_0_ge *group) { +void test_exhaustive_recovery_verify(const rustsecp256k1_v0_4_1_context *ctx, const rustsecp256k1_v0_4_1_ge *group) { /* This is essentially a copy of test_exhaustive_verify, with recovery added */ int s, r, msg, key; uint64_t iter = 0; @@ -87,41 +87,41 @@ void test_exhaustive_recovery_verify(const rustsecp256k1_v0_4_0_context *ctx, co for (r = 1; r < EXHAUSTIVE_TEST_ORDER; r++) { for (msg = 1; msg < EXHAUSTIVE_TEST_ORDER; msg++) { for (key = 1; key < EXHAUSTIVE_TEST_ORDER; key++) { - rustsecp256k1_v0_4_0_ge nonconst_ge; - rustsecp256k1_v0_4_0_ecdsa_recoverable_signature rsig; - rustsecp256k1_v0_4_0_ecdsa_signature sig; - rustsecp256k1_v0_4_0_pubkey pk; - rustsecp256k1_v0_4_0_scalar sk_s, msg_s, r_s, s_s; - rustsecp256k1_v0_4_0_scalar s_times_k_s, msg_plus_r_times_sk_s; + rustsecp256k1_v0_4_1_ge nonconst_ge; + rustsecp256k1_v0_4_1_ecdsa_recoverable_signature rsig; + rustsecp256k1_v0_4_1_ecdsa_signature sig; + rustsecp256k1_v0_4_1_pubkey pk; + rustsecp256k1_v0_4_1_scalar sk_s, msg_s, r_s, s_s; + rustsecp256k1_v0_4_1_scalar s_times_k_s, msg_plus_r_times_sk_s; int recid = 0; int k, should_verify; unsigned char msg32[32]; if (skip_section(&iter)) continue; - rustsecp256k1_v0_4_0_scalar_set_int(&s_s, s); - rustsecp256k1_v0_4_0_scalar_set_int(&r_s, r); - rustsecp256k1_v0_4_0_scalar_set_int(&msg_s, msg); - rustsecp256k1_v0_4_0_scalar_set_int(&sk_s, key); - rustsecp256k1_v0_4_0_scalar_get_b32(msg32, &msg_s); + rustsecp256k1_v0_4_1_scalar_set_int(&s_s, s); + rustsecp256k1_v0_4_1_scalar_set_int(&r_s, r); + rustsecp256k1_v0_4_1_scalar_set_int(&msg_s, msg); + rustsecp256k1_v0_4_1_scalar_set_int(&sk_s, key); + rustsecp256k1_v0_4_1_scalar_get_b32(msg32, &msg_s); /* Verify by hand */ /* Run through every k value that gives us this r and check that *one* works. * Note there could be none, there could be multiple, ECDSA is weird.
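* Concretely: for each nonce k whose point k*G has X coordinate r, the * signature is valid iff s*k == msg + r*key (mod n); the loop below ORs * that condition into should_verify.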
*/ should_verify = 0; for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) { - rustsecp256k1_v0_4_0_scalar check_x_s; + rustsecp256k1_v0_4_1_scalar check_x_s; r_from_k(&check_x_s, group, k, NULL); if (r_s == check_x_s) { - rustsecp256k1_v0_4_0_scalar_set_int(&s_times_k_s, k); - rustsecp256k1_v0_4_0_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); - rustsecp256k1_v0_4_0_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); - rustsecp256k1_v0_4_0_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); - should_verify |= rustsecp256k1_v0_4_0_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); + rustsecp256k1_v0_4_1_scalar_set_int(&s_times_k_s, k); + rustsecp256k1_v0_4_1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); + rustsecp256k1_v0_4_1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); + rustsecp256k1_v0_4_1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); + should_verify |= rustsecp256k1_v0_4_1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); } } /* nb we have a "high s" rule */ - should_verify &= !rustsecp256k1_v0_4_0_scalar_is_high(&s_s); + should_verify &= !rustsecp256k1_v0_4_1_scalar_is_high(&s_s); /* We would like to try recovering the pubkey and checking that it matches, * but pubkey recovery is impossible in the exhaustive tests (the reason @@ -129,19 +129,19 @@ void test_exhaustive_recovery_verify(const rustsecp256k1_v0_4_0_context *ctx, co * overlap between the sets, so there are no valid signatures). */ /* Verify by converting to a standard signature and calling verify */ - rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid); - rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig); + rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid); + rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig); memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge)); - rustsecp256k1_v0_4_0_pubkey_save(&pk, &nonconst_ge); + rustsecp256k1_v0_4_1_pubkey_save(&pk, &nonconst_ge); CHECK(should_verify == - rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pk)); + rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pk)); } } } } } -static void test_exhaustive_recovery(const rustsecp256k1_v0_4_0_context *ctx, const rustsecp256k1_v0_4_0_ge *group) { +static void test_exhaustive_recovery(const rustsecp256k1_v0_4_1_context *ctx, const rustsecp256k1_v0_4_1_ge *group) { test_exhaustive_recovery_sign(ctx, group); test_exhaustive_recovery_verify(ctx, group); } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_impl.h index 37f367b..742d9f4 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_impl.h @@ -25,19 +25,19 @@ static int recovery_test_nonce_function(unsigned char *nonce32, const unsigned c } /* On the next run, return a valid nonce, but flip a coin as to whether or not to fail signing. 
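* (A nonce function that returns 0 makes signing fail outright instead of * retrying with a new counter, so callers of this helper must accept * either outcome.)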
*/ memset(nonce32, 1, 32); - return rustsecp256k1_v0_4_0_testrand_bits(1); + return rustsecp256k1_v0_4_1_testrand_bits(1); } void test_ecdsa_recovery_api(void) { /* Setup contexts that just count errors */ - rustsecp256k1_v0_4_0_context *none = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_NONE); - rustsecp256k1_v0_4_0_context *sign = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN); - rustsecp256k1_v0_4_0_context *vrfy = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_VERIFY); - rustsecp256k1_v0_4_0_context *both = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - rustsecp256k1_v0_4_0_pubkey pubkey; - rustsecp256k1_v0_4_0_pubkey recpubkey; - rustsecp256k1_v0_4_0_ecdsa_signature normal_sig; - rustsecp256k1_v0_4_0_ecdsa_recoverable_signature recsig; + rustsecp256k1_v0_4_1_context *none = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_NONE); + rustsecp256k1_v0_4_1_context *sign = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN); + rustsecp256k1_v0_4_1_context *vrfy = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_VERIFY); + rustsecp256k1_v0_4_1_context *both = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + rustsecp256k1_v0_4_1_pubkey pubkey; + rustsecp256k1_v0_4_1_pubkey recpubkey; + rustsecp256k1_v0_4_1_ecdsa_signature normal_sig; + rustsecp256k1_v0_4_1_ecdsa_recoverable_signature recsig; unsigned char privkey[32] = { 1 }; unsigned char message[32] = { 2 }; int32_t ecount = 0; @@ -49,160 +49,160 @@ void test_ecdsa_recovery_api(void) { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - rustsecp256k1_v0_4_0_context_set_error_callback(none, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_4_0_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_4_0_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_4_0_context_set_error_callback(both, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_4_0_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_4_0_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_4_0_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_4_0_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_error_callback(both, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount); /* Construct and verify corresponding public key. 
*/ - CHECK(rustsecp256k1_v0_4_0_ec_seckey_verify(ctx, privkey) == 1); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey, privkey) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, privkey) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey, privkey) == 1); /* Check bad contexts and NULLs for signing */ ecount = 0; - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(none, &recsig, message, privkey, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(none, &recsig, message, privkey, NULL, NULL) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(sign, &recsig, message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(sign, &recsig, message, privkey, NULL, NULL) == 1); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(vrfy, &recsig, message, privkey, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(vrfy, &recsig, message, privkey, NULL, NULL) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(both, NULL, message, privkey, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(both, NULL, message, privkey, NULL, NULL) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(both, &recsig, NULL, privkey, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(both, &recsig, NULL, privkey, NULL, NULL) == 0); CHECK(ecount == 4); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(both, &recsig, message, NULL, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(both, &recsig, message, NULL, NULL, NULL) == 0); CHECK(ecount == 5); /* This will fail or succeed randomly, and in either case will not trigger an ARG_CHECK failure */ - rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, recovery_test_nonce_function, NULL); + rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(both, &recsig, message, privkey, recovery_test_nonce_function, NULL); CHECK(ecount == 5); /* These will all fail, but not in an ARG_CHECK way */ - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(both, &recsig, message, zero_privkey, NULL, NULL) == 0); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(both, &recsig, message, over_privkey, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(both, &recsig, message, zero_privkey, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(both, &recsig, message, over_privkey, NULL, NULL) == 0); /* This one will succeed.
*/ - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1); CHECK(ecount == 5); /* Check signing with a goofy nonce function */ /* Check bad contexts and NULLs for recovery */ ecount = 0; - CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(none, &recpubkey, &recsig, message) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(none, &recpubkey, &recsig, message) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(sign, &recpubkey, &recsig, message) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(sign, &recpubkey, &recsig, message) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(vrfy, &recpubkey, &recsig, message) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(vrfy, &recpubkey, &recsig, message) == 1); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(both, &recpubkey, &recsig, message) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(both, &recpubkey, &recsig, message) == 1); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(both, NULL, &recsig, message) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(both, NULL, &recsig, message) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(both, &recpubkey, NULL, message) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(both, &recpubkey, NULL, message) == 0); CHECK(ecount == 4); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(both, &recpubkey, &recsig, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(both, &recpubkey, &recsig, NULL) == 0); CHECK(ecount == 5); /* Check NULLs for conversion */ - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(both, &normal_sig, message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(both, &normal_sig, message, privkey, NULL, NULL) == 1); ecount = 0; - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(both, NULL, &recsig) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert(both, NULL, &recsig) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(both, &normal_sig, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert(both, &normal_sig, NULL) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(both, &normal_sig, &recsig) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert(both, &normal_sig, &recsig) == 1); /* Check NULLs for de/serialization */ - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1); ecount = 0; - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_compact(both, NULL, &recid, &recsig) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact(both, NULL, &recid, &recsig) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_compact(both, sig, NULL, &recsig) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact(both, sig, NULL, &recsig) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, NULL) == 0); CHECK(ecount == 3); - 
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, &recsig) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, &recsig) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(both, NULL, sig, recid) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(both, NULL, sig, recid) == 0); CHECK(ecount == 4); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, NULL, recid) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(both, &recsig, NULL, recid) == 0); CHECK(ecount == 5); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, -1) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, -1) == 0); CHECK(ecount == 6); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, 5) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, 5) == 0); CHECK(ecount == 7); /* overflow in signature will fail but not affect ecount */ memcpy(sig, over_privkey, 32); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, recid) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, recid) == 0); CHECK(ecount == 7); /* cleanup */ - rustsecp256k1_v0_4_0_context_destroy(none); - rustsecp256k1_v0_4_0_context_destroy(sign); - rustsecp256k1_v0_4_0_context_destroy(vrfy); - rustsecp256k1_v0_4_0_context_destroy(both); + rustsecp256k1_v0_4_1_context_destroy(none); + rustsecp256k1_v0_4_1_context_destroy(sign); + rustsecp256k1_v0_4_1_context_destroy(vrfy); + rustsecp256k1_v0_4_1_context_destroy(both); } void test_ecdsa_recovery_end_to_end(void) { unsigned char extra[32] = {0x00}; unsigned char privkey[32]; unsigned char message[32]; - rustsecp256k1_v0_4_0_ecdsa_signature signature[5]; - rustsecp256k1_v0_4_0_ecdsa_recoverable_signature rsignature[5]; + rustsecp256k1_v0_4_1_ecdsa_signature signature[5]; + rustsecp256k1_v0_4_1_ecdsa_recoverable_signature rsignature[5]; unsigned char sig[74]; - rustsecp256k1_v0_4_0_pubkey pubkey; - rustsecp256k1_v0_4_0_pubkey recpubkey; + rustsecp256k1_v0_4_1_pubkey pubkey; + rustsecp256k1_v0_4_1_pubkey recpubkey; int recid = 0; /* Generate a random key and message. */ { - rustsecp256k1_v0_4_0_scalar msg, key; + rustsecp256k1_v0_4_1_scalar msg, key; random_scalar_order_test(&msg); random_scalar_order_test(&key); - rustsecp256k1_v0_4_0_scalar_get_b32(privkey, &key); - rustsecp256k1_v0_4_0_scalar_get_b32(message, &msg); + rustsecp256k1_v0_4_1_scalar_get_b32(privkey, &key); + rustsecp256k1_v0_4_1_scalar_get_b32(message, &msg); } /* Construct and verify corresponding public key. */ - CHECK(rustsecp256k1_v0_4_0_ec_seckey_verify(ctx, privkey) == 1); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey, privkey) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, privkey) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey, privkey) == 1); /* Serialize/parse compact and verify/recover. 
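* The compact recoverable encoding is the 64-byte big-endian pair r||s; the * recovery id (0..3) is returned separately and must be supplied again when * parsing.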
*/ extra[0] = 0; - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(ctx, &rsignature[0], message, privkey, NULL, NULL) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(ctx, &rsignature[4], message, privkey, NULL, NULL) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(ctx, &rsignature[1], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(ctx, &rsignature[0], message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(ctx, &rsignature[4], message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(ctx, &rsignature[1], message, privkey, NULL, extra) == 1); extra[31] = 1; - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(ctx, &rsignature[2], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(ctx, &rsignature[2], message, privkey, NULL, extra) == 1); extra[31] = 0; extra[0] = 1; - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(ctx, &rsignature[3], message, privkey, NULL, extra) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&signature[4], &signature[0], 64) == 0); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(ctx, &rsignature[3], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&signature[4], &signature[0], 64) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1); memset(&rsignature[4], 0, sizeof(rsignature[4])); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1); /* Parse compact (with recovery id) and recover. */ - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) == 0); /* Serialize/destroy/parse signature and verify again. 
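* A random byte of the compact encoding is corrupted below, so the parsed * signature must fail verification, and recovery must either fail or return * a key other than the original.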
*/ - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1); - sig[rustsecp256k1_v0_4_0_testrand_bits(6)] += 1 + rustsecp256k1_v0_4_0_testrand_int(255); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1); + sig[rustsecp256k1_v0_4_1_testrand_bits(6)] += 1 + rustsecp256k1_v0_4_1_testrand_int(255); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 0); /* Recover again */ - CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 0 || - rustsecp256k1_v0_4_0_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) != 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 0 || + rustsecp256k1_v0_4_1_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) != 0); } /* Tests several edge cases. */ @@ -225,7 +225,7 @@ void test_ecdsa_recovery_edge_cases(void) { 0x7D, 0xD7, 0x3E, 0x38, 0x7E, 0xE4, 0xFC, 0x86, 0x6E, 0x1B, 0xE8, 0xEC, 0xC7, 0xDD, 0x95, 0x57 }; - rustsecp256k1_v0_4_0_pubkey pubkey; + rustsecp256k1_v0_4_1_pubkey pubkey; /* signature (r,s) = (4,4), which can be recovered with all 4 recids. */ const unsigned char sigb64[64] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -237,19 +237,19 @@ void test_ecdsa_recovery_edge_cases(void) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, }; - rustsecp256k1_v0_4_0_pubkey pubkeyb; - rustsecp256k1_v0_4_0_ecdsa_recoverable_signature rsig; - rustsecp256k1_v0_4_0_ecdsa_signature sig; + rustsecp256k1_v0_4_1_pubkey pubkeyb; + rustsecp256k1_v0_4_1_ecdsa_recoverable_signature rsig; + rustsecp256k1_v0_4_1_ecdsa_signature sig; int recid; - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 0)); - CHECK(!rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 1)); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 2)); - CHECK(!rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 3)); - CHECK(!rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 0)); + CHECK(!rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 1)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 2)); + CHECK(!rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 3)); + 
CHECK(!rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); for (recid = 0; recid < 4; recid++) { int i; @@ -294,40 +294,40 @@ void test_ecdsa_recovery_edge_cases(void) { 0xE6, 0xAF, 0x48, 0xA0, 0x3B, 0xBF, 0xD2, 0x5E, 0x8C, 0xD0, 0x36, 0x41, 0x45, 0x02, 0x01, 0x04 }; - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 1); for (recid2 = 0; recid2 < 4; recid2++) { - rustsecp256k1_v0_4_0_pubkey pubkey2b; - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid2) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &pubkey2b, &rsig, msg32) == 1); + rustsecp256k1_v0_4_1_pubkey pubkey2b; + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid2) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &pubkey2b, &rsig, msg32) == 1); /* Verifying with (order + r,4) should always fail. */ - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbderlong, sizeof(sigbderlong)) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbderlong, sizeof(sigbderlong)) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); } /* DER parsing tests. */ /* Zero length r/s. */ - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigcder_zr, sizeof(sigcder_zr)) == 0); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigcder_zs, sizeof(sigcder_zs)) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigcder_zr, sizeof(sigcder_zr)) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigcder_zs, sizeof(sigcder_zs)) == 0); /* Leading zeros. 
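* Strict DER requires minimally-encoded integers: a leading zero octet is * permitted only when it is needed to keep the sign bit clear, so the padded * variants below must be rejected.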
*/ - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt1, sizeof(sigbderalt1)) == 0); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt2, sizeof(sigbderalt2)) == 0); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 0); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt1, sizeof(sigbderalt1)) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt2, sizeof(sigbderalt2)) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 0); sigbderalt3[4] = 1; - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); sigbderalt4[7] = 1; - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); /* Damage signature. */ sigbder[7]++; - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); sigbder[7]--; - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, 6) == 0); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder) - 1) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbder, 6) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder) - 1) == 0); for(i = 0; i < 8; i++) { int c; unsigned char orig = sigbder[i]; @@ -337,7 +337,7 @@ void test_ecdsa_recovery_edge_cases(void) { continue; } sigbder[i] = c; - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 0 || rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 0 || rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); } sigbder[i] = orig; } @@ -357,25 +357,25 @@ void test_ecdsa_recovery_edge_cases(void) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, }; - rustsecp256k1_v0_4_0_pubkey pubkeyc; - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &pubkeyc, &rsig, msg32) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 1); + rustsecp256k1_v0_4_1_pubkey pubkeyc; + 
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &pubkeyc, &rsig, msg32) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 1); sigcder[4] = 0; sigc64[31] = 0; - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0); sigcder[4] = 1; sigcder[7] = 0; sigc64[31] = 1; sigc64[63] = 0; - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0); } } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/Makefile.am.include b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/Makefile.am.include index aaa62d5..20564fa 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/Makefile.am.include +++ b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/Makefile.am.include @@ -1,4 +1,4 @@ -include_HEADERS += include/rustsecp256k1_v0_4_0_schnorrsig.h +include_HEADERS += include/rustsecp256k1_v0_4_1_schnorrsig.h noinst_HEADERS += src/modules/schnorrsig/main_impl.h noinst_HEADERS += src/modules/schnorrsig/tests_impl.h noinst_HEADERS += src/modules/schnorrsig/tests_exhaustive_impl.h diff --git a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/main_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/main_impl.h index 4c1a5df..efb9cf6 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/main_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/main_impl.h @@ -4,17 +4,17 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -#ifndef _SECP256K1_MODULE_SCHNORRSIG_MAIN_ -#define _SECP256K1_MODULE_SCHNORRSIG_MAIN_ +#ifndef SECP256K1_MODULE_SCHNORRSIG_MAIN_H +#define SECP256K1_MODULE_SCHNORRSIG_MAIN_H -#include "include/secp256k1.h" -#include "include/secp256k1_schnorrsig.h" -#include "hash.h" +#include "../../../include/secp256k1.h" +#include "../../../include/secp256k1_schnorrsig.h" +#include "../../hash.h" /* Initializes SHA256 with fixed midstate. 
This midstate was computed by applying * SHA256 to SHA256("BIP0340/nonce")||SHA256("BIP0340/nonce"). */ -static void rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged(rustsecp256k1_v0_4_0_sha256 *sha) { - rustsecp256k1_v0_4_0_sha256_initialize(sha); +static void rustsecp256k1_v0_4_1_nonce_function_bip340_sha256_tagged(rustsecp256k1_v0_4_1_sha256 *sha) { + rustsecp256k1_v0_4_1_sha256_initialize(sha); sha->s[0] = 0x46615b35ul; sha->s[1] = 0xf4bfbff7ul; sha->s[2] = 0x9f8dc671ul; @@ -29,8 +29,8 @@ static void rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged(rustsecp256 /* Initializes SHA256 with fixed midstate. This midstate was computed by applying * SHA256 to SHA256("BIP0340/aux")||SHA256("BIP0340/aux"). */ -static void rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged_aux(rustsecp256k1_v0_4_0_sha256 *sha) { - rustsecp256k1_v0_4_0_sha256_initialize(sha); +static void rustsecp256k1_v0_4_1_nonce_function_bip340_sha256_tagged_aux(rustsecp256k1_v0_4_1_sha256 *sha) { + rustsecp256k1_v0_4_1_sha256_initialize(sha); sha->s[0] = 0x24dd3219ul; sha->s[1] = 0x4eba7e70ul; sha->s[2] = 0xca0fabb9ul; @@ -48,7 +48,7 @@ static void rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged_aux(rustsec static const unsigned char bip340_algo16[16] = "BIP0340/nonce\0\0\0"; static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo16, void *data) { - rustsecp256k1_v0_4_0_sha256 sha; + rustsecp256k1_v0_4_1_sha256 sha; unsigned char masked_key[32]; int i; @@ -57,9 +57,9 @@ static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *ms } if (data != NULL) { - rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged_aux(&sha); - rustsecp256k1_v0_4_0_sha256_write(&sha, data, 32); - rustsecp256k1_v0_4_0_sha256_finalize(&sha, masked_key); + rustsecp256k1_v0_4_1_nonce_function_bip340_sha256_tagged_aux(&sha); + rustsecp256k1_v0_4_1_sha256_write(&sha, data, 32); + rustsecp256k1_v0_4_1_sha256_finalize(&sha, masked_key); for (i = 0; i < 32; i++) { masked_key[i] ^= key32[i]; } @@ -68,35 +68,35 @@ static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *ms /* Tag the hash with algo16 which is important to avoid nonce reuse across * algorithms. If this nonce function is used in BIP-340 signing as defined * in the spec, an optimized tagging implementation is used. 
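* (A BIP-340 tagged hash of data is SHA256(SHA256(tag) || SHA256(tag) || * data); the optimized path simply starts from the precomputed 64-byte * midstate instead of rehashing the two tag digests.)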
*/ - if (rustsecp256k1_v0_4_0_memcmp_var(algo16, bip340_algo16, 16) == 0) { - rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged(&sha); + if (rustsecp256k1_v0_4_1_memcmp_var(algo16, bip340_algo16, 16) == 0) { + rustsecp256k1_v0_4_1_nonce_function_bip340_sha256_tagged(&sha); } else { int algo16_len = 16; /* Remove terminating null bytes */ while (algo16_len > 0 && !algo16[algo16_len - 1]) { algo16_len--; } - rustsecp256k1_v0_4_0_sha256_initialize_tagged(&sha, algo16, algo16_len); + rustsecp256k1_v0_4_1_sha256_initialize_tagged(&sha, algo16, algo16_len); } /* Hash (masked-)key||pk||msg using the tagged hash as per the spec */ if (data != NULL) { - rustsecp256k1_v0_4_0_sha256_write(&sha, masked_key, 32); + rustsecp256k1_v0_4_1_sha256_write(&sha, masked_key, 32); } else { - rustsecp256k1_v0_4_0_sha256_write(&sha, key32, 32); + rustsecp256k1_v0_4_1_sha256_write(&sha, key32, 32); } - rustsecp256k1_v0_4_0_sha256_write(&sha, xonly_pk32, 32); - rustsecp256k1_v0_4_0_sha256_write(&sha, msg32, 32); - rustsecp256k1_v0_4_0_sha256_finalize(&sha, nonce32); + rustsecp256k1_v0_4_1_sha256_write(&sha, xonly_pk32, 32); + rustsecp256k1_v0_4_1_sha256_write(&sha, msg32, 32); + rustsecp256k1_v0_4_1_sha256_finalize(&sha, nonce32); return 1; } -const rustsecp256k1_v0_4_0_nonce_function_hardened rustsecp256k1_v0_4_0_nonce_function_bip340 = nonce_function_bip340; +const rustsecp256k1_v0_4_1_nonce_function_hardened rustsecp256k1_v0_4_1_nonce_function_bip340 = nonce_function_bip340; /* Initializes SHA256 with fixed midstate. This midstate was computed by applying * SHA256 to SHA256("BIP0340/challenge")||SHA256("BIP0340/challenge"). */ -static void rustsecp256k1_v0_4_0_schnorrsig_sha256_tagged(rustsecp256k1_v0_4_0_sha256 *sha) { - rustsecp256k1_v0_4_0_sha256_initialize(sha); +static void rustsecp256k1_v0_4_1_schnorrsig_sha256_tagged(rustsecp256k1_v0_4_1_sha256 *sha) { + rustsecp256k1_v0_4_1_sha256_initialize(sha); sha->s[0] = 0x9cecba11ul; sha->s[1] = 0x23925381ul; sha->s[2] = 0x11679112ul; @@ -108,132 +108,132 @@ static void rustsecp256k1_v0_4_0_schnorrsig_sha256_tagged(rustsecp256k1_v0_4_0_s sha->bytes = 64; } -static void rustsecp256k1_v0_4_0_schnorrsig_challenge(rustsecp256k1_v0_4_0_scalar* e, const unsigned char *r32, const unsigned char *msg32, const unsigned char *pubkey32) +static void rustsecp256k1_v0_4_1_schnorrsig_challenge(rustsecp256k1_v0_4_1_scalar* e, const unsigned char *r32, const unsigned char *msg32, const unsigned char *pubkey32) { unsigned char buf[32]; - rustsecp256k1_v0_4_0_sha256 sha; + rustsecp256k1_v0_4_1_sha256 sha; /* tagged hash(r.x, pk.x, msg32) */ - rustsecp256k1_v0_4_0_schnorrsig_sha256_tagged(&sha); - rustsecp256k1_v0_4_0_sha256_write(&sha, r32, 32); - rustsecp256k1_v0_4_0_sha256_write(&sha, pubkey32, 32); - rustsecp256k1_v0_4_0_sha256_write(&sha, msg32, 32); - rustsecp256k1_v0_4_0_sha256_finalize(&sha, buf); + rustsecp256k1_v0_4_1_schnorrsig_sha256_tagged(&sha); + rustsecp256k1_v0_4_1_sha256_write(&sha, r32, 32); + rustsecp256k1_v0_4_1_sha256_write(&sha, pubkey32, 32); + rustsecp256k1_v0_4_1_sha256_write(&sha, msg32, 32); + rustsecp256k1_v0_4_1_sha256_finalize(&sha, buf); /* Set scalar e to the challenge hash modulo the curve order as per * BIP340. 
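* That is, e = int(hash_BIP0340/challenge(r.x || pk.x || msg32)) mod n, * where n is the curve order; the reduction happens in the scalar_set_b32 * call below.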
*/ - rustsecp256k1_v0_4_0_scalar_set_b32(e, buf, NULL); + rustsecp256k1_v0_4_1_scalar_set_b32(e, buf, NULL); } -int rustsecp256k1_v0_4_0_schnorrsig_sign(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *sig64, const unsigned char *msg32, const rustsecp256k1_v0_4_0_keypair *keypair, rustsecp256k1_v0_4_0_nonce_function_hardened noncefp, void *ndata) { - rustsecp256k1_v0_4_0_scalar sk; - rustsecp256k1_v0_4_0_scalar e; - rustsecp256k1_v0_4_0_scalar k; - rustsecp256k1_v0_4_0_gej rj; - rustsecp256k1_v0_4_0_ge pk; - rustsecp256k1_v0_4_0_ge r; +int rustsecp256k1_v0_4_1_schnorrsig_sign(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const rustsecp256k1_v0_4_1_keypair *keypair, rustsecp256k1_v0_4_1_nonce_function_hardened noncefp, void *ndata) { + rustsecp256k1_v0_4_1_scalar sk; + rustsecp256k1_v0_4_1_scalar e; + rustsecp256k1_v0_4_1_scalar k; + rustsecp256k1_v0_4_1_gej rj; + rustsecp256k1_v0_4_1_ge pk; + rustsecp256k1_v0_4_1_ge r; unsigned char buf[32] = { 0 }; unsigned char pk_buf[32]; unsigned char seckey[32]; int ret = 1; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(sig64 != NULL); ARG_CHECK(msg32 != NULL); ARG_CHECK(keypair != NULL); if (noncefp == NULL) { - noncefp = rustsecp256k1_v0_4_0_nonce_function_bip340; + noncefp = rustsecp256k1_v0_4_1_nonce_function_bip340; } - ret &= rustsecp256k1_v0_4_0_keypair_load(ctx, &sk, &pk, keypair); + ret &= rustsecp256k1_v0_4_1_keypair_load(ctx, &sk, &pk, keypair); /* Because we are signing for an x-only pubkey, the secret key is negated * before signing if the point corresponding to the secret key does not * have an even Y. */ - if (rustsecp256k1_v0_4_0_fe_is_odd(&pk.y)) { - rustsecp256k1_v0_4_0_scalar_negate(&sk, &sk); + if (rustsecp256k1_v0_4_1_fe_is_odd(&pk.y)) { + rustsecp256k1_v0_4_1_scalar_negate(&sk, &sk); } - rustsecp256k1_v0_4_0_scalar_get_b32(seckey, &sk); - rustsecp256k1_v0_4_0_fe_get_b32(pk_buf, &pk.x); + rustsecp256k1_v0_4_1_scalar_get_b32(seckey, &sk); + rustsecp256k1_v0_4_1_fe_get_b32(pk_buf, &pk.x); ret &= !!noncefp(buf, msg32, seckey, pk_buf, bip340_algo16, ndata); - rustsecp256k1_v0_4_0_scalar_set_b32(&k, buf, NULL); - ret &= !rustsecp256k1_v0_4_0_scalar_is_zero(&k); - rustsecp256k1_v0_4_0_scalar_cmov(&k, &rustsecp256k1_v0_4_0_scalar_one, !ret); + rustsecp256k1_v0_4_1_scalar_set_b32(&k, buf, NULL); + ret &= !rustsecp256k1_v0_4_1_scalar_is_zero(&k); + rustsecp256k1_v0_4_1_scalar_cmov(&k, &rustsecp256k1_v0_4_1_scalar_one, !ret); - rustsecp256k1_v0_4_0_ecmult_gen(&ctx->ecmult_gen_ctx, &rj, &k); - rustsecp256k1_v0_4_0_ge_set_gej(&r, &rj); + rustsecp256k1_v0_4_1_ecmult_gen(&ctx->ecmult_gen_ctx, &rj, &k); + rustsecp256k1_v0_4_1_ge_set_gej(&r, &rj); /* We declassify r to allow using it as a branch point. This is fine * because r is not a secret.
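* (r = k*G is published as the first 32 bytes of the signature anyway; only * the parity of its Y coordinate is branched on, and the secret nonce k is * never declassified.)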
*/ - rustsecp256k1_v0_4_0_declassify(ctx, &r, sizeof(r)); - rustsecp256k1_v0_4_0_fe_normalize_var(&r.y); - if (rustsecp256k1_v0_4_0_fe_is_odd(&r.y)) { - rustsecp256k1_v0_4_0_scalar_negate(&k, &k); + rustsecp256k1_v0_4_1_declassify(ctx, &r, sizeof(r)); + rustsecp256k1_v0_4_1_fe_normalize_var(&r.y); + if (rustsecp256k1_v0_4_1_fe_is_odd(&r.y)) { + rustsecp256k1_v0_4_1_scalar_negate(&k, &k); } - rustsecp256k1_v0_4_0_fe_normalize_var(&r.x); - rustsecp256k1_v0_4_0_fe_get_b32(&sig64[0], &r.x); + rustsecp256k1_v0_4_1_fe_normalize_var(&r.x); + rustsecp256k1_v0_4_1_fe_get_b32(&sig64[0], &r.x); - rustsecp256k1_v0_4_0_schnorrsig_challenge(&e, &sig64[0], msg32, pk_buf); - rustsecp256k1_v0_4_0_scalar_mul(&e, &e, &sk); - rustsecp256k1_v0_4_0_scalar_add(&e, &e, &k); - rustsecp256k1_v0_4_0_scalar_get_b32(&sig64[32], &e); + rustsecp256k1_v0_4_1_schnorrsig_challenge(&e, &sig64[0], msg32, pk_buf); + rustsecp256k1_v0_4_1_scalar_mul(&e, &e, &sk); + rustsecp256k1_v0_4_1_scalar_add(&e, &e, &k); + rustsecp256k1_v0_4_1_scalar_get_b32(&sig64[32], &e); - rustsecp256k1_v0_4_0_memczero(sig64, 64, !ret); - rustsecp256k1_v0_4_0_scalar_clear(&k); - rustsecp256k1_v0_4_0_scalar_clear(&sk); + rustsecp256k1_v0_4_1_memczero(sig64, 64, !ret); + rustsecp256k1_v0_4_1_scalar_clear(&k); + rustsecp256k1_v0_4_1_scalar_clear(&sk); memset(seckey, 0, sizeof(seckey)); return ret; } -int rustsecp256k1_v0_4_0_schnorrsig_verify(const rustsecp256k1_v0_4_0_context* ctx, const unsigned char *sig64, const unsigned char *msg32, const rustsecp256k1_v0_4_0_xonly_pubkey *pubkey) { - rustsecp256k1_v0_4_0_scalar s; - rustsecp256k1_v0_4_0_scalar e; - rustsecp256k1_v0_4_0_gej rj; - rustsecp256k1_v0_4_0_ge pk; - rustsecp256k1_v0_4_0_gej pkj; - rustsecp256k1_v0_4_0_fe rx; - rustsecp256k1_v0_4_0_ge r; +int rustsecp256k1_v0_4_1_schnorrsig_verify(const rustsecp256k1_v0_4_1_context* ctx, const unsigned char *sig64, const unsigned char *msg32, const rustsecp256k1_v0_4_1_xonly_pubkey *pubkey) { + rustsecp256k1_v0_4_1_scalar s; + rustsecp256k1_v0_4_1_scalar e; + rustsecp256k1_v0_4_1_gej rj; + rustsecp256k1_v0_4_1_ge pk; + rustsecp256k1_v0_4_1_gej pkj; + rustsecp256k1_v0_4_1_fe rx; + rustsecp256k1_v0_4_1_ge r; unsigned char buf[32]; int overflow; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)); + ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx)); ARG_CHECK(sig64 != NULL); ARG_CHECK(msg32 != NULL); ARG_CHECK(pubkey != NULL); - if (!rustsecp256k1_v0_4_0_fe_set_b32(&rx, &sig64[0])) { + if (!rustsecp256k1_v0_4_1_fe_set_b32(&rx, &sig64[0])) { return 0; } - rustsecp256k1_v0_4_0_scalar_set_b32(&s, &sig64[32], &overflow); + rustsecp256k1_v0_4_1_scalar_set_b32(&s, &sig64[32], &overflow); if (overflow) { return 0; } - if (!rustsecp256k1_v0_4_0_xonly_pubkey_load(ctx, &pk, pubkey)) { + if (!rustsecp256k1_v0_4_1_xonly_pubkey_load(ctx, &pk, pubkey)) { return 0; } /* Compute e. 
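* As in signing, e = hash_BIP0340/challenge(r || pk.x || msg32) mod n; the * verifier then checks that R = s*G - e*P is not the point at infinity, has * an even Y coordinate, and has X coordinate equal to the r value taken * from the signature.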
*/ - rustsecp256k1_v0_4_0_fe_get_b32(buf, &pk.x); - rustsecp256k1_v0_4_0_schnorrsig_challenge(&e, &sig64[0], msg32, buf); + rustsecp256k1_v0_4_1_fe_get_b32(buf, &pk.x); + rustsecp256k1_v0_4_1_schnorrsig_challenge(&e, &sig64[0], msg32, buf); /* Compute rj = s*G + (-e)*pkj */ - rustsecp256k1_v0_4_0_scalar_negate(&e, &e); - rustsecp256k1_v0_4_0_gej_set_ge(&pkj, &pk); - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &rj, &pkj, &e, &s); + rustsecp256k1_v0_4_1_scalar_negate(&e, &e); + rustsecp256k1_v0_4_1_gej_set_ge(&pkj, &pk); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &rj, &pkj, &e, &s); - rustsecp256k1_v0_4_0_ge_set_gej_var(&r, &rj); - if (rustsecp256k1_v0_4_0_ge_is_infinity(&r)) { + rustsecp256k1_v0_4_1_ge_set_gej_var(&r, &rj); + if (rustsecp256k1_v0_4_1_ge_is_infinity(&r)) { return 0; } - rustsecp256k1_v0_4_0_fe_normalize_var(&r.y); - return !rustsecp256k1_v0_4_0_fe_is_odd(&r.y) && - rustsecp256k1_v0_4_0_fe_equal_var(&rx, &r.x); + rustsecp256k1_v0_4_1_fe_normalize_var(&r.y); + return !rustsecp256k1_v0_4_1_fe_is_odd(&r.y) && + rustsecp256k1_v0_4_1_fe_equal_var(&rx, &r.x); } #endif diff --git a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h index 6f799c2..82e352a 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h @@ -4,10 +4,10 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -#ifndef _SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_ -#define _SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_ +#ifndef SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_H +#define SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_H -#include "include/secp256k1_schnorrsig.h" +#include "../../../include/secp256k1_schnorrsig.h" #include "src/modules/schnorrsig/main_impl.h" static const unsigned char invalid_pubkey_bytes[][32] = { @@ -58,21 +58,21 @@ static const unsigned char invalid_pubkey_bytes[][32] = { #define NUM_INVALID_KEYS (sizeof(invalid_pubkey_bytes) / sizeof(invalid_pubkey_bytes[0])) -static int rustsecp256k1_v0_4_0_hardened_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32, +static int rustsecp256k1_v0_4_1_hardened_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo16, void* data) { - rustsecp256k1_v0_4_0_scalar s; + rustsecp256k1_v0_4_1_scalar s; int *idata = data; (void)msg32; (void)key32; (void)xonly_pk32; (void)algo16; - rustsecp256k1_v0_4_0_scalar_set_int(&s, *idata); - rustsecp256k1_v0_4_0_scalar_get_b32(nonce32, &s); + rustsecp256k1_v0_4_1_scalar_set_int(&s, *idata); + rustsecp256k1_v0_4_1_scalar_get_b32(nonce32, &s); return 1; } -static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_4_0_context *ctx, const rustsecp256k1_v0_4_0_xonly_pubkey* pubkeys, unsigned char (*xonly_pubkey_bytes)[32], const int* parities) { +static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_4_1_context *ctx, const rustsecp256k1_v0_4_1_xonly_pubkey* pubkeys, unsigned char (*xonly_pubkey_bytes)[32], const int* parities) { int d; uint64_t iter = 0; /* Iterate over the possible public keys to verify against (through their corresponding DL d). 
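* (In this small group, pubkeys[d - 1] holds the x-only key for d*G, so * iterating over d covers every possible public key.)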
*/ @@ -98,10 +98,10 @@ static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_4_0_context } /* Randomly generate messages until all challenges have been hit. */ while (e_count_done < EXHAUSTIVE_TEST_ORDER) { - rustsecp256k1_v0_4_0_scalar e; + rustsecp256k1_v0_4_1_scalar e; unsigned char msg32[32]; - rustsecp256k1_v0_4_0_testrand256(msg32); - rustsecp256k1_v0_4_0_schnorrsig_challenge(&e, sig64, msg32, pk32); + rustsecp256k1_v0_4_1_testrand256(msg32); + rustsecp256k1_v0_4_1_schnorrsig_challenge(&e, sig64, msg32, pk32); /* Only do work if we hit a challenge we haven't tried before. */ if (!e_done[e]) { /* Iterate over the possible valid last 32 bytes in the signature. @@ -110,16 +110,16 @@ static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_4_0_context for (s = 0; s <= EXHAUSTIVE_TEST_ORDER + 1; ++s) { int expect_valid, valid; if (s <= EXHAUSTIVE_TEST_ORDER) { - rustsecp256k1_v0_4_0_scalar s_s; - rustsecp256k1_v0_4_0_scalar_set_int(&s_s, s); - rustsecp256k1_v0_4_0_scalar_get_b32(sig64 + 32, &s_s); + rustsecp256k1_v0_4_1_scalar s_s; + rustsecp256k1_v0_4_1_scalar_set_int(&s_s, s); + rustsecp256k1_v0_4_1_scalar_get_b32(sig64 + 32, &s_s); expect_valid = actual_k != -1 && s != EXHAUSTIVE_TEST_ORDER && (s_s == (actual_k + actual_d * e) % EXHAUSTIVE_TEST_ORDER); } else { - rustsecp256k1_v0_4_0_testrand256(sig64 + 32); + rustsecp256k1_v0_4_1_testrand256(sig64 + 32); expect_valid = 0; } - valid = rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig64, msg32, &pubkeys[d - 1]); + valid = rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig64, msg32, &pubkeys[d - 1]); CHECK(valid == expect_valid); count_valid += valid; } @@ -134,7 +134,7 @@ static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_4_0_context } } -static void test_exhaustive_schnorrsig_sign(const rustsecp256k1_v0_4_0_context *ctx, unsigned char (*xonly_pubkey_bytes)[32], const rustsecp256k1_v0_4_0_keypair* keypairs, const int* parities) { +static void test_exhaustive_schnorrsig_sign(const rustsecp256k1_v0_4_1_context *ctx, unsigned char (*xonly_pubkey_bytes)[32], const rustsecp256k1_v0_4_1_keypair* keypairs, const int* parities) { int d, k; uint64_t iter = 0; /* Loop over keys. */ @@ -152,20 +152,20 @@ static void test_exhaustive_schnorrsig_sign(const rustsecp256k1_v0_4_0_context * if (parities[k - 1]) actual_k = EXHAUSTIVE_TEST_ORDER - k; /* Generate random messages until all challenges have been tried. */ while (e_count_done < EXHAUSTIVE_TEST_ORDER) { - rustsecp256k1_v0_4_0_scalar e; - rustsecp256k1_v0_4_0_testrand256(msg32); - rustsecp256k1_v0_4_0_schnorrsig_challenge(&e, xonly_pubkey_bytes[k - 1], msg32, xonly_pubkey_bytes[d - 1]); + rustsecp256k1_v0_4_1_scalar e; + rustsecp256k1_v0_4_1_testrand256(msg32); + rustsecp256k1_v0_4_1_schnorrsig_challenge(&e, xonly_pubkey_bytes[k - 1], msg32, xonly_pubkey_bytes[d - 1]); /* Only do work if we hit a challenge we haven't tried before. */ if (!e_done[e]) { - rustsecp256k1_v0_4_0_scalar expected_s = (actual_k + e * actual_d) % EXHAUSTIVE_TEST_ORDER; + rustsecp256k1_v0_4_1_scalar expected_s = (actual_k + e * actual_d) % EXHAUSTIVE_TEST_ORDER; unsigned char expected_s_bytes[32]; - rustsecp256k1_v0_4_0_scalar_get_b32(expected_s_bytes, &expected_s); + rustsecp256k1_v0_4_1_scalar_get_b32(expected_s_bytes, &expected_s); /* Invoke the real function to construct a signature. 
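* With the small-integer nonce function the signature is fully determined: * its first 32 bytes must be the x-only encoding of k*G, and its last 32 * bytes must equal (actual_k + e*actual_d) mod n, precomputed above as * expected_s.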
*/ - CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig64, msg32, &keypairs[d - 1], rustsecp256k1_v0_4_0_hardened_nonce_function_smallint, &k)); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig64, msg32, &keypairs[d - 1], rustsecp256k1_v0_4_1_hardened_nonce_function_smallint, &k)); /* The first 32 bytes must match the xonly pubkey for the specified k. */ - CHECK(rustsecp256k1_v0_4_0_memcmp_var(sig64, xonly_pubkey_bytes[k - 1], 32) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(sig64, xonly_pubkey_bytes[k - 1], 32) == 0); /* The last 32 bytes must match the expected s value. */ - CHECK(rustsecp256k1_v0_4_0_memcmp_var(sig64 + 32, expected_s_bytes, 32) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(sig64 + 32, expected_s_bytes, 32) == 0); /* Don't retry other messages that result in the same challenge. */ e_done[e] = 1; ++e_count_done; @@ -175,28 +175,28 @@ static void test_exhaustive_schnorrsig_sign(const rustsecp256k1_v0_4_0_context * } } -static void test_exhaustive_schnorrsig(const rustsecp256k1_v0_4_0_context *ctx) { - rustsecp256k1_v0_4_0_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1]; - rustsecp256k1_v0_4_0_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1]; +static void test_exhaustive_schnorrsig(const rustsecp256k1_v0_4_1_context *ctx) { + rustsecp256k1_v0_4_1_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1]; + rustsecp256k1_v0_4_1_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1]; int parity[EXHAUSTIVE_TEST_ORDER - 1]; unsigned char xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - 1][32]; unsigned i; /* Verify that all invalid_pubkey_bytes are actually invalid. */ for (i = 0; i < NUM_INVALID_KEYS; ++i) { - rustsecp256k1_v0_4_0_xonly_pubkey pk; - CHECK(!rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &pk, invalid_pubkey_bytes[i])); + rustsecp256k1_v0_4_1_xonly_pubkey pk; + CHECK(!rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &pk, invalid_pubkey_bytes[i])); } /* Construct keypairs and xonly-pubkeys for the entire group. 
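* Key i is built from the scalar i itself; parity[i - 1] records whether * i*G had to be negated to obtain an even Y, which the sign and verify * tests use to derive the effective secret and nonce.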
*/ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; ++i) { - rustsecp256k1_v0_4_0_scalar scalar_i; + rustsecp256k1_v0_4_1_scalar scalar_i; unsigned char buf[32]; - rustsecp256k1_v0_4_0_scalar_set_int(&scalar_i, i); - rustsecp256k1_v0_4_0_scalar_get_b32(buf, &scalar_i); - CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair[i - 1], buf)); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parity[i - 1], &keypair[i - 1])); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1])); + rustsecp256k1_v0_4_1_scalar_set_int(&scalar_i, i); + rustsecp256k1_v0_4_1_scalar_get_b32(buf, &scalar_i); + CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair[i - 1], buf)); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parity[i - 1], &keypair[i - 1])); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1])); } test_exhaustive_schnorrsig_sign(ctx, xonly_pubkey_bytes, keypair, parity); diff --git a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_impl.h index 0fabecf..f8876fa 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_impl.h @@ -4,10 +4,10 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -#ifndef _SECP256K1_MODULE_SCHNORRSIG_TESTS_ -#define _SECP256K1_MODULE_SCHNORRSIG_TESTS_ +#ifndef SECP256K1_MODULE_SCHNORRSIG_TESTS_H +#define SECP256K1_MODULE_SCHNORRSIG_TESTS_H -#include "secp256k1_schnorrsig.h" +#include "../../../include/secp256k1_schnorrsig.h" /* Checks that a bit flip in the n_flip-th argument (that has n_bytes many * bytes) changes the hash function @@ -15,28 +15,28 @@ void nonce_function_bip340_bitflip(unsigned char **args, size_t n_flip, size_t n_bytes) { unsigned char nonces[2][32]; CHECK(nonce_function_bip340(nonces[0], args[0], args[1], args[2], args[3], args[4]) == 1); - rustsecp256k1_v0_4_0_testrand_flip(args[n_flip], n_bytes); + rustsecp256k1_v0_4_1_testrand_flip(args[n_flip], n_bytes); CHECK(nonce_function_bip340(nonces[1], args[0], args[1], args[2], args[3], args[4]) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(nonces[0], nonces[1], 32) != 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(nonces[0], nonces[1], 32) != 0); } /* Tests for the equality of two sha256 structs. This function only produces a * correct result if an integer multiple of 64 many bytes have been written * into the hash functions. */ -void test_sha256_eq(const rustsecp256k1_v0_4_0_sha256 *sha1, const rustsecp256k1_v0_4_0_sha256 *sha2) { +void test_sha256_eq(const rustsecp256k1_v0_4_1_sha256 *sha1, const rustsecp256k1_v0_4_1_sha256 *sha2) { /* Is buffer fully consumed? 
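test_sha256_eq below can only compare midstates at 64-byte block boundaries: SHA-256 compresses full 64-byte blocks, and any bytes buffered past the last boundary are not yet reflected in the s[] words. A sketch of the same guard in isolation (function name is illustrative):

    #include <stddef.h>

    /* Midstates are comparable only when no partial block is pending, i.e.
     * the byte count is a multiple of the 64-byte SHA-256 block size. */
    static int midstate_comparable(size_t bytes_written) {
        return (bytes_written & 0x3F) == 0;  /* 0x3F == 63 */
    }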
*/ CHECK((sha1->bytes & 0x3F) == 0); CHECK(sha1->bytes == sha2->bytes); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(sha1->s, sha2->s, sizeof(sha1->s)) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(sha1->s, sha2->s, sizeof(sha1->s)) == 0); } void run_nonce_function_bip340_tests(void) { unsigned char tag[13] = "BIP0340/nonce"; unsigned char aux_tag[11] = "BIP0340/aux"; unsigned char algo16[16] = "BIP0340/nonce\0\0\0"; - rustsecp256k1_v0_4_0_sha256 sha; - rustsecp256k1_v0_4_0_sha256 sha_optimized; + rustsecp256k1_v0_4_1_sha256 sha; + rustsecp256k1_v0_4_1_sha256 sha_optimized; unsigned char nonce[32]; unsigned char msg[32]; unsigned char key[32]; @@ -46,23 +46,23 @@ void run_nonce_function_bip340_tests(void) { int i; /* Check that hash initialized by - * rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged has the expected + * rustsecp256k1_v0_4_1_nonce_function_bip340_sha256_tagged has the expected * state. */ - rustsecp256k1_v0_4_0_sha256_initialize_tagged(&sha, tag, sizeof(tag)); - rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged(&sha_optimized); + rustsecp256k1_v0_4_1_sha256_initialize_tagged(&sha, tag, sizeof(tag)); + rustsecp256k1_v0_4_1_nonce_function_bip340_sha256_tagged(&sha_optimized); test_sha256_eq(&sha, &sha_optimized); /* Check that hash initialized by - * rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged_aux has the expected + * rustsecp256k1_v0_4_1_nonce_function_bip340_sha256_tagged_aux has the expected * state. */ - rustsecp256k1_v0_4_0_sha256_initialize_tagged(&sha, aux_tag, sizeof(aux_tag)); - rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged_aux(&sha_optimized); + rustsecp256k1_v0_4_1_sha256_initialize_tagged(&sha, aux_tag, sizeof(aux_tag)); + rustsecp256k1_v0_4_1_nonce_function_bip340_sha256_tagged_aux(&sha_optimized); test_sha256_eq(&sha, &sha_optimized); - rustsecp256k1_v0_4_0_testrand256(msg); - rustsecp256k1_v0_4_0_testrand256(key); - rustsecp256k1_v0_4_0_testrand256(pk); - rustsecp256k1_v0_4_0_testrand256(aux_rand); + rustsecp256k1_v0_4_1_testrand256(msg); + rustsecp256k1_v0_4_1_testrand256(key); + rustsecp256k1_v0_4_1_testrand256(pk); + rustsecp256k1_v0_4_1_testrand256(aux_rand); /* Check that a bitflip in an argument results in different nonces. 
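The bitflip loop that follows relies on every input feeding into the nonce. In BIP-340 the auxiliary randomness enters by masking the secret key before nonce derivation, t = seckey XOR H("BIP0340/aux")(aux_rand). A sketch of just that masking step, assuming aux_hash already holds the tagged 32-byte digest (the helper name is illustrative):

    /* BIP-340 aux masking: t = seckey XOR H_aux(aux_rand). aux_hash is
     * assumed to hold the "BIP0340/aux"-tagged SHA-256 of the randomness. */
    static void bip340_mask_key(unsigned char t[32],
                                const unsigned char seckey[32],
                                const unsigned char aux_hash[32]) {
        int i;
        for (i = 0; i < 32; i++) t[i] = seckey[i] ^ aux_hash[i];
    }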
*/ args[0] = msg; @@ -102,89 +102,89 @@ void test_schnorrsig_api(void) { unsigned char sk2[32]; unsigned char sk3[32]; unsigned char msg[32]; - rustsecp256k1_v0_4_0_keypair keypairs[3]; - rustsecp256k1_v0_4_0_keypair invalid_keypair = { 0 }; - rustsecp256k1_v0_4_0_xonly_pubkey pk[3]; - rustsecp256k1_v0_4_0_xonly_pubkey zero_pk; + rustsecp256k1_v0_4_1_keypair keypairs[3]; + rustsecp256k1_v0_4_1_keypair invalid_keypair = {{ 0 }}; + rustsecp256k1_v0_4_1_xonly_pubkey pk[3]; + rustsecp256k1_v0_4_1_xonly_pubkey zero_pk; unsigned char sig[64]; /** setup **/ - rustsecp256k1_v0_4_0_context *none = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_NONE); - rustsecp256k1_v0_4_0_context *sign = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN); - rustsecp256k1_v0_4_0_context *vrfy = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_VERIFY); - rustsecp256k1_v0_4_0_context *both = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + rustsecp256k1_v0_4_1_context *none = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_NONE); + rustsecp256k1_v0_4_1_context *sign = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN); + rustsecp256k1_v0_4_1_context *vrfy = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_VERIFY); + rustsecp256k1_v0_4_1_context *both = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); int ecount; - rustsecp256k1_v0_4_0_context_set_error_callback(none, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_4_0_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_4_0_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_4_0_context_set_error_callback(both, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_4_0_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_4_0_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_4_0_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_4_0_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_error_callback(both, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_4_0_testrand256(sk1); - rustsecp256k1_v0_4_0_testrand256(sk2); - rustsecp256k1_v0_4_0_testrand256(sk3); - rustsecp256k1_v0_4_0_testrand256(msg); - CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypairs[0], sk1) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypairs[1], sk2) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypairs[2], sk3) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &pk[0], NULL, &keypairs[0]) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &pk[1], NULL, &keypairs[1]) == 1); - 
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &pk[2], NULL, &keypairs[2]) == 1); + rustsecp256k1_v0_4_1_testrand256(sk1); + rustsecp256k1_v0_4_1_testrand256(sk2); + rustsecp256k1_v0_4_1_testrand256(sk3); + rustsecp256k1_v0_4_1_testrand256(msg); + CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypairs[0], sk1) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypairs[1], sk2) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypairs[2], sk3) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &pk[0], NULL, &keypairs[0]) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &pk[1], NULL, &keypairs[1]) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &pk[2], NULL, &keypairs[2]) == 1); memset(&zero_pk, 0, sizeof(zero_pk)); /** main test body **/ ecount = 0; - CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(none, sig, msg, &keypairs[0], NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(none, sig, msg, &keypairs[0], NULL, NULL) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(vrfy, sig, msg, &keypairs[0], NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(vrfy, sig, msg, &keypairs[0], NULL, NULL) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL, NULL) == 1); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(sign, NULL, msg, &keypairs[0], NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(sign, NULL, msg, &keypairs[0], NULL, NULL) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(sign, sig, NULL, &keypairs[0], NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(sign, sig, NULL, &keypairs[0], NULL, NULL) == 0); CHECK(ecount == 4); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(sign, sig, msg, NULL, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(sign, sig, msg, NULL, NULL, NULL) == 0); CHECK(ecount == 5); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(sign, sig, msg, &invalid_keypair, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(sign, sig, msg, &invalid_keypair, NULL, NULL) == 0); CHECK(ecount == 6); ecount = 0; - CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL, NULL) == 1); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(none, sig, msg, &pk[0]) == 0); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(none, sig, msg, &pk[0]) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(sign, sig, msg, &pk[0]) == 0); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(sign, sig, msg, &pk[0]) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(vrfy, sig, msg, &pk[0]) == 1); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(vrfy, sig, msg, &pk[0]) == 1); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(vrfy, NULL, msg, &pk[0]) == 0); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(vrfy, NULL, msg, &pk[0]) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(vrfy, sig, NULL, &pk[0]) == 0); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(vrfy, sig, NULL, &pk[0]) == 0); CHECK(ecount == 4); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(vrfy, sig, msg, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(vrfy, sig, msg, NULL) == 0); CHECK(ecount == 5); - 
CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(vrfy, sig, msg, &zero_pk) == 0); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(vrfy, sig, msg, &zero_pk) == 0); CHECK(ecount == 6); - rustsecp256k1_v0_4_0_context_destroy(none); - rustsecp256k1_v0_4_0_context_destroy(sign); - rustsecp256k1_v0_4_0_context_destroy(vrfy); - rustsecp256k1_v0_4_0_context_destroy(both); + rustsecp256k1_v0_4_1_context_destroy(none); + rustsecp256k1_v0_4_1_context_destroy(sign); + rustsecp256k1_v0_4_1_context_destroy(vrfy); + rustsecp256k1_v0_4_1_context_destroy(both); } -/* Checks that hash initialized by rustsecp256k1_v0_4_0_schnorrsig_sha256_tagged has the +/* Checks that hash initialized by rustsecp256k1_v0_4_1_schnorrsig_sha256_tagged has the * expected state. */ void test_schnorrsig_sha256_tagged(void) { char tag[17] = "BIP0340/challenge"; - rustsecp256k1_v0_4_0_sha256 sha; - rustsecp256k1_v0_4_0_sha256 sha_optimized; + rustsecp256k1_v0_4_1_sha256 sha; + rustsecp256k1_v0_4_1_sha256 sha_optimized; - rustsecp256k1_v0_4_0_sha256_initialize_tagged(&sha, (unsigned char *) tag, sizeof(tag)); - rustsecp256k1_v0_4_0_schnorrsig_sha256_tagged(&sha_optimized); + rustsecp256k1_v0_4_1_sha256_initialize_tagged(&sha, (unsigned char *) tag, sizeof(tag)); + rustsecp256k1_v0_4_1_schnorrsig_sha256_tagged(&sha_optimized); test_sha256_eq(&sha, &sha_optimized); } @@ -192,26 +192,26 @@ void test_schnorrsig_sha256_tagged(void) { * Signs the message and checks that it's the same as expected_sig. */ void test_schnorrsig_bip_vectors_check_signing(const unsigned char *sk, const unsigned char *pk_serialized, unsigned char *aux_rand, const unsigned char *msg, const unsigned char *expected_sig) { unsigned char sig[64]; - rustsecp256k1_v0_4_0_keypair keypair; - rustsecp256k1_v0_4_0_xonly_pubkey pk, pk_expected; + rustsecp256k1_v0_4_1_keypair keypair; + rustsecp256k1_v0_4_1_xonly_pubkey pk, pk_expected; - CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk)); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, aux_rand)); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(sig, expected_sig, 64) == 0); + CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk)); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, aux_rand)); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(sig, expected_sig, 64) == 0); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &pk_expected, pk_serialized)); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &pk, NULL, &keypair)); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pk, &pk_expected, sizeof(pk)) == 0); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig, msg, &pk)); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &pk_expected, pk_serialized)); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &pk, NULL, &keypair)); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pk, &pk_expected, sizeof(pk)) == 0); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig, msg, &pk)); } /* Helper function for schnorrsig_bip_vectors * Checks that both verify and verify_batch (TODO) return the same value as expected. 
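The two helpers above wrap the public API; for reference, a minimal caller of the verify path under this patch's prefixed symbol names. The include path and function name are illustrative (the tests themselves use a relative include):

    #include "include/secp256k1_schnorrsig.h"

    /* 1 iff sig64 is a valid BIP-340 signature on msg32 under the x-only
     * public key serialized in pk32. ctx needs VERIFY capability. */
    static int verify_one(const rustsecp256k1_v0_4_1_context *ctx,
                          const unsigned char sig64[64],
                          const unsigned char msg32[32],
                          const unsigned char pk32[32]) {
        rustsecp256k1_v0_4_1_xonly_pubkey pk;
        if (!rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &pk, pk32)) return 0;
        return rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig64, msg32, &pk);
    }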
*/ void test_schnorrsig_bip_vectors_check_verify(const unsigned char *pk_serialized, const unsigned char *msg32, const unsigned char *sig, int expected) { - rustsecp256k1_v0_4_0_xonly_pubkey pk; + rustsecp256k1_v0_4_1_xonly_pubkey pk; - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &pk, pk_serialized)); - CHECK(expected == rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig, msg32, &pk)); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &pk, pk_serialized)); + CHECK(expected == rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig, msg32, &pk)); } /* Test vectors according to BIP-340 ("Schnorr Signatures for secp256k1"). See @@ -407,9 +407,9 @@ void test_schnorrsig_bip_vectors(void) { 0xEB, 0x98, 0x98, 0xAE, 0x79, 0xB9, 0x76, 0x87, 0x66, 0xE4, 0xFA, 0xA0, 0x4A, 0x2D, 0x4A, 0x34 }; - rustsecp256k1_v0_4_0_xonly_pubkey pk_parsed; + rustsecp256k1_v0_4_1_xonly_pubkey pk_parsed; /* No need to check the signature of the test vector as parsing the pubkey already fails */ - CHECK(!rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &pk_parsed, pk)); + CHECK(!rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &pk_parsed, pk)); } { /* Test vector 6 */ @@ -627,9 +627,9 @@ void test_schnorrsig_bip_vectors(void) { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x30 }; - rustsecp256k1_v0_4_0_xonly_pubkey pk_parsed; + rustsecp256k1_v0_4_1_xonly_pubkey pk_parsed; /* No need to check the signature of the test vector as parsing the pubkey already fails */ - CHECK(!rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &pk_parsed, pk)); + CHECK(!rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &pk_parsed, pk)); } } @@ -670,24 +670,24 @@ static int nonce_function_overflowing(unsigned char *nonce32, const unsigned cha void test_schnorrsig_sign(void) { unsigned char sk[32]; - rustsecp256k1_v0_4_0_keypair keypair; + rustsecp256k1_v0_4_1_keypair keypair; const unsigned char msg[32] = "this is a msg for a schnorrsig.."; unsigned char sig[64]; unsigned char zeros64[64] = { 0 }; - rustsecp256k1_v0_4_0_testrand256(sk); - CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk)); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1); + rustsecp256k1_v0_4_1_testrand256(sk); + CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk)); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1); /* Test different nonce functions */ memset(sig, 1, sizeof(sig)); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_failing, NULL) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(sig, zeros64, sizeof(sig)) == 0); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_failing, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(sig, zeros64, sizeof(sig)) == 0); memset(&sig, 1, sizeof(sig)); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_0, NULL) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(sig, zeros64, sizeof(sig)) == 0); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_overflowing, NULL) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(sig, zeros64, sizeof(sig)) != 0); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_0, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(sig, zeros64, sizeof(sig)) == 0); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_overflowing, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(sig, zeros64, 
sizeof(sig)) != 0); } #define N_SIGS 3 @@ -699,66 +699,66 @@ void test_schnorrsig_sign_verify(void) { unsigned char msg[N_SIGS][32]; unsigned char sig[N_SIGS][64]; size_t i; - rustsecp256k1_v0_4_0_keypair keypair; - rustsecp256k1_v0_4_0_xonly_pubkey pk; - rustsecp256k1_v0_4_0_scalar s; + rustsecp256k1_v0_4_1_keypair keypair; + rustsecp256k1_v0_4_1_xonly_pubkey pk; + rustsecp256k1_v0_4_1_scalar s; - rustsecp256k1_v0_4_0_testrand256(sk); - CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk)); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &pk, NULL, &keypair)); + rustsecp256k1_v0_4_1_testrand256(sk); + CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk)); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &pk, NULL, &keypair)); for (i = 0; i < N_SIGS; i++) { - rustsecp256k1_v0_4_0_testrand256(msg[i]); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig[i], msg[i], &keypair, NULL, NULL)); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig[i], msg[i], &pk)); + rustsecp256k1_v0_4_1_testrand256(msg[i]); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig[i], msg[i], &keypair, NULL, NULL)); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig[i], msg[i], &pk)); } { /* Flip a few bits in the signature and in the message and check that * verify and verify_batch (TODO) fail */ - size_t sig_idx = rustsecp256k1_v0_4_0_testrand_int(N_SIGS); - size_t byte_idx = rustsecp256k1_v0_4_0_testrand_int(32); - unsigned char xorbyte = rustsecp256k1_v0_4_0_testrand_int(254)+1; + size_t sig_idx = rustsecp256k1_v0_4_1_testrand_int(N_SIGS); + size_t byte_idx = rustsecp256k1_v0_4_1_testrand_int(32); + unsigned char xorbyte = rustsecp256k1_v0_4_1_testrand_int(254)+1; sig[sig_idx][byte_idx] ^= xorbyte; - CHECK(!rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk)); + CHECK(!rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk)); sig[sig_idx][byte_idx] ^= xorbyte; - byte_idx = rustsecp256k1_v0_4_0_testrand_int(32); + byte_idx = rustsecp256k1_v0_4_1_testrand_int(32); sig[sig_idx][32+byte_idx] ^= xorbyte; - CHECK(!rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk)); + CHECK(!rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk)); sig[sig_idx][32+byte_idx] ^= xorbyte; - byte_idx = rustsecp256k1_v0_4_0_testrand_int(32); + byte_idx = rustsecp256k1_v0_4_1_testrand_int(32); msg[sig_idx][byte_idx] ^= xorbyte; - CHECK(!rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk)); + CHECK(!rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk)); msg[sig_idx][byte_idx] ^= xorbyte; /* Check that above bitflips have been reversed correctly */ - CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk)); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk)); } /* Test overflowing s */ - CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL, NULL)); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig[0], msg[0], &pk)); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL, NULL)); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig[0], msg[0], &pk)); memset(&sig[0][32], 0xFF, 32); - CHECK(!rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig[0], msg[0], &pk)); + CHECK(!rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig[0], msg[0], &pk)); /* Test negative s */ - CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL, NULL)); - 
CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig[0], msg[0], &pk)); - rustsecp256k1_v0_4_0_scalar_set_b32(&s, &sig[0][32], NULL); - rustsecp256k1_v0_4_0_scalar_negate(&s, &s); - rustsecp256k1_v0_4_0_scalar_get_b32(&sig[0][32], &s); - CHECK(!rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig[0], msg[0], &pk)); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL, NULL)); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig[0], msg[0], &pk)); + rustsecp256k1_v0_4_1_scalar_set_b32(&s, &sig[0][32], NULL); + rustsecp256k1_v0_4_1_scalar_negate(&s, &s); + rustsecp256k1_v0_4_1_scalar_get_b32(&sig[0][32], &s); + CHECK(!rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig[0], msg[0], &pk)); } #undef N_SIGS void test_schnorrsig_taproot(void) { unsigned char sk[32]; - rustsecp256k1_v0_4_0_keypair keypair; - rustsecp256k1_v0_4_0_xonly_pubkey internal_pk; + rustsecp256k1_v0_4_1_keypair keypair; + rustsecp256k1_v0_4_1_xonly_pubkey internal_pk; unsigned char internal_pk_bytes[32]; - rustsecp256k1_v0_4_0_xonly_pubkey output_pk; + rustsecp256k1_v0_4_1_xonly_pubkey output_pk; unsigned char output_pk_bytes[32]; unsigned char tweak[32]; int pk_parity; @@ -766,27 +766,27 @@ void test_schnorrsig_taproot(void) { unsigned char sig[64]; /* Create output key */ - rustsecp256k1_v0_4_0_testrand256(sk); - CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1); + rustsecp256k1_v0_4_1_testrand256(sk); + CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1); /* In actual taproot the tweak would be hash of internal_pk */ - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, tweak, &internal_pk) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1); - CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &output_pk, &pk_parity, &keypair) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, output_pk_bytes, &output_pk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, tweak, &internal_pk) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1); + CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &output_pk, &pk_parity, &keypair) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, output_pk_bytes, &output_pk) == 1); /* Key spend */ - rustsecp256k1_v0_4_0_testrand256(msg); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1); + rustsecp256k1_v0_4_1_testrand256(msg); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1); /* Verify key spend */ - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &output_pk, output_pk_bytes) == 1); - CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig, msg, &output_pk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &output_pk, output_pk_bytes) == 1); + CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig, msg, &output_pk) == 1); /* Script spend */ - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, internal_pk_bytes, &internal_pk) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, internal_pk_bytes, &internal_pk) == 1); /* Verify script spend */ - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &internal_pk, internal_pk_bytes) == 1); - CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(ctx, output_pk_bytes, pk_parity, &internal_pk, tweak) == 1); + 
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &internal_pk, internal_pk_bytes) == 1); + CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(ctx, output_pk_bytes, pk_parity, &internal_pk, tweak) == 1); } void run_schnorrsig_tests(void) { diff --git a/secp256k1-sys/depend/secp256k1/src/num.h b/secp256k1-sys/depend/secp256k1/src/num.h deleted file mode 100644 index b0bb88e..0000000 --- a/secp256k1-sys/depend/secp256k1/src/num.h +++ /dev/null @@ -1,74 +0,0 @@ -/*********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or https://www.opensource.org/licenses/mit-license.php.* - ***********************************************************************/ - -#ifndef SECP256K1_NUM_H -#define SECP256K1_NUM_H - -#ifndef USE_NUM_NONE - -#if defined HAVE_CONFIG_H -#include "libsecp256k1-config.h" -#endif - -#if defined(USE_NUM_GMP) -#include "num_gmp.h" -#else -#error "Please select num implementation" -#endif - -/** Copy a number. */ -static void rustsecp256k1_v0_4_0_num_copy(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a); - -/** Convert a number's absolute value to a binary big-endian string. - * There must be enough place. */ -static void rustsecp256k1_v0_4_0_num_get_bin(unsigned char *r, unsigned int rlen, const rustsecp256k1_v0_4_0_num *a); - -/** Set a number to the value of a binary big-endian string. */ -static void rustsecp256k1_v0_4_0_num_set_bin(rustsecp256k1_v0_4_0_num *r, const unsigned char *a, unsigned int alen); - -/** Compute a modular inverse. The input must be less than the modulus. */ -static void rustsecp256k1_v0_4_0_num_mod_inverse(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *m); - -/** Compute the jacobi symbol (a|b). b must be positive and odd. */ -static int rustsecp256k1_v0_4_0_num_jacobi(const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b); - -/** Compare the absolute value of two numbers. */ -static int rustsecp256k1_v0_4_0_num_cmp(const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b); - -/** Test whether two number are equal (including sign). */ -static int rustsecp256k1_v0_4_0_num_eq(const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b); - -/** Add two (signed) numbers. */ -static void rustsecp256k1_v0_4_0_num_add(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b); - -/** Subtract two (signed) numbers. */ -static void rustsecp256k1_v0_4_0_num_sub(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b); - -/** Multiply two (signed) numbers. */ -static void rustsecp256k1_v0_4_0_num_mul(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b); - -/** Replace a number by its remainder modulo m. M's sign is ignored. The result is a number between 0 and m-1, - even if r was negative. */ -static void rustsecp256k1_v0_4_0_num_mod(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *m); - -/** Right-shift the passed number by bits bits. */ -static void rustsecp256k1_v0_4_0_num_shift(rustsecp256k1_v0_4_0_num *r, int bits); - -/** Check whether a number is zero. */ -static int rustsecp256k1_v0_4_0_num_is_zero(const rustsecp256k1_v0_4_0_num *a); - -/** Check whether a number is one. 
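The num.h/num_gmp*/num_impl.h files deleted here were the last GMP-backed bignum layer; modular inversion now goes through the constant-time safegcd code instead (note the modinv64_impl.h include added to scalar_4x64_impl.h further down). For contrast only, the classic variable-time extended-Euclid inverse that such a layer traditionally provided, as a sketch; this is emphatically not the algorithm the library now uses, and the function name is illustrative:

    #include <stdint.h>

    /* Naive variable-time modular inverse of a mod m (m > 1), for intuition
     * only; returns -1 if a is not invertible. */
    static int64_t naive_modinv(int64_t a, int64_t m) {
        int64_t t = 0, newt = 1, r = m, newr = a % m, q, tmp;
        while (newr != 0) {
            q = r / newr;
            tmp = t - q * newt; t = newt; newt = tmp;
            tmp = r - q * newr; r = newr; newr = tmp;
        }
        if (r > 1) return -1;      /* not invertible */
        return t < 0 ? t + m : t;  /* normalize into [0, m) */
    }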
*/ -static int rustsecp256k1_v0_4_0_num_is_one(const rustsecp256k1_v0_4_0_num *a); - -/** Check whether a number is strictly negative. */ -static int rustsecp256k1_v0_4_0_num_is_neg(const rustsecp256k1_v0_4_0_num *a); - -/** Change a number's sign. */ -static void rustsecp256k1_v0_4_0_num_negate(rustsecp256k1_v0_4_0_num *r); - -#endif - -#endif /* SECP256K1_NUM_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/num_gmp.h b/secp256k1-sys/depend/secp256k1/src/num_gmp.h deleted file mode 100644 index a42bef1..0000000 --- a/secp256k1-sys/depend/secp256k1/src/num_gmp.h +++ /dev/null @@ -1,20 +0,0 @@ -/*********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or https://www.opensource.org/licenses/mit-license.php.* - ***********************************************************************/ - -#ifndef SECP256K1_NUM_REPR_H -#define SECP256K1_NUM_REPR_H - -#include - -#define NUM_LIMBS ((256+GMP_NUMB_BITS-1)/GMP_NUMB_BITS) - -typedef struct { - mp_limb_t data[2*NUM_LIMBS]; - int neg; - int limbs; -} rustsecp256k1_v0_4_0_num; - -#endif /* SECP256K1_NUM_REPR_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/num_gmp_impl.h b/secp256k1-sys/depend/secp256k1/src/num_gmp_impl.h deleted file mode 100644 index cef48dd..0000000 --- a/secp256k1-sys/depend/secp256k1/src/num_gmp_impl.h +++ /dev/null @@ -1,288 +0,0 @@ -/*********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or https://www.opensource.org/licenses/mit-license.php.* - ***********************************************************************/ - -#ifndef SECP256K1_NUM_REPR_IMPL_H -#define SECP256K1_NUM_REPR_IMPL_H - -#include -#include -#include - -#include "util.h" -#include "num.h" - -#ifdef VERIFY -static void rustsecp256k1_v0_4_0_num_sanity(const rustsecp256k1_v0_4_0_num *a) { - VERIFY_CHECK(a->limbs == 1 || (a->limbs > 1 && a->data[a->limbs-1] != 0)); -} -#else -#define rustsecp256k1_v0_4_0_num_sanity(a) do { } while(0) -#endif - -static void rustsecp256k1_v0_4_0_num_copy(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a) { - *r = *a; -} - -static void rustsecp256k1_v0_4_0_num_get_bin(unsigned char *r, unsigned int rlen, const rustsecp256k1_v0_4_0_num *a) { - unsigned char tmp[65]; - int len = 0; - int shift = 0; - if (a->limbs>1 || a->data[0] != 0) { - len = mpn_get_str(tmp, 256, (mp_limb_t*)a->data, a->limbs); - } - while (shift < len && tmp[shift] == 0) shift++; - VERIFY_CHECK(len-shift <= (int)rlen); - memset(r, 0, rlen - len + shift); - if (len > shift) { - memcpy(r + rlen - len + shift, tmp + shift, len - shift); - } - memset(tmp, 0, sizeof(tmp)); -} - -static void rustsecp256k1_v0_4_0_num_set_bin(rustsecp256k1_v0_4_0_num *r, const unsigned char *a, unsigned int alen) { - int len; - VERIFY_CHECK(alen > 0); - VERIFY_CHECK(alen <= 64); - len = mpn_set_str(r->data, a, alen, 256); - if (len == 0) { - r->data[0] = 0; - len = 1; - } - VERIFY_CHECK(len <= NUM_LIMBS*2); - r->limbs = len; - r->neg = 0; - while (r->limbs > 1 && r->data[r->limbs-1]==0) { - r->limbs--; - } -} - -static void rustsecp256k1_v0_4_0_num_add_abs(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b) { - mp_limb_t c = mpn_add(r->data, a->data, a->limbs, b->data, b->limbs); - r->limbs = a->limbs; - if (c != 0) { - 
VERIFY_CHECK(r->limbs < 2*NUM_LIMBS); - r->data[r->limbs++] = c; - } -} - -static void rustsecp256k1_v0_4_0_num_sub_abs(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b) { - mp_limb_t c = mpn_sub(r->data, a->data, a->limbs, b->data, b->limbs); - (void)c; - VERIFY_CHECK(c == 0); - r->limbs = a->limbs; - while (r->limbs > 1 && r->data[r->limbs-1]==0) { - r->limbs--; - } -} - -static void rustsecp256k1_v0_4_0_num_mod(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *m) { - rustsecp256k1_v0_4_0_num_sanity(r); - rustsecp256k1_v0_4_0_num_sanity(m); - - if (r->limbs >= m->limbs) { - mp_limb_t t[2*NUM_LIMBS]; - mpn_tdiv_qr(t, r->data, 0, r->data, r->limbs, m->data, m->limbs); - memset(t, 0, sizeof(t)); - r->limbs = m->limbs; - while (r->limbs > 1 && r->data[r->limbs-1]==0) { - r->limbs--; - } - } - - if (r->neg && (r->limbs > 1 || r->data[0] != 0)) { - rustsecp256k1_v0_4_0_num_sub_abs(r, m, r); - r->neg = 0; - } -} - -static void rustsecp256k1_v0_4_0_num_mod_inverse(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *m) { - int i; - mp_limb_t g[NUM_LIMBS+1]; - mp_limb_t u[NUM_LIMBS+1]; - mp_limb_t v[NUM_LIMBS+1]; - mp_size_t sn; - mp_size_t gn; - rustsecp256k1_v0_4_0_num_sanity(a); - rustsecp256k1_v0_4_0_num_sanity(m); - - /** mpn_gcdext computes: (G,S) = gcdext(U,V), where - * * G = gcd(U,V) - * * G = U*S + V*T - * * U has equal or more limbs than V, and V has no padding - * If we set U to be (a padded version of) a, and V = m: - * G = a*S + m*T - * G = a*S mod m - * Assuming G=1: - * S = 1/a mod m - */ - VERIFY_CHECK(m->limbs <= NUM_LIMBS); - VERIFY_CHECK(m->data[m->limbs-1] != 0); - for (i = 0; i < m->limbs; i++) { - u[i] = (i < a->limbs) ? a->data[i] : 0; - v[i] = m->data[i]; - } - sn = NUM_LIMBS+1; - gn = mpn_gcdext(g, r->data, &sn, u, m->limbs, v, m->limbs); - (void)gn; - VERIFY_CHECK(gn == 1); - VERIFY_CHECK(g[0] == 1); - r->neg = a->neg ^ m->neg; - if (sn < 0) { - mpn_sub(r->data, m->data, m->limbs, r->data, -sn); - r->limbs = m->limbs; - while (r->limbs > 1 && r->data[r->limbs-1]==0) { - r->limbs--; - } - } else { - r->limbs = sn; - } - memset(g, 0, sizeof(g)); - memset(u, 0, sizeof(u)); - memset(v, 0, sizeof(v)); -} - -static int rustsecp256k1_v0_4_0_num_jacobi(const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b) { - int ret; - mpz_t ga, gb; - rustsecp256k1_v0_4_0_num_sanity(a); - rustsecp256k1_v0_4_0_num_sanity(b); - VERIFY_CHECK(!b->neg && (b->limbs > 0) && (b->data[0] & 1)); - - mpz_inits(ga, gb, NULL); - - mpz_import(gb, b->limbs, -1, sizeof(mp_limb_t), 0, 0, b->data); - mpz_import(ga, a->limbs, -1, sizeof(mp_limb_t), 0, 0, a->data); - if (a->neg) { - mpz_neg(ga, ga); - } - - ret = mpz_jacobi(ga, gb); - - mpz_clears(ga, gb, NULL); - - return ret; -} - -static int rustsecp256k1_v0_4_0_num_is_one(const rustsecp256k1_v0_4_0_num *a) { - return (a->limbs == 1 && a->data[0] == 1); -} - -static int rustsecp256k1_v0_4_0_num_is_zero(const rustsecp256k1_v0_4_0_num *a) { - return (a->limbs == 1 && a->data[0] == 0); -} - -static int rustsecp256k1_v0_4_0_num_is_neg(const rustsecp256k1_v0_4_0_num *a) { - return (a->limbs > 1 || a->data[0] != 0) && a->neg; -} - -static int rustsecp256k1_v0_4_0_num_cmp(const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b) { - if (a->limbs > b->limbs) { - return 1; - } - if (a->limbs < b->limbs) { - return -1; - } - return mpn_cmp(a->data, b->data, a->limbs); -} - -static int rustsecp256k1_v0_4_0_num_eq(const 
rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b) { - if (a->limbs > b->limbs) { - return 0; - } - if (a->limbs < b->limbs) { - return 0; - } - if ((a->neg && !rustsecp256k1_v0_4_0_num_is_zero(a)) != (b->neg && !rustsecp256k1_v0_4_0_num_is_zero(b))) { - return 0; - } - return mpn_cmp(a->data, b->data, a->limbs) == 0; -} - -static void rustsecp256k1_v0_4_0_num_subadd(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b, int bneg) { - if (!(b->neg ^ bneg ^ a->neg)) { /* a and b have the same sign */ - r->neg = a->neg; - if (a->limbs >= b->limbs) { - rustsecp256k1_v0_4_0_num_add_abs(r, a, b); - } else { - rustsecp256k1_v0_4_0_num_add_abs(r, b, a); - } - } else { - if (rustsecp256k1_v0_4_0_num_cmp(a, b) > 0) { - r->neg = a->neg; - rustsecp256k1_v0_4_0_num_sub_abs(r, a, b); - } else { - r->neg = b->neg ^ bneg; - rustsecp256k1_v0_4_0_num_sub_abs(r, b, a); - } - } -} - -static void rustsecp256k1_v0_4_0_num_add(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b) { - rustsecp256k1_v0_4_0_num_sanity(a); - rustsecp256k1_v0_4_0_num_sanity(b); - rustsecp256k1_v0_4_0_num_subadd(r, a, b, 0); -} - -static void rustsecp256k1_v0_4_0_num_sub(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b) { - rustsecp256k1_v0_4_0_num_sanity(a); - rustsecp256k1_v0_4_0_num_sanity(b); - rustsecp256k1_v0_4_0_num_subadd(r, a, b, 1); -} - -static void rustsecp256k1_v0_4_0_num_mul(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b) { - mp_limb_t tmp[2*NUM_LIMBS+1]; - rustsecp256k1_v0_4_0_num_sanity(a); - rustsecp256k1_v0_4_0_num_sanity(b); - - VERIFY_CHECK(a->limbs + b->limbs <= 2*NUM_LIMBS+1); - if ((a->limbs==1 && a->data[0]==0) || (b->limbs==1 && b->data[0]==0)) { - r->limbs = 1; - r->neg = 0; - r->data[0] = 0; - return; - } - if (a->limbs >= b->limbs) { - mpn_mul(tmp, a->data, a->limbs, b->data, b->limbs); - } else { - mpn_mul(tmp, b->data, b->limbs, a->data, a->limbs); - } - r->limbs = a->limbs + b->limbs; - if (r->limbs > 1 && tmp[r->limbs - 1]==0) { - r->limbs--; - } - VERIFY_CHECK(r->limbs <= 2*NUM_LIMBS); - mpn_copyi(r->data, tmp, r->limbs); - r->neg = a->neg ^ b->neg; - memset(tmp, 0, sizeof(tmp)); -} - -static void rustsecp256k1_v0_4_0_num_shift(rustsecp256k1_v0_4_0_num *r, int bits) { - if (bits % GMP_NUMB_BITS) { - /* Shift within limbs. */ - mpn_rshift(r->data, r->data, r->limbs, bits % GMP_NUMB_BITS); - } - if (bits >= GMP_NUMB_BITS) { - int i; - /* Shift full limbs. 
*/ - for (i = 0; i < r->limbs; i++) { - int index = i + (bits / GMP_NUMB_BITS); - if (index < r->limbs && index < 2*NUM_LIMBS) { - r->data[i] = r->data[index]; - } else { - r->data[i] = 0; - } - } - } - while (r->limbs>1 && r->data[r->limbs-1]==0) { - r->limbs--; - } -} - -static void rustsecp256k1_v0_4_0_num_negate(rustsecp256k1_v0_4_0_num *r) { - r->neg ^= 1; -} - -#endif /* SECP256K1_NUM_REPR_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/num_impl.h b/secp256k1-sys/depend/secp256k1/src/num_impl.h deleted file mode 100644 index 880598e..0000000 --- a/secp256k1-sys/depend/secp256k1/src/num_impl.h +++ /dev/null @@ -1,24 +0,0 @@ -/*********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or https://www.opensource.org/licenses/mit-license.php.* - ***********************************************************************/ - -#ifndef SECP256K1_NUM_IMPL_H -#define SECP256K1_NUM_IMPL_H - -#if defined HAVE_CONFIG_H -#include "libsecp256k1-config.h" -#endif - -#include "num.h" - -#if defined(USE_NUM_GMP) -#include "num_gmp_impl.h" -#elif defined(USE_NUM_NONE) -/* Nothing. */ -#else -#error "Please select num implementation" -#endif - -#endif /* SECP256K1_NUM_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/scalar.h b/secp256k1-sys/depend/secp256k1/src/scalar.h index a1f2c3a..a234ac3 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar.h @@ -7,7 +7,6 @@ #ifndef SECP256K1_SCALAR_H #define SECP256K1_SCALAR_H -#include "num.h" #include "util.h" #if defined HAVE_CONFIG_H @@ -25,93 +24,82 @@ #endif /** Clear a scalar to prevent the leak of sensitive data. */ -static void rustsecp256k1_v0_4_0_scalar_clear(rustsecp256k1_v0_4_0_scalar *r); +static void rustsecp256k1_v0_4_1_scalar_clear(rustsecp256k1_v0_4_1_scalar *r); /** Access bits from a scalar. All requested bits must belong to the same 32-bit limb. */ -static unsigned int rustsecp256k1_v0_4_0_scalar_get_bits(const rustsecp256k1_v0_4_0_scalar *a, unsigned int offset, unsigned int count); +static unsigned int rustsecp256k1_v0_4_1_scalar_get_bits(const rustsecp256k1_v0_4_1_scalar *a, unsigned int offset, unsigned int count); /** Access bits from a scalar. Not constant time. */ -static unsigned int rustsecp256k1_v0_4_0_scalar_get_bits_var(const rustsecp256k1_v0_4_0_scalar *a, unsigned int offset, unsigned int count); +static unsigned int rustsecp256k1_v0_4_1_scalar_get_bits_var(const rustsecp256k1_v0_4_1_scalar *a, unsigned int offset, unsigned int count); /** Set a scalar from a big endian byte array. The scalar will be reduced modulo group order `n`. * In: bin: pointer to a 32-byte array. * Out: r: scalar to be set. * overflow: non-zero if the scalar was bigger or equal to `n` before reduction, zero otherwise (can be NULL). */ -static void rustsecp256k1_v0_4_0_scalar_set_b32(rustsecp256k1_v0_4_0_scalar *r, const unsigned char *bin, int *overflow); +static void rustsecp256k1_v0_4_1_scalar_set_b32(rustsecp256k1_v0_4_1_scalar *r, const unsigned char *bin, int *overflow); /** Set a scalar from a big endian byte array and returns 1 if it is a valid * seckey and 0 otherwise. */ -static int rustsecp256k1_v0_4_0_scalar_set_b32_seckey(rustsecp256k1_v0_4_0_scalar *r, const unsigned char *bin); +static int rustsecp256k1_v0_4_1_scalar_set_b32_seckey(rustsecp256k1_v0_4_1_scalar *r, const unsigned char *bin); /** Set a scalar to an unsigned integer. 
*/ -static void rustsecp256k1_v0_4_0_scalar_set_int(rustsecp256k1_v0_4_0_scalar *r, unsigned int v); +static void rustsecp256k1_v0_4_1_scalar_set_int(rustsecp256k1_v0_4_1_scalar *r, unsigned int v); /** Convert a scalar to a byte array. */ -static void rustsecp256k1_v0_4_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_4_0_scalar* a); +static void rustsecp256k1_v0_4_1_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_4_1_scalar* a); /** Add two scalars together (modulo the group order). Returns whether it overflowed. */ -static int rustsecp256k1_v0_4_0_scalar_add(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b); +static int rustsecp256k1_v0_4_1_scalar_add(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b); /** Conditionally add a power of two to a scalar. The result is not allowed to overflow. */ -static void rustsecp256k1_v0_4_0_scalar_cadd_bit(rustsecp256k1_v0_4_0_scalar *r, unsigned int bit, int flag); +static void rustsecp256k1_v0_4_1_scalar_cadd_bit(rustsecp256k1_v0_4_1_scalar *r, unsigned int bit, int flag); /** Multiply two scalars (modulo the group order). */ -static void rustsecp256k1_v0_4_0_scalar_mul(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b); +static void rustsecp256k1_v0_4_1_scalar_mul(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b); /** Shift a scalar right by some amount strictly between 0 and 16, returning * the low bits that were shifted off */ -static int rustsecp256k1_v0_4_0_scalar_shr_int(rustsecp256k1_v0_4_0_scalar *r, int n); - -/** Compute the square of a scalar (modulo the group order). */ -static void rustsecp256k1_v0_4_0_scalar_sqr(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a); +static int rustsecp256k1_v0_4_1_scalar_shr_int(rustsecp256k1_v0_4_1_scalar *r, int n); /** Compute the inverse of a scalar (modulo the group order). */ -static void rustsecp256k1_v0_4_0_scalar_inverse(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a); +static void rustsecp256k1_v0_4_1_scalar_inverse(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a); /** Compute the inverse of a scalar (modulo the group order), without constant-time guarantee. */ -static void rustsecp256k1_v0_4_0_scalar_inverse_var(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a); +static void rustsecp256k1_v0_4_1_scalar_inverse_var(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a); /** Compute the complement of a scalar (modulo the group order). */ -static void rustsecp256k1_v0_4_0_scalar_negate(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a); +static void rustsecp256k1_v0_4_1_scalar_negate(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a); /** Check whether a scalar equals zero. */ -static int rustsecp256k1_v0_4_0_scalar_is_zero(const rustsecp256k1_v0_4_0_scalar *a); +static int rustsecp256k1_v0_4_1_scalar_is_zero(const rustsecp256k1_v0_4_1_scalar *a); /** Check whether a scalar equals one. */ -static int rustsecp256k1_v0_4_0_scalar_is_one(const rustsecp256k1_v0_4_0_scalar *a); +static int rustsecp256k1_v0_4_1_scalar_is_one(const rustsecp256k1_v0_4_1_scalar *a); /** Check whether a scalar, considered as an nonnegative integer, is even. 
*/ -static int rustsecp256k1_v0_4_0_scalar_is_even(const rustsecp256k1_v0_4_0_scalar *a); +static int rustsecp256k1_v0_4_1_scalar_is_even(const rustsecp256k1_v0_4_1_scalar *a); /** Check whether a scalar is higher than the group order divided by 2. */ -static int rustsecp256k1_v0_4_0_scalar_is_high(const rustsecp256k1_v0_4_0_scalar *a); +static int rustsecp256k1_v0_4_1_scalar_is_high(const rustsecp256k1_v0_4_1_scalar *a); /** Conditionally negate a number, in constant time. * Returns -1 if the number was negated, 1 otherwise */ -static int rustsecp256k1_v0_4_0_scalar_cond_negate(rustsecp256k1_v0_4_0_scalar *a, int flag); - -#ifndef USE_NUM_NONE -/** Convert a scalar to a number. */ -static void rustsecp256k1_v0_4_0_scalar_get_num(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_scalar *a); - -/** Get the order of the group as a number. */ -static void rustsecp256k1_v0_4_0_scalar_order_get_num(rustsecp256k1_v0_4_0_num *r); -#endif +static int rustsecp256k1_v0_4_1_scalar_cond_negate(rustsecp256k1_v0_4_1_scalar *a, int flag); /** Compare two scalars. */ -static int rustsecp256k1_v0_4_0_scalar_eq(const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b); +static int rustsecp256k1_v0_4_1_scalar_eq(const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b); /** Find r1 and r2 such that r1+r2*2^128 = k. */ -static void rustsecp256k1_v0_4_0_scalar_split_128(rustsecp256k1_v0_4_0_scalar *r1, rustsecp256k1_v0_4_0_scalar *r2, const rustsecp256k1_v0_4_0_scalar *k); +static void rustsecp256k1_v0_4_1_scalar_split_128(rustsecp256k1_v0_4_1_scalar *r1, rustsecp256k1_v0_4_1_scalar *r2, const rustsecp256k1_v0_4_1_scalar *k); /** Find r1 and r2 such that r1+r2*lambda = k, - * where r1 and r2 or their negations are maximum 128 bits long (see rustsecp256k1_v0_4_0_ge_mul_lambda). */ -static void rustsecp256k1_v0_4_0_scalar_split_lambda(rustsecp256k1_v0_4_0_scalar *r1, rustsecp256k1_v0_4_0_scalar *r2, const rustsecp256k1_v0_4_0_scalar *k); + * where r1 and r2 or their negations are maximum 128 bits long (see rustsecp256k1_v0_4_1_ge_mul_lambda). */ +static void rustsecp256k1_v0_4_1_scalar_split_lambda(rustsecp256k1_v0_4_1_scalar *r1, rustsecp256k1_v0_4_1_scalar *r2, const rustsecp256k1_v0_4_1_scalar *k); /** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */ -static void rustsecp256k1_v0_4_0_scalar_mul_shift_var(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b, unsigned int shift); +static void rustsecp256k1_v0_4_1_scalar_mul_shift_var(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b, unsigned int shift); /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/ -static void rustsecp256k1_v0_4_0_scalar_cmov(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, int flag); +static void rustsecp256k1_v0_4_1_scalar_cmov(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, int flag); #endif /* SECP256K1_SCALAR_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_4x64.h b/secp256k1-sys/depend/secp256k1/src/scalar_4x64.h index 1d5b49e..a470878 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_4x64.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_4x64.h @@ -12,7 +12,7 @@ /** A scalar modulo the group order of the secp256k1 curve. 
*/ typedef struct { uint64_t d[4]; -} rustsecp256k1_v0_4_0_scalar; +} rustsecp256k1_v0_4_1_scalar; #define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{((uint64_t)(d1)) << 32 | (d0), ((uint64_t)(d3)) << 32 | (d2), ((uint64_t)(d5)) << 32 | (d4), ((uint64_t)(d7)) << 32 | (d6)}} diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_4x64_impl.h b/secp256k1-sys/depend/secp256k1/src/scalar_4x64_impl.h index 47d328b..27fbae9 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_4x64_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_4x64_impl.h @@ -7,6 +7,8 @@ #ifndef SECP256K1_SCALAR_REPR_IMPL_H #define SECP256K1_SCALAR_REPR_IMPL_H +#include "modinv64_impl.h" + /* Limbs of the secp256k1 order. */ #define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL) #define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL) @@ -24,37 +26,37 @@ #define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL) #define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL) -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_clear(rustsecp256k1_v0_4_0_scalar *r) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_scalar_clear(rustsecp256k1_v0_4_1_scalar *r) { r->d[0] = 0; r->d[1] = 0; r->d[2] = 0; r->d[3] = 0; } -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_set_int(rustsecp256k1_v0_4_0_scalar *r, unsigned int v) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_scalar_set_int(rustsecp256k1_v0_4_1_scalar *r, unsigned int v) { r->d[0] = v; r->d[1] = 0; r->d[2] = 0; r->d[3] = 0; } -SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_0_scalar_get_bits(const rustsecp256k1_v0_4_0_scalar *a, unsigned int offset, unsigned int count) { +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_1_scalar_get_bits(const rustsecp256k1_v0_4_1_scalar *a, unsigned int offset, unsigned int count) { VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6); return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1); } -SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_0_scalar_get_bits_var(const rustsecp256k1_v0_4_0_scalar *a, unsigned int offset, unsigned int count) { +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_1_scalar_get_bits_var(const rustsecp256k1_v0_4_1_scalar *a, unsigned int offset, unsigned int count) { VERIFY_CHECK(count < 32); VERIFY_CHECK(offset + count <= 256); if ((offset + count - 1) >> 6 == offset >> 6) { - return rustsecp256k1_v0_4_0_scalar_get_bits(a, offset, count); + return rustsecp256k1_v0_4_1_scalar_get_bits(a, offset, count); } else { VERIFY_CHECK((offset >> 6) + 1 < 4); return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1); } } -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_check_overflow(const rustsecp256k1_v0_4_0_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_check_overflow(const rustsecp256k1_v0_4_1_scalar *a) { int yes = 0; int no = 0; no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. 
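The overflow check compares the limbs against the order n most-significant first, accumulating branch-free yes/no flags rather than branching on secret data; the top limb of n is 0xFFFFFFFFFFFFFFFF, which is why the "greater" test is skipped there. A standalone sketch of the same pattern over four little-endian 64-bit limbs (names are illustrative):

    #include <stdint.h>

    /* Branchless "is x >= n?" over 4 little-endian 64-bit limbs. n_limbs
     * holds the group order; its top limb is all-ones, so a "greater" test
     * on limb 3 could never fire and is omitted. */
    static int ge_order(const uint64_t x[4], const uint64_t n_limbs[4]) {
        int yes = 0, no = 0;
        no  |= (x[3] < n_limbs[3]);
        no  |= (x[2] < n_limbs[2]);
        yes |= (x[2] > n_limbs[2]) & ~no;
        no  |= (x[1] < n_limbs[1]);
        yes |= (x[1] > n_limbs[1]) & ~no;
        yes |= (x[0] >= n_limbs[0]) & ~no;
        return yes;
    }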
*/ @@ -66,7 +68,7 @@ SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_check_overflow(const rus return yes; } -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_reduce(rustsecp256k1_v0_4_0_scalar *r, unsigned int overflow) { +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_reduce(rustsecp256k1_v0_4_1_scalar *r, unsigned int overflow) { uint128_t t; VERIFY_CHECK(overflow <= 1); t = (uint128_t)r->d[0] + overflow * SECP256K1_N_C_0; @@ -80,7 +82,7 @@ SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_reduce(rustsecp256k1_v0_ return overflow; } -static int rustsecp256k1_v0_4_0_scalar_add(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) { +static int rustsecp256k1_v0_4_1_scalar_add(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) { int overflow; uint128_t t = (uint128_t)a->d[0] + b->d[0]; r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; @@ -90,13 +92,13 @@ static int rustsecp256k1_v0_4_0_scalar_add(rustsecp256k1_v0_4_0_scalar *r, const r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; t += (uint128_t)a->d[3] + b->d[3]; r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - overflow = t + rustsecp256k1_v0_4_0_scalar_check_overflow(r); + overflow = t + rustsecp256k1_v0_4_1_scalar_check_overflow(r); VERIFY_CHECK(overflow == 0 || overflow == 1); - rustsecp256k1_v0_4_0_scalar_reduce(r, overflow); + rustsecp256k1_v0_4_1_scalar_reduce(r, overflow); return overflow; } -static void rustsecp256k1_v0_4_0_scalar_cadd_bit(rustsecp256k1_v0_4_0_scalar *r, unsigned int bit, int flag) { +static void rustsecp256k1_v0_4_1_scalar_cadd_bit(rustsecp256k1_v0_4_1_scalar *r, unsigned int bit, int flag) { uint128_t t; VERIFY_CHECK(bit < 256); bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */ @@ -110,35 +112,35 @@ static void rustsecp256k1_v0_4_0_scalar_cadd_bit(rustsecp256k1_v0_4_0_scalar *r, r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; #ifdef VERIFY VERIFY_CHECK((t >> 64) == 0); - VERIFY_CHECK(rustsecp256k1_v0_4_0_scalar_check_overflow(r) == 0); + VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_check_overflow(r) == 0); #endif } -static void rustsecp256k1_v0_4_0_scalar_set_b32(rustsecp256k1_v0_4_0_scalar *r, const unsigned char *b32, int *overflow) { +static void rustsecp256k1_v0_4_1_scalar_set_b32(rustsecp256k1_v0_4_1_scalar *r, const unsigned char *b32, int *overflow) { int over; r->d[0] = (uint64_t)b32[31] | (uint64_t)b32[30] << 8 | (uint64_t)b32[29] << 16 | (uint64_t)b32[28] << 24 | (uint64_t)b32[27] << 32 | (uint64_t)b32[26] << 40 | (uint64_t)b32[25] << 48 | (uint64_t)b32[24] << 56; r->d[1] = (uint64_t)b32[23] | (uint64_t)b32[22] << 8 | (uint64_t)b32[21] << 16 | (uint64_t)b32[20] << 24 | (uint64_t)b32[19] << 32 | (uint64_t)b32[18] << 40 | (uint64_t)b32[17] << 48 | (uint64_t)b32[16] << 56; r->d[2] = (uint64_t)b32[15] | (uint64_t)b32[14] << 8 | (uint64_t)b32[13] << 16 | (uint64_t)b32[12] << 24 | (uint64_t)b32[11] << 32 | (uint64_t)b32[10] << 40 | (uint64_t)b32[9] << 48 | (uint64_t)b32[8] << 56; r->d[3] = (uint64_t)b32[7] | (uint64_t)b32[6] << 8 | (uint64_t)b32[5] << 16 | (uint64_t)b32[4] << 24 | (uint64_t)b32[3] << 32 | (uint64_t)b32[2] << 40 | (uint64_t)b32[1] << 48 | (uint64_t)b32[0] << 56; - over = rustsecp256k1_v0_4_0_scalar_reduce(r, rustsecp256k1_v0_4_0_scalar_check_overflow(r)); + over = rustsecp256k1_v0_4_1_scalar_reduce(r, rustsecp256k1_v0_4_1_scalar_check_overflow(r)); if (overflow) { *overflow = over; } } -static void 
rustsecp256k1_v0_4_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_4_0_scalar* a) { +static void rustsecp256k1_v0_4_1_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_4_1_scalar* a) { bin[0] = a->d[3] >> 56; bin[1] = a->d[3] >> 48; bin[2] = a->d[3] >> 40; bin[3] = a->d[3] >> 32; bin[4] = a->d[3] >> 24; bin[5] = a->d[3] >> 16; bin[6] = a->d[3] >> 8; bin[7] = a->d[3]; bin[8] = a->d[2] >> 56; bin[9] = a->d[2] >> 48; bin[10] = a->d[2] >> 40; bin[11] = a->d[2] >> 32; bin[12] = a->d[2] >> 24; bin[13] = a->d[2] >> 16; bin[14] = a->d[2] >> 8; bin[15] = a->d[2]; bin[16] = a->d[1] >> 56; bin[17] = a->d[1] >> 48; bin[18] = a->d[1] >> 40; bin[19] = a->d[1] >> 32; bin[20] = a->d[1] >> 24; bin[21] = a->d[1] >> 16; bin[22] = a->d[1] >> 8; bin[23] = a->d[1]; bin[24] = a->d[0] >> 56; bin[25] = a->d[0] >> 48; bin[26] = a->d[0] >> 40; bin[27] = a->d[0] >> 32; bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0]; } -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_is_zero(const rustsecp256k1_v0_4_0_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_is_zero(const rustsecp256k1_v0_4_1_scalar *a) { return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0; } -static void rustsecp256k1_v0_4_0_scalar_negate(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a) { - uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (rustsecp256k1_v0_4_0_scalar_is_zero(a) == 0); +static void rustsecp256k1_v0_4_1_scalar_negate(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a) { + uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (rustsecp256k1_v0_4_1_scalar_is_zero(a) == 0); uint128_t t = (uint128_t)(~a->d[0]) + SECP256K1_N_0 + 1; r->d[0] = t & nonzero; t >>= 64; t += (uint128_t)(~a->d[1]) + SECP256K1_N_1; @@ -149,11 +151,11 @@ static void rustsecp256k1_v0_4_0_scalar_negate(rustsecp256k1_v0_4_0_scalar *r, c r->d[3] = t & nonzero; } -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_is_one(const rustsecp256k1_v0_4_0_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_is_one(const rustsecp256k1_v0_4_1_scalar *a) { return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0; } -static int rustsecp256k1_v0_4_0_scalar_is_high(const rustsecp256k1_v0_4_0_scalar *a) { +static int rustsecp256k1_v0_4_1_scalar_is_high(const rustsecp256k1_v0_4_1_scalar *a) { int yes = 0; int no = 0; no |= (a->d[3] < SECP256K1_N_H_3); @@ -165,11 +167,11 @@ static int rustsecp256k1_v0_4_0_scalar_is_high(const rustsecp256k1_v0_4_0_scalar return yes; } -static int rustsecp256k1_v0_4_0_scalar_cond_negate(rustsecp256k1_v0_4_0_scalar *r, int flag) { +static int rustsecp256k1_v0_4_1_scalar_cond_negate(rustsecp256k1_v0_4_1_scalar *r, int flag) { /* If we are flag = 0, mask = 00...00 and this is a no-op; - * if we are flag = 1, mask = 11...11 and this is identical to rustsecp256k1_v0_4_0_scalar_negate */ + * if we are flag = 1, mask = 11...11 and this is identical to rustsecp256k1_v0_4_1_scalar_negate */ uint64_t mask = !flag - 1; - uint64_t nonzero = (rustsecp256k1_v0_4_0_scalar_is_zero(r) != 0) - 1; + uint64_t nonzero = (rustsecp256k1_v0_4_1_scalar_is_zero(r) != 0) - 1; uint128_t t = (uint128_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask); r->d[0] = t & nonzero; t >>= 64; t += (uint128_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask); @@ -212,28 +214,6 @@ static int rustsecp256k1_v0_4_0_scalar_cond_negate(rustsecp256k1_v0_4_0_scalar * VERIFY_CHECK(c1 >= th); \ } -/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. 
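muladd (kept) and muladd2 (deleted below, now unused once the squaring path is gone) maintain a three-limb accumulator (c0,c1,c2): each 64x64->128-bit product is folded in with explicit carries, and extract later pops the finished low limb. A compact function-style sketch of one muladd step, assuming the compiler provides unsigned __int128 (the function name is illustrative):

    #include <stdint.h>
    typedef unsigned __int128 uint128_t;

    /* One accumulator step: (c0,c1,c2) += a*b. c2 must never overflow,
     * which callers guarantee by bounding how many products are summed
     * before each extract. */
    static void acc_muladd(uint64_t *c0, uint64_t *c1, uint64_t *c2,
                           uint64_t a, uint64_t b) {
        uint128_t t = (uint128_t)a * b;
        uint64_t th = (uint64_t)(t >> 64), tl = (uint64_t)t;
        *c0 += tl;           /* may wrap */
        th  += (*c0 < tl);   /* carry; cannot wrap since th <= 2^64 - 2 */
        *c1 += th;           /* may wrap */
        *c2 += (*c1 < th);   /* carry into the third limb */
    }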
*/ -#define muladd2(a,b) { \ - uint64_t tl, th, th2, tl2; \ - { \ - uint128_t t = (uint128_t)a * b; \ - th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \ - tl = t; \ - } \ - th2 = th + th; /* at most 0xFFFFFFFFFFFFFFFE (in case th was 0x7FFFFFFFFFFFFFFF) */ \ - c2 += (th2 < th); /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((th2 >= th) || (c2 != 0)); \ - tl2 = tl + tl; /* at most 0xFFFFFFFFFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFFFFFFFFFF) */ \ - th2 += (tl2 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \ - c0 += tl2; /* overflow is handled on the next line */ \ - th2 += (c0 < tl2); /* second overflow is handled on the next line */ \ - c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \ - c1 += th2; /* overflow is handled on the next line */ \ - c2 += (c1 < th2); /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \ -} - /** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */ #define sumadd(a) { \ unsigned int over; \ @@ -267,7 +247,7 @@ static int rustsecp256k1_v0_4_0_scalar_cond_negate(rustsecp256k1_v0_4_0_scalar * VERIFY_CHECK(c2 == 0); \ } -static void rustsecp256k1_v0_4_0_scalar_reduce_512(rustsecp256k1_v0_4_0_scalar *r, const uint64_t *l) { +static void rustsecp256k1_v0_4_1_scalar_reduce_512(rustsecp256k1_v0_4_1_scalar *r, const uint64_t *l) { #ifdef USE_ASM_X86_64 /* Reduce 512 bits into 385. */ uint64_t m0, m1, m2, m3, m4, m5, m6; @@ -573,10 +553,10 @@ static void rustsecp256k1_v0_4_0_scalar_reduce_512(rustsecp256k1_v0_4_0_scalar * #endif /* Final reduction of r. */ - rustsecp256k1_v0_4_0_scalar_reduce(r, c + rustsecp256k1_v0_4_0_scalar_check_overflow(r)); + rustsecp256k1_v0_4_1_scalar_reduce(r, c + rustsecp256k1_v0_4_1_scalar_check_overflow(r)); } -static void rustsecp256k1_v0_4_0_scalar_mul_512(uint64_t l[8], const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) { +static void rustsecp256k1_v0_4_1_scalar_mul_512(uint64_t l[8], const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) { #ifdef USE_ASM_X86_64 const uint64_t *pb = b->d; __asm__ __volatile__( @@ -743,158 +723,20 @@ static void rustsecp256k1_v0_4_0_scalar_mul_512(uint64_t l[8], const rustsecp256 #endif } -static void rustsecp256k1_v0_4_0_scalar_sqr_512(uint64_t l[8], const rustsecp256k1_v0_4_0_scalar *a) { -#ifdef USE_ASM_X86_64 - __asm__ __volatile__( - /* Preload */ - "movq 0(%%rdi), %%r11\n" - "movq 8(%%rdi), %%r12\n" - "movq 16(%%rdi), %%r13\n" - "movq 24(%%rdi), %%r14\n" - /* (rax,rdx) = a0 * a0 */ - "movq %%r11, %%rax\n" - "mulq %%r11\n" - /* Extract l0 */ - "movq %%rax, 0(%%rsi)\n" - /* (r8,r9,r10) = (rdx,0) */ - "movq %%rdx, %%r8\n" - "xorq %%r9, %%r9\n" - "xorq %%r10, %%r10\n" - /* (r8,r9,r10) += 2 * a0 * a1 */ - "movq %%r11, %%rax\n" - "mulq %%r12\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - /* Extract l1 */ - "movq %%r8, 8(%%rsi)\n" - "xorq %%r8, %%r8\n" - /* (r9,r10,r8) += 2 * a0 * a2 */ - "movq %%r11, %%rax\n" - "mulq %%r13\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - /* (r9,r10,r8) += a1 * a1 */ - "movq %%r12, %%rax\n" - "mulq %%r12\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - /* Extract l2 */ - "movq %%r9, 16(%%rsi)\n" - "xorq %%r9, %%r9\n" - /* 
(r10,r8,r9) += 2 * a0 * a3 */ - "movq %%r11, %%rax\n" - "mulq %%r14\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - /* (r10,r8,r9) += 2 * a1 * a2 */ - "movq %%r12, %%rax\n" - "mulq %%r13\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - /* Extract l3 */ - "movq %%r10, 24(%%rsi)\n" - "xorq %%r10, %%r10\n" - /* (r8,r9,r10) += 2 * a1 * a3 */ - "movq %%r12, %%rax\n" - "mulq %%r14\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - /* (r8,r9,r10) += a2 * a2 */ - "movq %%r13, %%rax\n" - "mulq %%r13\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - /* Extract l4 */ - "movq %%r8, 32(%%rsi)\n" - "xorq %%r8, %%r8\n" - /* (r9,r10,r8) += 2 * a2 * a3 */ - "movq %%r13, %%rax\n" - "mulq %%r14\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - /* Extract l5 */ - "movq %%r9, 40(%%rsi)\n" - /* (r10,r8) += a3 * a3 */ - "movq %%r14, %%rax\n" - "mulq %%r14\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - /* Extract l6 */ - "movq %%r10, 48(%%rsi)\n" - /* Extract l7 */ - "movq %%r8, 56(%%rsi)\n" - : - : "S"(l), "D"(a->d) - : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc", "memory"); -#else - /* 160 bit accumulator. */ - uint64_t c0 = 0, c1 = 0; - uint32_t c2 = 0; - - /* l[0..7] = a[0..3] * b[0..3]. */ - muladd_fast(a->d[0], a->d[0]); - extract_fast(l[0]); - muladd2(a->d[0], a->d[1]); - extract(l[1]); - muladd2(a->d[0], a->d[2]); - muladd(a->d[1], a->d[1]); - extract(l[2]); - muladd2(a->d[0], a->d[3]); - muladd2(a->d[1], a->d[2]); - extract(l[3]); - muladd2(a->d[1], a->d[3]); - muladd(a->d[2], a->d[2]); - extract(l[4]); - muladd2(a->d[2], a->d[3]); - extract(l[5]); - muladd_fast(a->d[3], a->d[3]); - extract_fast(l[6]); - VERIFY_CHECK(c1 == 0); - l[7] = c0; -#endif -} - #undef sumadd #undef sumadd_fast #undef muladd #undef muladd_fast -#undef muladd2 #undef extract #undef extract_fast -static void rustsecp256k1_v0_4_0_scalar_mul(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) { +static void rustsecp256k1_v0_4_1_scalar_mul(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) { uint64_t l[8]; - rustsecp256k1_v0_4_0_scalar_mul_512(l, a, b); - rustsecp256k1_v0_4_0_scalar_reduce_512(r, l); + rustsecp256k1_v0_4_1_scalar_mul_512(l, a, b); + rustsecp256k1_v0_4_1_scalar_reduce_512(r, l); } -static int rustsecp256k1_v0_4_0_scalar_shr_int(rustsecp256k1_v0_4_0_scalar *r, int n) { +static int rustsecp256k1_v0_4_1_scalar_shr_int(rustsecp256k1_v0_4_1_scalar *r, int n) { int ret; VERIFY_CHECK(n > 0); VERIFY_CHECK(n < 16); @@ -906,13 +748,7 @@ static int rustsecp256k1_v0_4_0_scalar_shr_int(rustsecp256k1_v0_4_0_scalar *r, i return ret; } -static void rustsecp256k1_v0_4_0_scalar_sqr(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a) { - uint64_t l[8]; - rustsecp256k1_v0_4_0_scalar_sqr_512(l, a); - rustsecp256k1_v0_4_0_scalar_reduce_512(r, l); -} - -static void rustsecp256k1_v0_4_0_scalar_split_128(rustsecp256k1_v0_4_0_scalar *r1, rustsecp256k1_v0_4_0_scalar *r2, const rustsecp256k1_v0_4_0_scalar *k) { +static void rustsecp256k1_v0_4_1_scalar_split_128(rustsecp256k1_v0_4_1_scalar *r1, 
rustsecp256k1_v0_4_1_scalar *r2, const rustsecp256k1_v0_4_1_scalar *k) { r1->d[0] = k->d[0]; r1->d[1] = k->d[1]; r1->d[2] = 0; @@ -923,17 +759,17 @@ static void rustsecp256k1_v0_4_0_scalar_split_128(rustsecp256k1_v0_4_0_scalar *r r2->d[3] = 0; } -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_eq(const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) { +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_eq(const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) { return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0; } -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_mul_shift_var(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b, unsigned int shift) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_scalar_mul_shift_var(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b, unsigned int shift) { uint64_t l[8]; unsigned int shiftlimbs; unsigned int shiftlow; unsigned int shifthigh; VERIFY_CHECK(shift >= 256); - rustsecp256k1_v0_4_0_scalar_mul_512(l, a, b); + rustsecp256k1_v0_4_1_scalar_mul_512(l, a, b); shiftlimbs = shift >> 6; shiftlow = shift & 0x3F; shifthigh = 64 - shiftlow; @@ -941,10 +777,10 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_mul_shift_var(rustsecp2 r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0; - rustsecp256k1_v0_4_0_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1); + rustsecp256k1_v0_4_1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1); } -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_scalar_cmov(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_scalar_cmov(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, int flag) { uint64_t mask0, mask1; VG_CHECK_VERIFY(r->d, sizeof(r->d)); mask0 = flag + ~((uint64_t)0); @@ -955,4 +791,78 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_scalar_cmov(rustsecp256k1_v0_4 r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1); } +static void rustsecp256k1_v0_4_1_scalar_from_signed62(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_modinv64_signed62 *a) { + const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4]; + + /* The output from rustsecp256k1_v0_4_1_modinv64{_var} should be normalized to range [0,modulus), and + * have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4). 
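+     * (256 - 62*4 = 8, hence the check below that a4 >> 8 == 0.)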
+ */ + VERIFY_CHECK(a0 >> 62 == 0); + VERIFY_CHECK(a1 >> 62 == 0); + VERIFY_CHECK(a2 >> 62 == 0); + VERIFY_CHECK(a3 >> 62 == 0); + VERIFY_CHECK(a4 >> 8 == 0); + + r->d[0] = a0 | a1 << 62; + r->d[1] = a1 >> 2 | a2 << 60; + r->d[2] = a2 >> 4 | a3 << 58; + r->d[3] = a3 >> 6 | a4 << 56; + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_check_overflow(r) == 0); +#endif +} + +static void rustsecp256k1_v0_4_1_scalar_to_signed62(rustsecp256k1_v0_4_1_modinv64_signed62 *r, const rustsecp256k1_v0_4_1_scalar *a) { + const uint64_t M62 = UINT64_MAX >> 2; + const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3]; + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_check_overflow(a) == 0); +#endif + + r->v[0] = a0 & M62; + r->v[1] = (a0 >> 62 | a1 << 2) & M62; + r->v[2] = (a1 >> 60 | a2 << 4) & M62; + r->v[3] = (a2 >> 58 | a3 << 6) & M62; + r->v[4] = a3 >> 56; +} + +static const rustsecp256k1_v0_4_1_modinv64_modinfo rustsecp256k1_v0_4_1_const_modinfo_scalar = { + {{0x3FD25E8CD0364141LL, 0x2ABB739ABD2280EELL, -0x15LL, 0, 256}}, + 0x34F20099AA774EC1LL +}; + +static void rustsecp256k1_v0_4_1_scalar_inverse(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *x) { + rustsecp256k1_v0_4_1_modinv64_signed62 s; +#ifdef VERIFY + int zero_in = rustsecp256k1_v0_4_1_scalar_is_zero(x); +#endif + rustsecp256k1_v0_4_1_scalar_to_signed62(&s, x); + rustsecp256k1_v0_4_1_modinv64(&s, &rustsecp256k1_v0_4_1_const_modinfo_scalar); + rustsecp256k1_v0_4_1_scalar_from_signed62(r, &s); + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_is_zero(r) == zero_in); +#endif +} + +static void rustsecp256k1_v0_4_1_scalar_inverse_var(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *x) { + rustsecp256k1_v0_4_1_modinv64_signed62 s; +#ifdef VERIFY + int zero_in = rustsecp256k1_v0_4_1_scalar_is_zero(x); +#endif + rustsecp256k1_v0_4_1_scalar_to_signed62(&s, x); + rustsecp256k1_v0_4_1_modinv64_var(&s, &rustsecp256k1_v0_4_1_const_modinfo_scalar); + rustsecp256k1_v0_4_1_scalar_from_signed62(r, &s); + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_is_zero(r) == zero_in); +#endif +} + +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_is_even(const rustsecp256k1_v0_4_1_scalar *a) { + return !(a->d[0] & 1); +} + #endif /* SECP256K1_SCALAR_REPR_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_8x32.h b/secp256k1-sys/depend/secp256k1/src/scalar_8x32.h index 35f7549..383abb4 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_8x32.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_8x32.h @@ -12,7 +12,7 @@ /** A scalar modulo the group order of the secp256k1 curve. */ typedef struct { uint32_t d[8]; -} rustsecp256k1_v0_4_0_scalar; +} rustsecp256k1_v0_4_1_scalar; #define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7)}} diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_8x32_impl.h b/secp256k1-sys/depend/secp256k1/src/scalar_8x32_impl.h index 44e3809..7efac6f 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_8x32_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_8x32_impl.h @@ -7,6 +7,8 @@ #ifndef SECP256K1_SCALAR_REPR_IMPL_H #define SECP256K1_SCALAR_REPR_IMPL_H +#include "modinv32_impl.h" + /* Limbs of the secp256k1 order. 
*/ #define SECP256K1_N_0 ((uint32_t)0xD0364141UL) #define SECP256K1_N_1 ((uint32_t)0xBFD25E8CUL) @@ -34,7 +36,7 @@ #define SECP256K1_N_H_6 ((uint32_t)0xFFFFFFFFUL) #define SECP256K1_N_H_7 ((uint32_t)0x7FFFFFFFUL) -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_clear(rustsecp256k1_v0_4_0_scalar *r) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_scalar_clear(rustsecp256k1_v0_4_1_scalar *r) { r->d[0] = 0; r->d[1] = 0; r->d[2] = 0; @@ -45,7 +47,7 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_clear(rustsecp256k1_v0_ r->d[7] = 0; } -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_set_int(rustsecp256k1_v0_4_0_scalar *r, unsigned int v) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_scalar_set_int(rustsecp256k1_v0_4_1_scalar *r, unsigned int v) { r->d[0] = v; r->d[1] = 0; r->d[2] = 0; @@ -56,23 +58,23 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_set_int(rustsecp256k1_v r->d[7] = 0; } -SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_0_scalar_get_bits(const rustsecp256k1_v0_4_0_scalar *a, unsigned int offset, unsigned int count) { +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_1_scalar_get_bits(const rustsecp256k1_v0_4_1_scalar *a, unsigned int offset, unsigned int count) { VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5); return (a->d[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1); } -SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_0_scalar_get_bits_var(const rustsecp256k1_v0_4_0_scalar *a, unsigned int offset, unsigned int count) { +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_1_scalar_get_bits_var(const rustsecp256k1_v0_4_1_scalar *a, unsigned int offset, unsigned int count) { VERIFY_CHECK(count < 32); VERIFY_CHECK(offset + count <= 256); if ((offset + count - 1) >> 5 == offset >> 5) { - return rustsecp256k1_v0_4_0_scalar_get_bits(a, offset, count); + return rustsecp256k1_v0_4_1_scalar_get_bits(a, offset, count); } else { VERIFY_CHECK((offset >> 5) + 1 < 8); return ((a->d[offset >> 5] >> (offset & 0x1F)) | (a->d[(offset >> 5) + 1] << (32 - (offset & 0x1F)))) & ((((uint32_t)1) << count) - 1); } } -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_check_overflow(const rustsecp256k1_v0_4_0_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_check_overflow(const rustsecp256k1_v0_4_1_scalar *a) { int yes = 0; int no = 0; no |= (a->d[7] < SECP256K1_N_7); /* No need for a > check. 
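     * (The order's top 32-bit limb is 0xFFFFFFFF, so a->d[7] can never exceed SECP256K1_N_7.)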
*/ @@ -90,7 +92,7 @@ SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_check_overflow(const rus return yes; } -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_reduce(rustsecp256k1_v0_4_0_scalar *r, uint32_t overflow) { +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_reduce(rustsecp256k1_v0_4_1_scalar *r, uint32_t overflow) { uint64_t t; VERIFY_CHECK(overflow <= 1); t = (uint64_t)r->d[0] + overflow * SECP256K1_N_C_0; @@ -112,7 +114,7 @@ SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_reduce(rustsecp256k1_v0_ return overflow; } -static int rustsecp256k1_v0_4_0_scalar_add(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) { +static int rustsecp256k1_v0_4_1_scalar_add(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) { int overflow; uint64_t t = (uint64_t)a->d[0] + b->d[0]; r->d[0] = t & 0xFFFFFFFFULL; t >>= 32; @@ -130,13 +132,13 @@ static int rustsecp256k1_v0_4_0_scalar_add(rustsecp256k1_v0_4_0_scalar *r, const r->d[6] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)a->d[7] + b->d[7]; r->d[7] = t & 0xFFFFFFFFULL; t >>= 32; - overflow = t + rustsecp256k1_v0_4_0_scalar_check_overflow(r); + overflow = t + rustsecp256k1_v0_4_1_scalar_check_overflow(r); VERIFY_CHECK(overflow == 0 || overflow == 1); - rustsecp256k1_v0_4_0_scalar_reduce(r, overflow); + rustsecp256k1_v0_4_1_scalar_reduce(r, overflow); return overflow; } -static void rustsecp256k1_v0_4_0_scalar_cadd_bit(rustsecp256k1_v0_4_0_scalar *r, unsigned int bit, int flag) { +static void rustsecp256k1_v0_4_1_scalar_cadd_bit(rustsecp256k1_v0_4_1_scalar *r, unsigned int bit, int flag) { uint64_t t; VERIFY_CHECK(bit < 256); bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 5) > 7 makes this a noop */ @@ -158,11 +160,11 @@ static void rustsecp256k1_v0_4_0_scalar_cadd_bit(rustsecp256k1_v0_4_0_scalar *r, r->d[7] = t & 0xFFFFFFFFULL; #ifdef VERIFY VERIFY_CHECK((t >> 32) == 0); - VERIFY_CHECK(rustsecp256k1_v0_4_0_scalar_check_overflow(r) == 0); + VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_check_overflow(r) == 0); #endif } -static void rustsecp256k1_v0_4_0_scalar_set_b32(rustsecp256k1_v0_4_0_scalar *r, const unsigned char *b32, int *overflow) { +static void rustsecp256k1_v0_4_1_scalar_set_b32(rustsecp256k1_v0_4_1_scalar *r, const unsigned char *b32, int *overflow) { int over; r->d[0] = (uint32_t)b32[31] | (uint32_t)b32[30] << 8 | (uint32_t)b32[29] << 16 | (uint32_t)b32[28] << 24; r->d[1] = (uint32_t)b32[27] | (uint32_t)b32[26] << 8 | (uint32_t)b32[25] << 16 | (uint32_t)b32[24] << 24; @@ -172,13 +174,13 @@ static void rustsecp256k1_v0_4_0_scalar_set_b32(rustsecp256k1_v0_4_0_scalar *r, r->d[5] = (uint32_t)b32[11] | (uint32_t)b32[10] << 8 | (uint32_t)b32[9] << 16 | (uint32_t)b32[8] << 24; r->d[6] = (uint32_t)b32[7] | (uint32_t)b32[6] << 8 | (uint32_t)b32[5] << 16 | (uint32_t)b32[4] << 24; r->d[7] = (uint32_t)b32[3] | (uint32_t)b32[2] << 8 | (uint32_t)b32[1] << 16 | (uint32_t)b32[0] << 24; - over = rustsecp256k1_v0_4_0_scalar_reduce(r, rustsecp256k1_v0_4_0_scalar_check_overflow(r)); + over = rustsecp256k1_v0_4_1_scalar_reduce(r, rustsecp256k1_v0_4_1_scalar_check_overflow(r)); if (overflow) { *overflow = over; } } -static void rustsecp256k1_v0_4_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_4_0_scalar* a) { +static void rustsecp256k1_v0_4_1_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_4_1_scalar* a) { bin[0] = a->d[7] >> 24; bin[1] = a->d[7] >> 16; bin[2] = a->d[7] >> 8; bin[3] = 
a->d[7]; bin[4] = a->d[6] >> 24; bin[5] = a->d[6] >> 16; bin[6] = a->d[6] >> 8; bin[7] = a->d[6]; bin[8] = a->d[5] >> 24; bin[9] = a->d[5] >> 16; bin[10] = a->d[5] >> 8; bin[11] = a->d[5]; @@ -189,12 +191,12 @@ static void rustsecp256k1_v0_4_0_scalar_get_b32(unsigned char *bin, const rustse bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0]; } -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_is_zero(const rustsecp256k1_v0_4_0_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_is_zero(const rustsecp256k1_v0_4_1_scalar *a) { return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0; } -static void rustsecp256k1_v0_4_0_scalar_negate(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a) { - uint32_t nonzero = 0xFFFFFFFFUL * (rustsecp256k1_v0_4_0_scalar_is_zero(a) == 0); +static void rustsecp256k1_v0_4_1_scalar_negate(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a) { + uint32_t nonzero = 0xFFFFFFFFUL * (rustsecp256k1_v0_4_1_scalar_is_zero(a) == 0); uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1; r->d[0] = t & nonzero; t >>= 32; t += (uint64_t)(~a->d[1]) + SECP256K1_N_1; @@ -213,11 +215,11 @@ static void rustsecp256k1_v0_4_0_scalar_negate(rustsecp256k1_v0_4_0_scalar *r, c r->d[7] = t & nonzero; } -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_is_one(const rustsecp256k1_v0_4_0_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_is_one(const rustsecp256k1_v0_4_1_scalar *a) { return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0; } -static int rustsecp256k1_v0_4_0_scalar_is_high(const rustsecp256k1_v0_4_0_scalar *a) { +static int rustsecp256k1_v0_4_1_scalar_is_high(const rustsecp256k1_v0_4_1_scalar *a) { int yes = 0; int no = 0; no |= (a->d[7] < SECP256K1_N_H_7); @@ -235,11 +237,11 @@ static int rustsecp256k1_v0_4_0_scalar_is_high(const rustsecp256k1_v0_4_0_scalar return yes; } -static int rustsecp256k1_v0_4_0_scalar_cond_negate(rustsecp256k1_v0_4_0_scalar *r, int flag) { +static int rustsecp256k1_v0_4_1_scalar_cond_negate(rustsecp256k1_v0_4_1_scalar *r, int flag) { /* If we are flag = 0, mask = 00...00 and this is a no-op; - * if we are flag = 1, mask = 11...11 and this is identical to rustsecp256k1_v0_4_0_scalar_negate */ + * if we are flag = 1, mask = 11...11 and this is identical to rustsecp256k1_v0_4_1_scalar_negate */ uint32_t mask = !flag - 1; - uint32_t nonzero = 0xFFFFFFFFUL * (rustsecp256k1_v0_4_0_scalar_is_zero(r) == 0); + uint32_t nonzero = 0xFFFFFFFFUL * (rustsecp256k1_v0_4_1_scalar_is_zero(r) == 0); uint64_t t = (uint64_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask); r->d[0] = t & nonzero; t >>= 32; t += (uint64_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask); @@ -291,28 +293,6 @@ static int rustsecp256k1_v0_4_0_scalar_cond_negate(rustsecp256k1_v0_4_0_scalar * VERIFY_CHECK(c1 >= th); \ } -/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. 
*/ -#define muladd2(a,b) { \ - uint32_t tl, th, th2, tl2; \ - { \ - uint64_t t = (uint64_t)a * b; \ - th = t >> 32; /* at most 0xFFFFFFFE */ \ - tl = t; \ - } \ - th2 = th + th; /* at most 0xFFFFFFFE (in case th was 0x7FFFFFFF) */ \ - c2 += (th2 < th); /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((th2 >= th) || (c2 != 0)); \ - tl2 = tl + tl; /* at most 0xFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFF) */ \ - th2 += (tl2 < tl); /* at most 0xFFFFFFFF */ \ - c0 += tl2; /* overflow is handled on the next line */ \ - th2 += (c0 < tl2); /* second overflow is handled on the next line */ \ - c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \ - c1 += th2; /* overflow is handled on the next line */ \ - c2 += (c1 < th2); /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \ -} - /** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */ #define sumadd(a) { \ unsigned int over; \ @@ -346,7 +326,7 @@ static int rustsecp256k1_v0_4_0_scalar_cond_negate(rustsecp256k1_v0_4_0_scalar * VERIFY_CHECK(c2 == 0); \ } -static void rustsecp256k1_v0_4_0_scalar_reduce_512(rustsecp256k1_v0_4_0_scalar *r, const uint32_t *l) { +static void rustsecp256k1_v0_4_1_scalar_reduce_512(rustsecp256k1_v0_4_1_scalar *r, const uint32_t *l) { uint64_t c; uint32_t n0 = l[8], n1 = l[9], n2 = l[10], n3 = l[11], n4 = l[12], n5 = l[13], n6 = l[14], n7 = l[15]; uint32_t m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12; @@ -485,10 +465,10 @@ static void rustsecp256k1_v0_4_0_scalar_reduce_512(rustsecp256k1_v0_4_0_scalar * r->d[7] = c & 0xFFFFFFFFUL; c >>= 32; /* Final reduction of r. */ - rustsecp256k1_v0_4_0_scalar_reduce(r, c + rustsecp256k1_v0_4_0_scalar_check_overflow(r)); + rustsecp256k1_v0_4_1_scalar_reduce(r, c + rustsecp256k1_v0_4_1_scalar_check_overflow(r)); } -static void rustsecp256k1_v0_4_0_scalar_mul_512(uint32_t *l, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) { +static void rustsecp256k1_v0_4_1_scalar_mul_512(uint32_t *l, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) { /* 96 bit accumulator. */ uint32_t c0 = 0, c1 = 0, c2 = 0; @@ -576,81 +556,20 @@ static void rustsecp256k1_v0_4_0_scalar_mul_512(uint32_t *l, const rustsecp256k1 l[15] = c0; } -static void rustsecp256k1_v0_4_0_scalar_sqr_512(uint32_t *l, const rustsecp256k1_v0_4_0_scalar *a) { - /* 96 bit accumulator. */ - uint32_t c0 = 0, c1 = 0, c2 = 0; - - /* l[0..15] = a[0..7]^2. 
*/ - muladd_fast(a->d[0], a->d[0]); - extract_fast(l[0]); - muladd2(a->d[0], a->d[1]); - extract(l[1]); - muladd2(a->d[0], a->d[2]); - muladd(a->d[1], a->d[1]); - extract(l[2]); - muladd2(a->d[0], a->d[3]); - muladd2(a->d[1], a->d[2]); - extract(l[3]); - muladd2(a->d[0], a->d[4]); - muladd2(a->d[1], a->d[3]); - muladd(a->d[2], a->d[2]); - extract(l[4]); - muladd2(a->d[0], a->d[5]); - muladd2(a->d[1], a->d[4]); - muladd2(a->d[2], a->d[3]); - extract(l[5]); - muladd2(a->d[0], a->d[6]); - muladd2(a->d[1], a->d[5]); - muladd2(a->d[2], a->d[4]); - muladd(a->d[3], a->d[3]); - extract(l[6]); - muladd2(a->d[0], a->d[7]); - muladd2(a->d[1], a->d[6]); - muladd2(a->d[2], a->d[5]); - muladd2(a->d[3], a->d[4]); - extract(l[7]); - muladd2(a->d[1], a->d[7]); - muladd2(a->d[2], a->d[6]); - muladd2(a->d[3], a->d[5]); - muladd(a->d[4], a->d[4]); - extract(l[8]); - muladd2(a->d[2], a->d[7]); - muladd2(a->d[3], a->d[6]); - muladd2(a->d[4], a->d[5]); - extract(l[9]); - muladd2(a->d[3], a->d[7]); - muladd2(a->d[4], a->d[6]); - muladd(a->d[5], a->d[5]); - extract(l[10]); - muladd2(a->d[4], a->d[7]); - muladd2(a->d[5], a->d[6]); - extract(l[11]); - muladd2(a->d[5], a->d[7]); - muladd(a->d[6], a->d[6]); - extract(l[12]); - muladd2(a->d[6], a->d[7]); - extract(l[13]); - muladd_fast(a->d[7], a->d[7]); - extract_fast(l[14]); - VERIFY_CHECK(c1 == 0); - l[15] = c0; -} - #undef sumadd #undef sumadd_fast #undef muladd #undef muladd_fast -#undef muladd2 #undef extract #undef extract_fast -static void rustsecp256k1_v0_4_0_scalar_mul(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) { +static void rustsecp256k1_v0_4_1_scalar_mul(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) { uint32_t l[16]; - rustsecp256k1_v0_4_0_scalar_mul_512(l, a, b); - rustsecp256k1_v0_4_0_scalar_reduce_512(r, l); + rustsecp256k1_v0_4_1_scalar_mul_512(l, a, b); + rustsecp256k1_v0_4_1_scalar_reduce_512(r, l); } -static int rustsecp256k1_v0_4_0_scalar_shr_int(rustsecp256k1_v0_4_0_scalar *r, int n) { +static int rustsecp256k1_v0_4_1_scalar_shr_int(rustsecp256k1_v0_4_1_scalar *r, int n) { int ret; VERIFY_CHECK(n > 0); VERIFY_CHECK(n < 16); @@ -666,13 +585,7 @@ static int rustsecp256k1_v0_4_0_scalar_shr_int(rustsecp256k1_v0_4_0_scalar *r, i return ret; } -static void rustsecp256k1_v0_4_0_scalar_sqr(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a) { - uint32_t l[16]; - rustsecp256k1_v0_4_0_scalar_sqr_512(l, a); - rustsecp256k1_v0_4_0_scalar_reduce_512(r, l); -} - -static void rustsecp256k1_v0_4_0_scalar_split_128(rustsecp256k1_v0_4_0_scalar *r1, rustsecp256k1_v0_4_0_scalar *r2, const rustsecp256k1_v0_4_0_scalar *k) { +static void rustsecp256k1_v0_4_1_scalar_split_128(rustsecp256k1_v0_4_1_scalar *r1, rustsecp256k1_v0_4_1_scalar *r2, const rustsecp256k1_v0_4_1_scalar *k) { r1->d[0] = k->d[0]; r1->d[1] = k->d[1]; r1->d[2] = k->d[2]; @@ -691,17 +604,17 @@ static void rustsecp256k1_v0_4_0_scalar_split_128(rustsecp256k1_v0_4_0_scalar *r r2->d[7] = 0; } -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_eq(const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) { +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_eq(const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) { return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0; } 
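/* For reference, a minimal sketch of hypothetical test code (the helper below is
 * not part of this file) checking the contract of the 128-bit split above: the
 * low four limbs of k land in r1 and the high four in r2, so k == r1 + 2^128 * r2.
 * It reuses only functions declared in this file. */
static void check_split_128_contract(const rustsecp256k1_v0_4_1_scalar *k) {
    /* 2^128 as a scalar: only limb d[4] (bit 128) is set. */
    static const rustsecp256k1_v0_4_1_scalar two_128 = SECP256K1_SCALAR_CONST(0, 0, 0, 1, 0, 0, 0, 0);
    rustsecp256k1_v0_4_1_scalar r1, r2, t;
    rustsecp256k1_v0_4_1_scalar_split_128(&r1, &r2, k);
    rustsecp256k1_v0_4_1_scalar_mul(&t, &r2, &two_128);  /* t = 2^128 * r2 (mod n) */
    rustsecp256k1_v0_4_1_scalar_add(&t, &t, &r1);        /* t = r1 + 2^128 * r2 (mod n) */
    VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_eq(&t, k)); /* recovers k, since k < n */
}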
-SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_mul_shift_var(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b, unsigned int shift) { +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_scalar_mul_shift_var(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b, unsigned int shift) { uint32_t l[16]; unsigned int shiftlimbs; unsigned int shiftlow; unsigned int shifthigh; VERIFY_CHECK(shift >= 256); - rustsecp256k1_v0_4_0_scalar_mul_512(l, a, b); + rustsecp256k1_v0_4_1_scalar_mul_512(l, a, b); shiftlimbs = shift >> 5; shiftlow = shift & 0x1F; shifthigh = 32 - shiftlow; @@ -713,10 +626,10 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_mul_shift_var(rustsecp2 r->d[5] = shift < 352 ? (l[5 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[6 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0; - rustsecp256k1_v0_4_0_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1); + rustsecp256k1_v0_4_1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1); } -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_scalar_cmov(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_scalar_cmov(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, int flag) { uint32_t mask0, mask1; VG_CHECK_VERIFY(r->d, sizeof(r->d)); mask0 = flag + ~((uint32_t)0); @@ -731,4 +644,92 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_scalar_cmov(rustsecp256k1_v0_4 r->d[7] = (r->d[7] & mask0) | (a->d[7] & mask1); } +static void rustsecp256k1_v0_4_1_scalar_from_signed30(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_modinv32_signed30 *a) { + const uint32_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4], + a5 = a->v[5], a6 = a->v[6], a7 = a->v[7], a8 = a->v[8]; + + /* The output from rustsecp256k1_v0_4_1_modinv32{_var} should be normalized to range [0,modulus), and + * have limbs in [0,2^30). The modulus is < 2^256, so the top limb must be below 2^(256-30*8). 
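+     * (256 - 30*8 = 16, hence the check below that a8 >> 16 == 0.)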
+ */ + VERIFY_CHECK(a0 >> 30 == 0); + VERIFY_CHECK(a1 >> 30 == 0); + VERIFY_CHECK(a2 >> 30 == 0); + VERIFY_CHECK(a3 >> 30 == 0); + VERIFY_CHECK(a4 >> 30 == 0); + VERIFY_CHECK(a5 >> 30 == 0); + VERIFY_CHECK(a6 >> 30 == 0); + VERIFY_CHECK(a7 >> 30 == 0); + VERIFY_CHECK(a8 >> 16 == 0); + + r->d[0] = a0 | a1 << 30; + r->d[1] = a1 >> 2 | a2 << 28; + r->d[2] = a2 >> 4 | a3 << 26; + r->d[3] = a3 >> 6 | a4 << 24; + r->d[4] = a4 >> 8 | a5 << 22; + r->d[5] = a5 >> 10 | a6 << 20; + r->d[6] = a6 >> 12 | a7 << 18; + r->d[7] = a7 >> 14 | a8 << 16; + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_check_overflow(r) == 0); +#endif +} + +static void rustsecp256k1_v0_4_1_scalar_to_signed30(rustsecp256k1_v0_4_1_modinv32_signed30 *r, const rustsecp256k1_v0_4_1_scalar *a) { + const uint32_t M30 = UINT32_MAX >> 2; + const uint32_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3], + a4 = a->d[4], a5 = a->d[5], a6 = a->d[6], a7 = a->d[7]; + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_check_overflow(a) == 0); +#endif + + r->v[0] = a0 & M30; + r->v[1] = (a0 >> 30 | a1 << 2) & M30; + r->v[2] = (a1 >> 28 | a2 << 4) & M30; + r->v[3] = (a2 >> 26 | a3 << 6) & M30; + r->v[4] = (a3 >> 24 | a4 << 8) & M30; + r->v[5] = (a4 >> 22 | a5 << 10) & M30; + r->v[6] = (a5 >> 20 | a6 << 12) & M30; + r->v[7] = (a6 >> 18 | a7 << 14) & M30; + r->v[8] = a7 >> 16; +} + +static const rustsecp256k1_v0_4_1_modinv32_modinfo rustsecp256k1_v0_4_1_const_modinfo_scalar = { + {{0x10364141L, 0x3F497A33L, 0x348A03BBL, 0x2BB739ABL, -0x146L, 0, 0, 0, 65536}}, + 0x2A774EC1L +}; + +static void rustsecp256k1_v0_4_1_scalar_inverse(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *x) { + rustsecp256k1_v0_4_1_modinv32_signed30 s; +#ifdef VERIFY + int zero_in = rustsecp256k1_v0_4_1_scalar_is_zero(x); +#endif + rustsecp256k1_v0_4_1_scalar_to_signed30(&s, x); + rustsecp256k1_v0_4_1_modinv32(&s, &rustsecp256k1_v0_4_1_const_modinfo_scalar); + rustsecp256k1_v0_4_1_scalar_from_signed30(r, &s); + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_is_zero(r) == zero_in); +#endif +} + +static void rustsecp256k1_v0_4_1_scalar_inverse_var(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *x) { + rustsecp256k1_v0_4_1_modinv32_signed30 s; +#ifdef VERIFY + int zero_in = rustsecp256k1_v0_4_1_scalar_is_zero(x); +#endif + rustsecp256k1_v0_4_1_scalar_to_signed30(&s, x); + rustsecp256k1_v0_4_1_modinv32_var(&s, &rustsecp256k1_v0_4_1_const_modinfo_scalar); + rustsecp256k1_v0_4_1_scalar_from_signed30(r, &s); + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_is_zero(r) == zero_in); +#endif +} + +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_is_even(const rustsecp256k1_v0_4_1_scalar *a) { + return !(a->d[0] & 1); +} + #endif /* SECP256K1_SCALAR_REPR_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_impl.h b/secp256k1-sys/depend/secp256k1/src/scalar_impl.h index ca80ef1..55719d5 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_impl.h @@ -28,232 +28,13 @@ #error "Please select wide multiplication implementation" #endif -static const rustsecp256k1_v0_4_0_scalar rustsecp256k1_v0_4_0_scalar_one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); -static const rustsecp256k1_v0_4_0_scalar rustsecp256k1_v0_4_0_scalar_zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); +static const rustsecp256k1_v0_4_1_scalar rustsecp256k1_v0_4_1_scalar_one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); +static const 
rustsecp256k1_v0_4_1_scalar rustsecp256k1_v0_4_1_scalar_zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); -#ifndef USE_NUM_NONE -static void rustsecp256k1_v0_4_0_scalar_get_num(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_scalar *a) { - unsigned char c[32]; - rustsecp256k1_v0_4_0_scalar_get_b32(c, a); - rustsecp256k1_v0_4_0_num_set_bin(r, c, 32); -} - -/** secp256k1 curve order, see rustsecp256k1_v0_4_0_ecdsa_const_order_as_fe in ecdsa_impl.h */ -static void rustsecp256k1_v0_4_0_scalar_order_get_num(rustsecp256k1_v0_4_0_num *r) { -#if defined(EXHAUSTIVE_TEST_ORDER) - static const unsigned char order[32] = { - 0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,EXHAUSTIVE_TEST_ORDER - }; -#else - static const unsigned char order[32] = { - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE, - 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B, - 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41 - }; -#endif - rustsecp256k1_v0_4_0_num_set_bin(r, order, 32); -} -#endif - -static int rustsecp256k1_v0_4_0_scalar_set_b32_seckey(rustsecp256k1_v0_4_0_scalar *r, const unsigned char *bin) { +static int rustsecp256k1_v0_4_1_scalar_set_b32_seckey(rustsecp256k1_v0_4_1_scalar *r, const unsigned char *bin) { int overflow; - rustsecp256k1_v0_4_0_scalar_set_b32(r, bin, &overflow); - return (!overflow) & (!rustsecp256k1_v0_4_0_scalar_is_zero(r)); -} - -static void rustsecp256k1_v0_4_0_scalar_inverse(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *x) { -#if defined(EXHAUSTIVE_TEST_ORDER) - int i; - *r = 0; - for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) - if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1) - *r = i; - /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus - * have a composite group order; fix it in exhaustive_tests.c). */ - VERIFY_CHECK(*r != 0); -} -#else - rustsecp256k1_v0_4_0_scalar *t; - int i; - /* First compute xN as x ^ (2^N - 1) for some values of N, - * and uM as x ^ M for some values of M. 
*/ - rustsecp256k1_v0_4_0_scalar x2, x3, x6, x8, x14, x28, x56, x112, x126; - rustsecp256k1_v0_4_0_scalar u2, u5, u9, u11, u13; - - rustsecp256k1_v0_4_0_scalar_sqr(&u2, x); - rustsecp256k1_v0_4_0_scalar_mul(&x2, &u2, x); - rustsecp256k1_v0_4_0_scalar_mul(&u5, &u2, &x2); - rustsecp256k1_v0_4_0_scalar_mul(&x3, &u5, &u2); - rustsecp256k1_v0_4_0_scalar_mul(&u9, &x3, &u2); - rustsecp256k1_v0_4_0_scalar_mul(&u11, &u9, &u2); - rustsecp256k1_v0_4_0_scalar_mul(&u13, &u11, &u2); - - rustsecp256k1_v0_4_0_scalar_sqr(&x6, &u13); - rustsecp256k1_v0_4_0_scalar_sqr(&x6, &x6); - rustsecp256k1_v0_4_0_scalar_mul(&x6, &x6, &u11); - - rustsecp256k1_v0_4_0_scalar_sqr(&x8, &x6); - rustsecp256k1_v0_4_0_scalar_sqr(&x8, &x8); - rustsecp256k1_v0_4_0_scalar_mul(&x8, &x8, &x2); - - rustsecp256k1_v0_4_0_scalar_sqr(&x14, &x8); - for (i = 0; i < 5; i++) { - rustsecp256k1_v0_4_0_scalar_sqr(&x14, &x14); - } - rustsecp256k1_v0_4_0_scalar_mul(&x14, &x14, &x6); - - rustsecp256k1_v0_4_0_scalar_sqr(&x28, &x14); - for (i = 0; i < 13; i++) { - rustsecp256k1_v0_4_0_scalar_sqr(&x28, &x28); - } - rustsecp256k1_v0_4_0_scalar_mul(&x28, &x28, &x14); - - rustsecp256k1_v0_4_0_scalar_sqr(&x56, &x28); - for (i = 0; i < 27; i++) { - rustsecp256k1_v0_4_0_scalar_sqr(&x56, &x56); - } - rustsecp256k1_v0_4_0_scalar_mul(&x56, &x56, &x28); - - rustsecp256k1_v0_4_0_scalar_sqr(&x112, &x56); - for (i = 0; i < 55; i++) { - rustsecp256k1_v0_4_0_scalar_sqr(&x112, &x112); - } - rustsecp256k1_v0_4_0_scalar_mul(&x112, &x112, &x56); - - rustsecp256k1_v0_4_0_scalar_sqr(&x126, &x112); - for (i = 0; i < 13; i++) { - rustsecp256k1_v0_4_0_scalar_sqr(&x126, &x126); - } - rustsecp256k1_v0_4_0_scalar_mul(&x126, &x126, &x14); - - /* Then accumulate the final result (t starts at x126). */ - t = &x126; - for (i = 0; i < 3; i++) { - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &u5); /* 101 */ - for (i = 0; i < 4; i++) { /* 0 */ - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 4; i++) { /* 0 */ - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &u5); /* 101 */ - for (i = 0; i < 5; i++) { /* 0 */ - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &u11); /* 1011 */ - for (i = 0; i < 4; i++) { - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &u11); /* 1011 */ - for (i = 0; i < 4; i++) { /* 0 */ - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 5; i++) { /* 00 */ - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 6; i++) { /* 00 */ - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &u13); /* 1101 */ - for (i = 0; i < 4; i++) { /* 0 */ - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &u5); /* 101 */ - for (i = 0; i < 3; i++) { - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 5; i++) { /* 0 */ - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &u9); /* 1001 */ - for (i = 0; i < 6; i++) { /* 000 */ - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &u5); /* 101 */ - for (i = 0; i < 10; i++) { /* 0000000 */ - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 4; i++) 
{ /* 0 */ - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 9; i++) { /* 0 */ - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &x8); /* 11111111 */ - for (i = 0; i < 5; i++) { /* 0 */ - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &u9); /* 1001 */ - for (i = 0; i < 6; i++) { /* 00 */ - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &u11); /* 1011 */ - for (i = 0; i < 4; i++) { - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &u13); /* 1101 */ - for (i = 0; i < 5; i++) { - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &x2); /* 11 */ - for (i = 0; i < 6; i++) { /* 00 */ - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &u13); /* 1101 */ - for (i = 0; i < 10; i++) { /* 000000 */ - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &u13); /* 1101 */ - for (i = 0; i < 4; i++) { - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, &u9); /* 1001 */ - for (i = 0; i < 6; i++) { /* 00000 */ - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 8; i++) { /* 00 */ - rustsecp256k1_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1_v0_4_0_scalar_mul(r, t, &x6); /* 111111 */ -} - -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_is_even(const rustsecp256k1_v0_4_0_scalar *a) { - return !(a->d[0] & 1); -} -#endif - -static void rustsecp256k1_v0_4_0_scalar_inverse_var(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *x) { -#if defined(USE_SCALAR_INV_BUILTIN) - rustsecp256k1_v0_4_0_scalar_inverse(r, x); -#elif defined(USE_SCALAR_INV_NUM) - unsigned char b[32]; - rustsecp256k1_v0_4_0_num n, m; - rustsecp256k1_v0_4_0_scalar t = *x; - rustsecp256k1_v0_4_0_scalar_get_b32(b, &t); - rustsecp256k1_v0_4_0_num_set_bin(&n, b, 32); - rustsecp256k1_v0_4_0_scalar_order_get_num(&m); - rustsecp256k1_v0_4_0_num_mod_inverse(&n, &n, &m); - rustsecp256k1_v0_4_0_num_get_bin(b, 32, &n); - rustsecp256k1_v0_4_0_scalar_set_b32(r, b, NULL); - /* Verify that the inverse was computed correctly, without GMP code. */ - rustsecp256k1_v0_4_0_scalar_mul(&t, &t, r); - CHECK(rustsecp256k1_v0_4_0_scalar_is_one(&t)); -#else -#error "Please select scalar inverse implementation" -#endif + rustsecp256k1_v0_4_1_scalar_set_b32(r, bin, &overflow); + return (!overflow) & (!rustsecp256k1_v0_4_1_scalar_is_zero(r)); } /* These parameters are generated using sage/gen_exhaustive_groups.sage. */ @@ -272,7 +53,7 @@ static void rustsecp256k1_v0_4_0_scalar_inverse_var(rustsecp256k1_v0_4_0_scalar * nontrivial to get full test coverage for the exhaustive tests. We therefore * (arbitrarily) set r2 = k + 5 (mod n) and r1 = k - r2 * lambda (mod n). 
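 * By construction r1 + lambda*r2 = k (mod n), so the defining identity of the
 * split still holds for these test values.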
*/ -static void rustsecp256k1_v0_4_0_scalar_split_lambda(rustsecp256k1_v0_4_0_scalar *r1, rustsecp256k1_v0_4_0_scalar *r2, const rustsecp256k1_v0_4_0_scalar *k) { +static void rustsecp256k1_v0_4_1_scalar_split_lambda(rustsecp256k1_v0_4_1_scalar *r1, rustsecp256k1_v0_4_1_scalar *r2, const rustsecp256k1_v0_4_1_scalar *k) { *r2 = (*k + 5) % EXHAUSTIVE_TEST_ORDER; *r1 = (*k + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER; } @@ -280,13 +61,13 @@ static void rustsecp256k1_v0_4_0_scalar_split_lambda(rustsecp256k1_v0_4_0_scalar /** * The Secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where * lambda is: */ -static const rustsecp256k1_v0_4_0_scalar rustsecp256k1_v0_4_0_const_lambda = SECP256K1_SCALAR_CONST( +static const rustsecp256k1_v0_4_1_scalar rustsecp256k1_v0_4_1_const_lambda = SECP256K1_SCALAR_CONST( 0x5363AD4CUL, 0xC05C30E0UL, 0xA5261C02UL, 0x8812645AUL, 0x122E22EAUL, 0x20816678UL, 0xDF02967CUL, 0x1B23BD72UL ); #ifdef VERIFY -static void rustsecp256k1_v0_4_0_scalar_split_lambda_verify(const rustsecp256k1_v0_4_0_scalar *r1, const rustsecp256k1_v0_4_0_scalar *r2, const rustsecp256k1_v0_4_0_scalar *k); +static void rustsecp256k1_v0_4_1_scalar_split_lambda_verify(const rustsecp256k1_v0_4_1_scalar *r1, const rustsecp256k1_v0_4_1_scalar *r2, const rustsecp256k1_v0_4_1_scalar *k); #endif /* @@ -339,44 +120,44 @@ static void rustsecp256k1_v0_4_0_scalar_split_lambda_verify(const rustsecp256k1_ * * See proof below. */ -static void rustsecp256k1_v0_4_0_scalar_split_lambda(rustsecp256k1_v0_4_0_scalar *r1, rustsecp256k1_v0_4_0_scalar *r2, const rustsecp256k1_v0_4_0_scalar *k) { - rustsecp256k1_v0_4_0_scalar c1, c2; - static const rustsecp256k1_v0_4_0_scalar minus_b1 = SECP256K1_SCALAR_CONST( +static void rustsecp256k1_v0_4_1_scalar_split_lambda(rustsecp256k1_v0_4_1_scalar *r1, rustsecp256k1_v0_4_1_scalar *r2, const rustsecp256k1_v0_4_1_scalar *k) { + rustsecp256k1_v0_4_1_scalar c1, c2; + static const rustsecp256k1_v0_4_1_scalar minus_b1 = SECP256K1_SCALAR_CONST( 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL, 0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL ); - static const rustsecp256k1_v0_4_0_scalar minus_b2 = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_4_1_scalar minus_b2 = SECP256K1_SCALAR_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL ); - static const rustsecp256k1_v0_4_0_scalar g1 = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_4_1_scalar g1 = SECP256K1_SCALAR_CONST( 0x3086D221UL, 0xA7D46BCDUL, 0xE86C90E4UL, 0x9284EB15UL, 0x3DAA8A14UL, 0x71E8CA7FUL, 0xE893209AUL, 0x45DBB031UL ); - static const rustsecp256k1_v0_4_0_scalar g2 = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_4_1_scalar g2 = SECP256K1_SCALAR_CONST( 0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C4UL, 0x221208ACUL, 0x9DF506C6UL, 0x1571B4AEUL, 0x8AC47F71UL ); VERIFY_CHECK(r1 != k); VERIFY_CHECK(r2 != k); /* these _var calls are constant time since the shift amount is constant */ - rustsecp256k1_v0_4_0_scalar_mul_shift_var(&c1, k, &g1, 384); - rustsecp256k1_v0_4_0_scalar_mul_shift_var(&c2, k, &g2, 384); - rustsecp256k1_v0_4_0_scalar_mul(&c1, &c1, &minus_b1); - rustsecp256k1_v0_4_0_scalar_mul(&c2, &c2, &minus_b2); - rustsecp256k1_v0_4_0_scalar_add(r2, &c1, &c2); - rustsecp256k1_v0_4_0_scalar_mul(r1, r2, &rustsecp256k1_v0_4_0_const_lambda); - rustsecp256k1_v0_4_0_scalar_negate(r1, r1); - rustsecp256k1_v0_4_0_scalar_add(r1, r1, k); + 
rustsecp256k1_v0_4_1_scalar_mul_shift_var(&c1, k, &g1, 384); + rustsecp256k1_v0_4_1_scalar_mul_shift_var(&c2, k, &g2, 384); + rustsecp256k1_v0_4_1_scalar_mul(&c1, &c1, &minus_b1); + rustsecp256k1_v0_4_1_scalar_mul(&c2, &c2, &minus_b2); + rustsecp256k1_v0_4_1_scalar_add(r2, &c1, &c2); + rustsecp256k1_v0_4_1_scalar_mul(r1, r2, &rustsecp256k1_v0_4_1_const_lambda); + rustsecp256k1_v0_4_1_scalar_negate(r1, r1); + rustsecp256k1_v0_4_1_scalar_add(r1, r1, k); #ifdef VERIFY - rustsecp256k1_v0_4_0_scalar_split_lambda_verify(r1, r2, k); + rustsecp256k1_v0_4_1_scalar_split_lambda_verify(r1, r2, k); #endif } #ifdef VERIFY /* - * Proof for rustsecp256k1_v0_4_0_scalar_split_lambda's bounds. + * Proof for rustsecp256k1_v0_4_1_scalar_split_lambda's bounds. * * Let * - epsilon1 = 2^256 * |g1/2^384 - b2/d| @@ -479,8 +260,8 @@ static void rustsecp256k1_v0_4_0_scalar_split_lambda(rustsecp256k1_v0_4_0_scalar * * Q.E.D. */ -static void rustsecp256k1_v0_4_0_scalar_split_lambda_verify(const rustsecp256k1_v0_4_0_scalar *r1, const rustsecp256k1_v0_4_0_scalar *r2, const rustsecp256k1_v0_4_0_scalar *k) { - rustsecp256k1_v0_4_0_scalar s; +static void rustsecp256k1_v0_4_1_scalar_split_lambda_verify(const rustsecp256k1_v0_4_1_scalar *r1, const rustsecp256k1_v0_4_1_scalar *r2, const rustsecp256k1_v0_4_1_scalar *k) { + rustsecp256k1_v0_4_1_scalar s; unsigned char buf1[32]; unsigned char buf2[32]; @@ -496,19 +277,19 @@ static void rustsecp256k1_v0_4_0_scalar_split_lambda_verify(const rustsecp256k1_ 0x8a, 0x65, 0x28, 0x7b, 0xd4, 0x71, 0x79, 0xfb, 0x2b, 0xe0, 0x88, 0x46, 0xce, 0xa2, 0x67, 0xed }; - rustsecp256k1_v0_4_0_scalar_mul(&s, &rustsecp256k1_v0_4_0_const_lambda, r2); - rustsecp256k1_v0_4_0_scalar_add(&s, &s, r1); - VERIFY_CHECK(rustsecp256k1_v0_4_0_scalar_eq(&s, k)); + rustsecp256k1_v0_4_1_scalar_mul(&s, &rustsecp256k1_v0_4_1_const_lambda, r2); + rustsecp256k1_v0_4_1_scalar_add(&s, &s, r1); + VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_eq(&s, k)); - rustsecp256k1_v0_4_0_scalar_negate(&s, r1); - rustsecp256k1_v0_4_0_scalar_get_b32(buf1, r1); - rustsecp256k1_v0_4_0_scalar_get_b32(buf2, &s); - VERIFY_CHECK(rustsecp256k1_v0_4_0_memcmp_var(buf1, k1_bound, 32) < 0 || rustsecp256k1_v0_4_0_memcmp_var(buf2, k1_bound, 32) < 0); + rustsecp256k1_v0_4_1_scalar_negate(&s, r1); + rustsecp256k1_v0_4_1_scalar_get_b32(buf1, r1); + rustsecp256k1_v0_4_1_scalar_get_b32(buf2, &s); + VERIFY_CHECK(rustsecp256k1_v0_4_1_memcmp_var(buf1, k1_bound, 32) < 0 || rustsecp256k1_v0_4_1_memcmp_var(buf2, k1_bound, 32) < 0); - rustsecp256k1_v0_4_0_scalar_negate(&s, r2); - rustsecp256k1_v0_4_0_scalar_get_b32(buf1, r2); - rustsecp256k1_v0_4_0_scalar_get_b32(buf2, &s); - VERIFY_CHECK(rustsecp256k1_v0_4_0_memcmp_var(buf1, k2_bound, 32) < 0 || rustsecp256k1_v0_4_0_memcmp_var(buf2, k2_bound, 32) < 0); + rustsecp256k1_v0_4_1_scalar_negate(&s, r2); + rustsecp256k1_v0_4_1_scalar_get_b32(buf1, r2); + rustsecp256k1_v0_4_1_scalar_get_b32(buf2, &s); + VERIFY_CHECK(rustsecp256k1_v0_4_1_memcmp_var(buf1, k2_bound, 32) < 0 || rustsecp256k1_v0_4_1_memcmp_var(buf2, k2_bound, 32) < 0); } #endif /* VERIFY */ #endif /* !defined(EXHAUSTIVE_TEST_ORDER) */ diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_low.h b/secp256k1-sys/depend/secp256k1/src/scalar_low.h index 18df094..454ab5a 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_low.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_low.h @@ -10,7 +10,7 @@ #include /** A scalar modulo the group order of the secp256k1 curve. 
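 *  (Represented as a bare uint32_t in the exhaustive-test build, where the group order is tiny.)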
*/ -typedef uint32_t rustsecp256k1_v0_4_0_scalar; +typedef uint32_t rustsecp256k1_v0_4_1_scalar; #define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) (d0) diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_low_impl.h b/secp256k1-sys/depend/secp256k1/src/scalar_low_impl.h index 0cf2c57..e321eaf 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_low_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_low_impl.h @@ -11,43 +11,43 @@ #include -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_is_even(const rustsecp256k1_v0_4_0_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_is_even(const rustsecp256k1_v0_4_1_scalar *a) { return !(*a & 1); } -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_clear(rustsecp256k1_v0_4_0_scalar *r) { *r = 0; } -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_set_int(rustsecp256k1_v0_4_0_scalar *r, unsigned int v) { *r = v; } +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_scalar_clear(rustsecp256k1_v0_4_1_scalar *r) { *r = 0; } +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_scalar_set_int(rustsecp256k1_v0_4_1_scalar *r, unsigned int v) { *r = v; } -SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_0_scalar_get_bits(const rustsecp256k1_v0_4_0_scalar *a, unsigned int offset, unsigned int count) { +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_1_scalar_get_bits(const rustsecp256k1_v0_4_1_scalar *a, unsigned int offset, unsigned int count) { if (offset < 32) return ((*a >> offset) & ((((uint32_t)1) << count) - 1)); else return 0; } -SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_0_scalar_get_bits_var(const rustsecp256k1_v0_4_0_scalar *a, unsigned int offset, unsigned int count) { - return rustsecp256k1_v0_4_0_scalar_get_bits(a, offset, count); +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_1_scalar_get_bits_var(const rustsecp256k1_v0_4_1_scalar *a, unsigned int offset, unsigned int count) { + return rustsecp256k1_v0_4_1_scalar_get_bits(a, offset, count); } -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_check_overflow(const rustsecp256k1_v0_4_0_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; } +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_check_overflow(const rustsecp256k1_v0_4_1_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; } -static int rustsecp256k1_v0_4_0_scalar_add(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) { +static int rustsecp256k1_v0_4_1_scalar_add(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) { *r = (*a + *b) % EXHAUSTIVE_TEST_ORDER; return *r < *b; } -static void rustsecp256k1_v0_4_0_scalar_cadd_bit(rustsecp256k1_v0_4_0_scalar *r, unsigned int bit, int flag) { +static void rustsecp256k1_v0_4_1_scalar_cadd_bit(rustsecp256k1_v0_4_1_scalar *r, unsigned int bit, int flag) { if (flag && bit < 32) *r += ((uint32_t)1 << bit); #ifdef VERIFY VERIFY_CHECK(bit < 32); /* Verify that adding (1 << bit) will not overflow any in-range scalar *r by overflowing the underlying uint32_t. 
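     * Equivalently: (EXHAUSTIVE_TEST_ORDER - 1) + (1 << bit) <= UINT32_MAX, i.e. the
     * largest in-range scalar plus the added bit still fits in a uint32_t.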
*/ VERIFY_CHECK(((uint32_t)1 << bit) - 1 <= UINT32_MAX - EXHAUSTIVE_TEST_ORDER); - VERIFY_CHECK(rustsecp256k1_v0_4_0_scalar_check_overflow(r) == 0); + VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_check_overflow(r) == 0); #endif } -static void rustsecp256k1_v0_4_0_scalar_set_b32(rustsecp256k1_v0_4_0_scalar *r, const unsigned char *b32, int *overflow) { +static void rustsecp256k1_v0_4_1_scalar_set_b32(rustsecp256k1_v0_4_1_scalar *r, const unsigned char *b32, int *overflow) { int i; int over = 0; *r = 0; @@ -61,16 +61,16 @@ static void rustsecp256k1_v0_4_0_scalar_set_b32(rustsecp256k1_v0_4_0_scalar *r, if (overflow) *overflow = over; } -static void rustsecp256k1_v0_4_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_4_0_scalar* a) { +static void rustsecp256k1_v0_4_1_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_4_1_scalar* a) { memset(bin, 0, 32); bin[28] = *a >> 24; bin[29] = *a >> 16; bin[30] = *a >> 8; bin[31] = *a; } -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_is_zero(const rustsecp256k1_v0_4_0_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_is_zero(const rustsecp256k1_v0_4_1_scalar *a) { return *a == 0; } -static void rustsecp256k1_v0_4_0_scalar_negate(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a) { +static void rustsecp256k1_v0_4_1_scalar_negate(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a) { if (*a == 0) { *r = 0; } else { @@ -78,24 +78,24 @@ static void rustsecp256k1_v0_4_0_scalar_negate(rustsecp256k1_v0_4_0_scalar *r, c } } -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_is_one(const rustsecp256k1_v0_4_0_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_is_one(const rustsecp256k1_v0_4_1_scalar *a) { return *a == 1; } -static int rustsecp256k1_v0_4_0_scalar_is_high(const rustsecp256k1_v0_4_0_scalar *a) { +static int rustsecp256k1_v0_4_1_scalar_is_high(const rustsecp256k1_v0_4_1_scalar *a) { return *a > EXHAUSTIVE_TEST_ORDER / 2; } -static int rustsecp256k1_v0_4_0_scalar_cond_negate(rustsecp256k1_v0_4_0_scalar *r, int flag) { - if (flag) rustsecp256k1_v0_4_0_scalar_negate(r, r); +static int rustsecp256k1_v0_4_1_scalar_cond_negate(rustsecp256k1_v0_4_1_scalar *r, int flag) { + if (flag) rustsecp256k1_v0_4_1_scalar_negate(r, r); return flag ? 
-1 : 1; } -static void rustsecp256k1_v0_4_0_scalar_mul(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) { +static void rustsecp256k1_v0_4_1_scalar_mul(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) { *r = (*a * *b) % EXHAUSTIVE_TEST_ORDER; } -static int rustsecp256k1_v0_4_0_scalar_shr_int(rustsecp256k1_v0_4_0_scalar *r, int n) { +static int rustsecp256k1_v0_4_1_scalar_shr_int(rustsecp256k1_v0_4_1_scalar *r, int n) { int ret; VERIFY_CHECK(n > 0); VERIFY_CHECK(n < 16); @@ -104,20 +104,16 @@ static int rustsecp256k1_v0_4_0_scalar_shr_int(rustsecp256k1_v0_4_0_scalar *r, i return ret; } -static void rustsecp256k1_v0_4_0_scalar_sqr(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a) { - *r = (*a * *a) % EXHAUSTIVE_TEST_ORDER; -} - -static void rustsecp256k1_v0_4_0_scalar_split_128(rustsecp256k1_v0_4_0_scalar *r1, rustsecp256k1_v0_4_0_scalar *r2, const rustsecp256k1_v0_4_0_scalar *a) { +static void rustsecp256k1_v0_4_1_scalar_split_128(rustsecp256k1_v0_4_1_scalar *r1, rustsecp256k1_v0_4_1_scalar *r2, const rustsecp256k1_v0_4_1_scalar *a) { *r1 = *a; *r2 = 0; } -SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_eq(const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) { +SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_eq(const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) { return *a == *b; } -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_scalar_cmov(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_scalar_cmov(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, int flag) { uint32_t mask0, mask1; VG_CHECK_VERIFY(r, sizeof(*r)); mask0 = flag + ~((uint32_t)0); @@ -125,4 +121,19 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_scalar_cmov(rustsecp256k1_v0_4 *r = (*r & mask0) | (*a & mask1); } +static void rustsecp256k1_v0_4_1_scalar_inverse(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *x) { + int i; + *r = 0; + for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) + if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1) + *r = i; + /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus + * have a composite group order; fix it in exhaustive_tests.c). 
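+     * (With a prime EXHAUSTIVE_TEST_ORDER every nonzero scalar is invertible, and this
+     * O(order) search is cheap for the tiny orders used in testing.)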
*/ + VERIFY_CHECK(*r != 0); +} + +static void rustsecp256k1_v0_4_1_scalar_inverse_var(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *x) { + rustsecp256k1_v0_4_1_scalar_inverse(r, x); +} + #endif /* SECP256K1_SCALAR_REPR_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/scratch.h b/secp256k1-sys/depend/secp256k1/src/scratch.h index e5148b7..5751d37 100644 --- a/secp256k1-sys/depend/secp256k1/src/scratch.h +++ b/secp256k1-sys/depend/secp256k1/src/scratch.h @@ -4,12 +4,12 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -#ifndef _SECP256K1_SCRATCH_ -#define _SECP256K1_SCRATCH_ +#ifndef SECP256K1_SCRATCH_H +#define SECP256K1_SCRATCH_H /* The typedef is used internally; the struct name is used in the public API * (where it is exposed as a different typedef) */ -typedef struct rustsecp256k1_v0_4_0_scratch_space_struct { +typedef struct rustsecp256k1_v0_4_1_scratch_space_struct { /** guard against interpreting this object as other types */ unsigned char magic[8]; /** actual allocated data */ @@ -19,24 +19,24 @@ typedef struct rustsecp256k1_v0_4_0_scratch_space_struct { size_t alloc_size; /** maximum size available to allocate */ size_t max_size; -} rustsecp256k1_v0_4_0_scratch; +} rustsecp256k1_v0_4_1_scratch; -static rustsecp256k1_v0_4_0_scratch* rustsecp256k1_v0_4_0_scratch_create(const rustsecp256k1_v0_4_0_callback* error_callback, size_t max_size); +static rustsecp256k1_v0_4_1_scratch* rustsecp256k1_v0_4_1_scratch_create(const rustsecp256k1_v0_4_1_callback* error_callback, size_t max_size); -static void rustsecp256k1_v0_4_0_scratch_destroy(const rustsecp256k1_v0_4_0_callback* error_callback, rustsecp256k1_v0_4_0_scratch* scratch); +static void rustsecp256k1_v0_4_1_scratch_destroy(const rustsecp256k1_v0_4_1_callback* error_callback, rustsecp256k1_v0_4_1_scratch* scratch); /** Returns an opaque object used to "checkpoint" a scratch space. Used - * with `rustsecp256k1_v0_4_0_scratch_apply_checkpoint` to undo allocations. */ -static size_t rustsecp256k1_v0_4_0_scratch_checkpoint(const rustsecp256k1_v0_4_0_callback* error_callback, const rustsecp256k1_v0_4_0_scratch* scratch); + * with `rustsecp256k1_v0_4_1_scratch_apply_checkpoint` to undo allocations. */ +static size_t rustsecp256k1_v0_4_1_scratch_checkpoint(const rustsecp256k1_v0_4_1_callback* error_callback, const rustsecp256k1_v0_4_1_scratch* scratch); -/** Applies a check point received from `rustsecp256k1_v0_4_0_scratch_checkpoint`, +/** Applies a checkpoint received from `rustsecp256k1_v0_4_1_scratch_checkpoint`, * undoing all allocations since that point.
*/ -static void rustsecp256k1_v0_4_0_scratch_apply_checkpoint(const rustsecp256k1_v0_4_0_callback* error_callback, rustsecp256k1_v0_4_0_scratch* scratch, size_t checkpoint); +static void rustsecp256k1_v0_4_1_scratch_apply_checkpoint(const rustsecp256k1_v0_4_1_callback* error_callback, rustsecp256k1_v0_4_1_scratch* scratch, size_t checkpoint); /** Returns the maximum allocation the scratch space will allow */ -static size_t rustsecp256k1_v0_4_0_scratch_max_allocation(const rustsecp256k1_v0_4_0_callback* error_callback, const rustsecp256k1_v0_4_0_scratch* scratch, size_t n_objects); +static size_t rustsecp256k1_v0_4_1_scratch_max_allocation(const rustsecp256k1_v0_4_1_callback* error_callback, const rustsecp256k1_v0_4_1_scratch* scratch, size_t n_objects); /** Returns a pointer into the most recently allocated frame, or NULL if there is insufficient available space */ -static void *rustsecp256k1_v0_4_0_scratch_alloc(const rustsecp256k1_v0_4_0_callback* error_callback, rustsecp256k1_v0_4_0_scratch* scratch, size_t n); +static void *rustsecp256k1_v0_4_1_scratch_alloc(const rustsecp256k1_v0_4_1_callback* error_callback, rustsecp256k1_v0_4_1_scratch* scratch, size_t n); #endif diff --git a/secp256k1-sys/depend/secp256k1/src/scratch_impl.h b/secp256k1-sys/depend/secp256k1/src/scratch_impl.h index 3f97e0e..2a8ba9f 100644 --- a/secp256k1-sys/depend/secp256k1/src/scratch_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/scratch_impl.h @@ -4,35 +4,35 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -#ifndef _SECP256K1_SCRATCH_IMPL_H_ -#define _SECP256K1_SCRATCH_IMPL_H_ +#ifndef SECP256K1_SCRATCH_IMPL_H +#define SECP256K1_SCRATCH_IMPL_H #include "util.h" #include "scratch.h" -static size_t rustsecp256k1_v0_4_0_scratch_checkpoint(const rustsecp256k1_v0_4_0_callback* error_callback, const rustsecp256k1_v0_4_0_scratch* scratch) { - if (rustsecp256k1_v0_4_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { - rustsecp256k1_v0_4_0_callback_call(error_callback, "invalid scratch space"); +static size_t rustsecp256k1_v0_4_1_scratch_checkpoint(const rustsecp256k1_v0_4_1_callback* error_callback, const rustsecp256k1_v0_4_1_scratch* scratch) { + if (rustsecp256k1_v0_4_1_memcmp_var(scratch->magic, "scratch", 8) != 0) { + rustsecp256k1_v0_4_1_callback_call(error_callback, "invalid scratch space"); return 0; } return scratch->alloc_size; } -static void rustsecp256k1_v0_4_0_scratch_apply_checkpoint(const rustsecp256k1_v0_4_0_callback* error_callback, rustsecp256k1_v0_4_0_scratch* scratch, size_t checkpoint) { - if (rustsecp256k1_v0_4_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { - rustsecp256k1_v0_4_0_callback_call(error_callback, "invalid scratch space"); +static void rustsecp256k1_v0_4_1_scratch_apply_checkpoint(const rustsecp256k1_v0_4_1_callback* error_callback, rustsecp256k1_v0_4_1_scratch* scratch, size_t checkpoint) { + if (rustsecp256k1_v0_4_1_memcmp_var(scratch->magic, "scratch", 8) != 0) { + rustsecp256k1_v0_4_1_callback_call(error_callback, "invalid scratch space"); return; } if (checkpoint > scratch->alloc_size) { - rustsecp256k1_v0_4_0_callback_call(error_callback, "invalid checkpoint"); + rustsecp256k1_v0_4_1_callback_call(error_callback, "invalid checkpoint"); return; } scratch->alloc_size = checkpoint; } -static size_t rustsecp256k1_v0_4_0_scratch_max_allocation(const rustsecp256k1_v0_4_0_callback* error_callback, const rustsecp256k1_v0_4_0_scratch* scratch, size_t objects) { - if 
(rustsecp256k1_v0_4_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { - rustsecp256k1_v0_4_0_callback_call(error_callback, "invalid scratch space"); +static size_t rustsecp256k1_v0_4_1_scratch_max_allocation(const rustsecp256k1_v0_4_1_callback* error_callback, const rustsecp256k1_v0_4_1_scratch* scratch, size_t objects) { + if (rustsecp256k1_v0_4_1_memcmp_var(scratch->magic, "scratch", 8) != 0) { + rustsecp256k1_v0_4_1_callback_call(error_callback, "invalid scratch space"); return 0; } /* Ensure that multiplication will not wrap around */ @@ -45,7 +45,7 @@ static size_t rustsecp256k1_v0_4_0_scratch_max_allocation(const rustsecp256k1_v0 return scratch->max_size - scratch->alloc_size - objects * (ALIGNMENT - 1); } -static void *rustsecp256k1_v0_4_0_scratch_alloc(const rustsecp256k1_v0_4_0_callback* error_callback, rustsecp256k1_v0_4_0_scratch* scratch, size_t size) { +static void *rustsecp256k1_v0_4_1_scratch_alloc(const rustsecp256k1_v0_4_1_callback* error_callback, rustsecp256k1_v0_4_1_scratch* scratch, size_t size) { void *ret; size_t rounded_size; @@ -56,8 +56,8 @@ static void *rustsecp256k1_v0_4_0_scratch_alloc(const rustsecp256k1_v0_4_0_callb } size = rounded_size; - if (rustsecp256k1_v0_4_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { - rustsecp256k1_v0_4_0_callback_call(error_callback, "invalid scratch space"); + if (rustsecp256k1_v0_4_1_memcmp_var(scratch->magic, "scratch", 8) != 0) { + rustsecp256k1_v0_4_1_callback_call(error_callback, "invalid scratch space"); return NULL; } diff --git a/secp256k1-sys/depend/secp256k1/src/secp256k1.c b/secp256k1-sys/depend/secp256k1/src/secp256k1.c index d0d3f85..290a519 100644 --- a/secp256k1-sys/depend/secp256k1/src/secp256k1.c +++ b/secp256k1-sys/depend/secp256k1/src/secp256k1.c @@ -4,12 +4,13 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -#include "include/secp256k1.h" -#include "include/secp256k1_preallocated.h" +#define SECP256K1_BUILD + +#include "../include/secp256k1.h" +#include "../include/secp256k1_preallocated.h" #include "assumptions.h" #include "util.h" -#include "num_impl.h" #include "field_impl.h" #include "scalar_impl.h" #include "group_impl.h" @@ -22,75 +23,79 @@ #include "scratch_impl.h" #include "selftest.h" +#ifdef SECP256K1_NO_BUILD +# error "secp256k1.h processed without SECP256K1_BUILD defined while building secp256k1.c" +#endif + #if defined(VALGRIND) # include <valgrind/memcheck.h> #endif #define ARG_CHECK(cond) do { \ if (EXPECT(!(cond), 0)) { \ - rustsecp256k1_v0_4_0_callback_call(&ctx->illegal_callback, #cond); \ + rustsecp256k1_v0_4_1_callback_call(&ctx->illegal_callback, #cond); \ return 0; \ } \ } while(0) #define ARG_CHECK_NO_RETURN(cond) do { \ if (EXPECT(!(cond), 0)) { \ - rustsecp256k1_v0_4_0_callback_call(&ctx->illegal_callback, #cond); \ + rustsecp256k1_v0_4_1_callback_call(&ctx->illegal_callback, #cond); \ } \ } while(0) #ifndef USE_EXTERNAL_DEFAULT_CALLBACKS #include <stdlib.h> #include <stdio.h> -static void rustsecp256k1_v0_4_0_default_illegal_callback_fn(const char* str, void* data) { +static void rustsecp256k1_v0_4_1_default_illegal_callback_fn(const char* str, void* data) { (void)data; fprintf(stderr, "[libsecp256k1] illegal argument: %s\n", str); abort(); } -static void rustsecp256k1_v0_4_0_default_error_callback_fn(const char* str, void* data) { +static void rustsecp256k1_v0_4_1_default_error_callback_fn(const char* str, void* data) { (void)data; fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str); abort(); }
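/* [editor's note -- annotation, not part of the upstream patch] The default
 * handlers above log to stderr and abort(). A minimal sketch of a non-aborting
 * replacement a caller could install instead; `my_illegal_handler` and
 * `err_count` are hypothetical names, and the setter used in the usage line,
 * rustsecp256k1_v0_4_1_context_set_illegal_callback, is defined later in this
 * file. Once a handler that returns is installed, the ARG_CHECK macro above
 * simply makes the offending API call return 0. */
static void my_illegal_handler(const char* str, void* data) {
    int* counter = (int*)data; /* caller-owned state registered with the callback */
    (void)str;
    if (counter != NULL) {
        (*counter) += 1; /* record the violation instead of aborting the process */
    }
}
/* usage: rustsecp256k1_v0_4_1_context_set_illegal_callback(ctx, my_illegal_handler, &err_count); */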
#else -void rustsecp256k1_v0_4_0_default_illegal_callback_fn(const char* str, void* data); -void rustsecp256k1_v0_4_0_default_error_callback_fn(const char* str, void* data); +void rustsecp256k1_v0_4_1_default_illegal_callback_fn(const char* str, void* data); +void rustsecp256k1_v0_4_1_default_error_callback_fn(const char* str, void* data); #endif -static const rustsecp256k1_v0_4_0_callback default_illegal_callback = { - rustsecp256k1_v0_4_0_default_illegal_callback_fn, +static const rustsecp256k1_v0_4_1_callback default_illegal_callback = { + rustsecp256k1_v0_4_1_default_illegal_callback_fn, NULL }; -static const rustsecp256k1_v0_4_0_callback default_error_callback = { - rustsecp256k1_v0_4_0_default_error_callback_fn, +static const rustsecp256k1_v0_4_1_callback default_error_callback = { + rustsecp256k1_v0_4_1_default_error_callback_fn, NULL }; -struct rustsecp256k1_v0_4_0_context_struct { - rustsecp256k1_v0_4_0_ecmult_context ecmult_ctx; - rustsecp256k1_v0_4_0_ecmult_gen_context ecmult_gen_ctx; - rustsecp256k1_v0_4_0_callback illegal_callback; - rustsecp256k1_v0_4_0_callback error_callback; +struct rustsecp256k1_v0_4_1_context_struct { + rustsecp256k1_v0_4_1_ecmult_context ecmult_ctx; + rustsecp256k1_v0_4_1_ecmult_gen_context ecmult_gen_ctx; + rustsecp256k1_v0_4_1_callback illegal_callback; + rustsecp256k1_v0_4_1_callback error_callback; int declassify; }; -static const rustsecp256k1_v0_4_0_context rustsecp256k1_v0_4_0_context_no_precomp_ = { +static const rustsecp256k1_v0_4_1_context rustsecp256k1_v0_4_1_context_no_precomp_ = { { 0 }, { 0 }, - { rustsecp256k1_v0_4_0_default_illegal_callback_fn, 0 }, - { rustsecp256k1_v0_4_0_default_error_callback_fn, 0 }, + { rustsecp256k1_v0_4_1_default_illegal_callback_fn, 0 }, + { rustsecp256k1_v0_4_1_default_error_callback_fn, 0 }, 0 }; -const rustsecp256k1_v0_4_0_context *rustsecp256k1_v0_4_0_context_no_precomp = &rustsecp256k1_v0_4_0_context_no_precomp_; +const rustsecp256k1_v0_4_1_context *rustsecp256k1_v0_4_1_context_no_precomp = &rustsecp256k1_v0_4_1_context_no_precomp_; -size_t rustsecp256k1_v0_4_0_context_preallocated_size(unsigned int flags) { - size_t ret = ROUND_TO_ALIGN(sizeof(rustsecp256k1_v0_4_0_context)); +size_t rustsecp256k1_v0_4_1_context_preallocated_size(unsigned int flags) { + size_t ret = ROUND_TO_ALIGN(sizeof(rustsecp256k1_v0_4_1_context)); /* A return value of 0 is reserved as an indicator for errors when we call this function internally. 
*/ VERIFY_CHECK(ret != 0); if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) { - rustsecp256k1_v0_4_0_callback_call(&default_illegal_callback, + rustsecp256k1_v0_4_1_callback_call(&default_illegal_callback, "Invalid flags"); return 0; } @@ -104,87 +109,87 @@ size_t rustsecp256k1_v0_4_0_context_preallocated_size(unsigned int flags) { return ret; } -size_t rustsecp256k1_v0_4_0_context_preallocated_clone_size(const rustsecp256k1_v0_4_0_context* ctx) { - size_t ret = ROUND_TO_ALIGN(sizeof(rustsecp256k1_v0_4_0_context)); +size_t rustsecp256k1_v0_4_1_context_preallocated_clone_size(const rustsecp256k1_v0_4_1_context* ctx) { + size_t ret = ROUND_TO_ALIGN(sizeof(rustsecp256k1_v0_4_1_context)); VERIFY_CHECK(ctx != NULL); - if (rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { + if (rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { ret += SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE; } - if (rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)) { + if (rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx)) { ret += SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE; } return ret; } -rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_preallocated_create(void* prealloc, unsigned int flags) { +rustsecp256k1_v0_4_1_context* rustsecp256k1_v0_4_1_context_preallocated_create(void* prealloc, unsigned int flags) { void* const base = prealloc; size_t prealloc_size; - rustsecp256k1_v0_4_0_context* ret; + rustsecp256k1_v0_4_1_context* ret; - if (!rustsecp256k1_v0_4_0_selftest()) { - rustsecp256k1_v0_4_0_callback_call(&default_error_callback, "self test failed"); + if (!rustsecp256k1_v0_4_1_selftest()) { + rustsecp256k1_v0_4_1_callback_call(&default_error_callback, "self test failed"); } - prealloc_size = rustsecp256k1_v0_4_0_context_preallocated_size(flags); + prealloc_size = rustsecp256k1_v0_4_1_context_preallocated_size(flags); if (prealloc_size == 0) { return NULL; } VERIFY_CHECK(prealloc != NULL); - ret = (rustsecp256k1_v0_4_0_context*)manual_alloc(&prealloc, sizeof(rustsecp256k1_v0_4_0_context), base, prealloc_size); + ret = (rustsecp256k1_v0_4_1_context*)manual_alloc(&prealloc, sizeof(rustsecp256k1_v0_4_1_context), base, prealloc_size); ret->illegal_callback = default_illegal_callback; ret->error_callback = default_error_callback; - rustsecp256k1_v0_4_0_ecmult_context_init(&ret->ecmult_ctx); - rustsecp256k1_v0_4_0_ecmult_gen_context_init(&ret->ecmult_gen_ctx); + rustsecp256k1_v0_4_1_ecmult_context_init(&ret->ecmult_ctx); + rustsecp256k1_v0_4_1_ecmult_gen_context_init(&ret->ecmult_gen_ctx); - /* Flags have been checked by rustsecp256k1_v0_4_0_context_preallocated_size. */ + /* Flags have been checked by rustsecp256k1_v0_4_1_context_preallocated_size. 
*/ VERIFY_CHECK((flags & SECP256K1_FLAGS_TYPE_MASK) == SECP256K1_FLAGS_TYPE_CONTEXT); if (flags & SECP256K1_FLAGS_BIT_CONTEXT_SIGN) { - rustsecp256k1_v0_4_0_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &prealloc); + rustsecp256k1_v0_4_1_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &prealloc); } if (flags & SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) { - rustsecp256k1_v0_4_0_ecmult_context_build(&ret->ecmult_ctx, &prealloc); + rustsecp256k1_v0_4_1_ecmult_context_build(&ret->ecmult_ctx, &prealloc); } ret->declassify = !!(flags & SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY); - return (rustsecp256k1_v0_4_0_context*) ret; + return (rustsecp256k1_v0_4_1_context*) ret; } -rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_preallocated_clone(const rustsecp256k1_v0_4_0_context* ctx, void* prealloc) { +rustsecp256k1_v0_4_1_context* rustsecp256k1_v0_4_1_context_preallocated_clone(const rustsecp256k1_v0_4_1_context* ctx, void* prealloc) { size_t prealloc_size; - rustsecp256k1_v0_4_0_context* ret; + rustsecp256k1_v0_4_1_context* ret; VERIFY_CHECK(ctx != NULL); ARG_CHECK(prealloc != NULL); - prealloc_size = rustsecp256k1_v0_4_0_context_preallocated_clone_size(ctx); - ret = (rustsecp256k1_v0_4_0_context*)prealloc; + prealloc_size = rustsecp256k1_v0_4_1_context_preallocated_clone_size(ctx); + ret = (rustsecp256k1_v0_4_1_context*)prealloc; memcpy(ret, ctx, prealloc_size); - rustsecp256k1_v0_4_0_ecmult_gen_context_finalize_memcpy(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx); - rustsecp256k1_v0_4_0_ecmult_context_finalize_memcpy(&ret->ecmult_ctx, &ctx->ecmult_ctx); + rustsecp256k1_v0_4_1_ecmult_gen_context_finalize_memcpy(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx); + rustsecp256k1_v0_4_1_ecmult_context_finalize_memcpy(&ret->ecmult_ctx, &ctx->ecmult_ctx); return ret; } -void rustsecp256k1_v0_4_0_context_preallocated_destroy(rustsecp256k1_v0_4_0_context* ctx) { - ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_4_0_context_no_precomp); +void rustsecp256k1_v0_4_1_context_preallocated_destroy(rustsecp256k1_v0_4_1_context* ctx) { + ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_4_1_context_no_precomp); if (ctx != NULL) { - rustsecp256k1_v0_4_0_ecmult_context_clear(&ctx->ecmult_ctx); - rustsecp256k1_v0_4_0_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx); + rustsecp256k1_v0_4_1_ecmult_context_clear(&ctx->ecmult_ctx); + rustsecp256k1_v0_4_1_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx); } } -void rustsecp256k1_v0_4_0_context_set_illegal_callback(rustsecp256k1_v0_4_0_context* ctx, void (*fun)(const char* message, void* data), const void* data) { - ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_4_0_context_no_precomp); +void rustsecp256k1_v0_4_1_context_set_illegal_callback(rustsecp256k1_v0_4_1_context* ctx, void (*fun)(const char* message, void* data), const void* data) { + ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_4_1_context_no_precomp); if (fun == NULL) { - fun = rustsecp256k1_v0_4_0_default_illegal_callback_fn; + fun = rustsecp256k1_v0_4_1_default_illegal_callback_fn; } ctx->illegal_callback.fn = fun; ctx->illegal_callback.data = data; } -void rustsecp256k1_v0_4_0_context_set_error_callback(rustsecp256k1_v0_4_0_context* ctx, void (*fun)(const char* message, void* data), const void* data) { - ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_4_0_context_no_precomp); +void rustsecp256k1_v0_4_1_context_set_error_callback(rustsecp256k1_v0_4_1_context* ctx, void (*fun)(const char* message, void* data), const void* data) { + ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_4_1_context_no_precomp); if (fun == NULL) { - fun = 
rustsecp256k1_v0_4_0_default_error_callback_fn; + fun = rustsecp256k1_v0_4_1_default_error_callback_fn; } ctx->error_callback.fn = fun; ctx->error_callback.data = data; @@ -194,7 +199,7 @@ void rustsecp256k1_v0_4_0_context_set_error_callback(rustsecp256k1_v0_4_0_contex * of the software. This is set up for use with valgrind but could be substituted with * the appropriate instrumentation for other analysis tools. */ -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_declassify(const rustsecp256k1_v0_4_0_context* ctx, const void *p, size_t len) { +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_declassify(const rustsecp256k1_v0_4_1_context* ctx, const void *p, size_t len) { #if defined(VALGRIND) if (EXPECT(ctx->declassify,0)) VALGRIND_MAKE_MEM_DEFINED(p, len); #else @@ -204,59 +209,59 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_declassify(const rustsecp256k1 #endif } -static int rustsecp256k1_v0_4_0_pubkey_load(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ge* ge, const rustsecp256k1_v0_4_0_pubkey* pubkey) { - if (sizeof(rustsecp256k1_v0_4_0_ge_storage) == 64) { - /* When the rustsecp256k1_v0_4_0_ge_storage type is exactly 64 byte, use its - * representation inside rustsecp256k1_v0_4_0_pubkey, as conversion is very fast. - * Note that rustsecp256k1_v0_4_0_pubkey_save must use the same representation. */ - rustsecp256k1_v0_4_0_ge_storage s; +static int rustsecp256k1_v0_4_1_pubkey_load(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ge* ge, const rustsecp256k1_v0_4_1_pubkey* pubkey) { + if (sizeof(rustsecp256k1_v0_4_1_ge_storage) == 64) { + /* When the rustsecp256k1_v0_4_1_ge_storage type is exactly 64 bytes, use its + * representation inside rustsecp256k1_v0_4_1_pubkey, as conversion is very fast. + * Note that rustsecp256k1_v0_4_1_pubkey_save must use the same representation. */ + rustsecp256k1_v0_4_1_ge_storage s; memcpy(&s, &pubkey->data[0], sizeof(s)); - rustsecp256k1_v0_4_0_ge_from_storage(ge, &s); + rustsecp256k1_v0_4_1_ge_from_storage(ge, &s); } else { /* Otherwise, fall back to 32-byte big endian for X and Y.
*/ - rustsecp256k1_v0_4_0_fe x, y; - rustsecp256k1_v0_4_0_fe_set_b32(&x, pubkey->data); - rustsecp256k1_v0_4_0_fe_set_b32(&y, pubkey->data + 32); - rustsecp256k1_v0_4_0_ge_set_xy(ge, &x, &y); + rustsecp256k1_v0_4_1_fe x, y; + rustsecp256k1_v0_4_1_fe_set_b32(&x, pubkey->data); + rustsecp256k1_v0_4_1_fe_set_b32(&y, pubkey->data + 32); + rustsecp256k1_v0_4_1_ge_set_xy(ge, &x, &y); } - ARG_CHECK(!rustsecp256k1_v0_4_0_fe_is_zero(&ge->x)); + ARG_CHECK(!rustsecp256k1_v0_4_1_fe_is_zero(&ge->x)); return 1; } -static void rustsecp256k1_v0_4_0_pubkey_save(rustsecp256k1_v0_4_0_pubkey* pubkey, rustsecp256k1_v0_4_0_ge* ge) { - if (sizeof(rustsecp256k1_v0_4_0_ge_storage) == 64) { - rustsecp256k1_v0_4_0_ge_storage s; - rustsecp256k1_v0_4_0_ge_to_storage(&s, ge); +static void rustsecp256k1_v0_4_1_pubkey_save(rustsecp256k1_v0_4_1_pubkey* pubkey, rustsecp256k1_v0_4_1_ge* ge) { + if (sizeof(rustsecp256k1_v0_4_1_ge_storage) == 64) { + rustsecp256k1_v0_4_1_ge_storage s; + rustsecp256k1_v0_4_1_ge_to_storage(&s, ge); memcpy(&pubkey->data[0], &s, sizeof(s)); } else { - VERIFY_CHECK(!rustsecp256k1_v0_4_0_ge_is_infinity(ge)); - rustsecp256k1_v0_4_0_fe_normalize_var(&ge->x); - rustsecp256k1_v0_4_0_fe_normalize_var(&ge->y); - rustsecp256k1_v0_4_0_fe_get_b32(pubkey->data, &ge->x); - rustsecp256k1_v0_4_0_fe_get_b32(pubkey->data + 32, &ge->y); + VERIFY_CHECK(!rustsecp256k1_v0_4_1_ge_is_infinity(ge)); + rustsecp256k1_v0_4_1_fe_normalize_var(&ge->x); + rustsecp256k1_v0_4_1_fe_normalize_var(&ge->y); + rustsecp256k1_v0_4_1_fe_get_b32(pubkey->data, &ge->x); + rustsecp256k1_v0_4_1_fe_get_b32(pubkey->data + 32, &ge->y); } } -int rustsecp256k1_v0_4_0_ec_pubkey_parse(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey* pubkey, const unsigned char *input, size_t inputlen) { - rustsecp256k1_v0_4_0_ge Q; +int rustsecp256k1_v0_4_1_ec_pubkey_parse(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey* pubkey, const unsigned char *input, size_t inputlen) { + rustsecp256k1_v0_4_1_ge Q; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); memset(pubkey, 0, sizeof(*pubkey)); ARG_CHECK(input != NULL); - if (!rustsecp256k1_v0_4_0_eckey_pubkey_parse(&Q, input, inputlen)) { + if (!rustsecp256k1_v0_4_1_eckey_pubkey_parse(&Q, input, inputlen)) { return 0; } - if (!rustsecp256k1_v0_4_0_ge_is_in_correct_subgroup(&Q)) { + if (!rustsecp256k1_v0_4_1_ge_is_in_correct_subgroup(&Q)) { return 0; } - rustsecp256k1_v0_4_0_pubkey_save(pubkey, &Q); - rustsecp256k1_v0_4_0_ge_clear(&Q); + rustsecp256k1_v0_4_1_pubkey_save(pubkey, &Q); + rustsecp256k1_v0_4_1_ge_clear(&Q); return 1; } -int rustsecp256k1_v0_4_0_ec_pubkey_serialize(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_4_0_pubkey* pubkey, unsigned int flags) { - rustsecp256k1_v0_4_0_ge Q; +int rustsecp256k1_v0_4_1_ec_pubkey_serialize(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_4_1_pubkey* pubkey, unsigned int flags) { + rustsecp256k1_v0_4_1_ge Q; size_t len; int ret = 0; @@ -269,8 +274,8 @@ int rustsecp256k1_v0_4_0_ec_pubkey_serialize(const rustsecp256k1_v0_4_0_context* memset(output, 0, len); ARG_CHECK(pubkey != NULL); ARG_CHECK((flags & SECP256K1_FLAGS_TYPE_MASK) == SECP256K1_FLAGS_TYPE_COMPRESSION); - if (rustsecp256k1_v0_4_0_pubkey_load(ctx, &Q, pubkey)) { - ret = rustsecp256k1_v0_4_0_eckey_pubkey_serialize(&Q, output, &len, flags & SECP256K1_FLAGS_BIT_COMPRESSION); + if (rustsecp256k1_v0_4_1_pubkey_load(ctx, &Q, pubkey)) { + ret = 
rustsecp256k1_v0_4_1_eckey_pubkey_serialize(&Q, output, &len, flags & SECP256K1_FLAGS_BIT_COMPRESSION); if (ret) { *outputlen = len; } @@ -278,39 +283,65 @@ int rustsecp256k1_v0_4_0_ec_pubkey_serialize(const rustsecp256k1_v0_4_0_context* return ret; } -static void rustsecp256k1_v0_4_0_ecdsa_signature_load(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_scalar* r, rustsecp256k1_v0_4_0_scalar* s, const rustsecp256k1_v0_4_0_ecdsa_signature* sig) { +int rustsecp256k1_v0_4_1_ec_pubkey_cmp(const rustsecp256k1_v0_4_1_context* ctx, const rustsecp256k1_v0_4_1_pubkey* pubkey0, const rustsecp256k1_v0_4_1_pubkey* pubkey1) { + unsigned char out[2][33]; + const rustsecp256k1_v0_4_1_pubkey* pk[2]; + int i; + + VERIFY_CHECK(ctx != NULL); + pk[0] = pubkey0; pk[1] = pubkey1; + for (i = 0; i < 2; i++) { + size_t out_size = sizeof(out[i]); + /* If the public key is NULL or invalid, ec_pubkey_serialize will call + * the illegal_callback and return 0. In that case we will serialize the + * key as all zeros, which is less than any valid public key. This + * results in consistent comparisons even if NULL or invalid pubkeys are + * involved and prevents edge cases such as sorting algorithms that use + * this function and do not terminate as a result. */ + if (!rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, out[i], &out_size, pk[i], SECP256K1_EC_COMPRESSED)) { + /* Note that ec_pubkey_serialize should already set the output to + * zero in that case, but it's not guaranteed by the API; we can't + * test it, and writing a VERIFY_CHECK is more complex than + * explicitly memsetting (again). */ + memset(out[i], 0, sizeof(out[i])); + } + } + return rustsecp256k1_v0_4_1_memcmp_var(out[0], out[1], sizeof(out[0])); +} + +static void rustsecp256k1_v0_4_1_ecdsa_signature_load(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_scalar* r, rustsecp256k1_v0_4_1_scalar* s, const rustsecp256k1_v0_4_1_ecdsa_signature* sig) { (void)ctx; - if (sizeof(rustsecp256k1_v0_4_0_scalar) == 32) { - /* When the rustsecp256k1_v0_4_0_scalar type is exactly 32 byte, use its - * representation inside rustsecp256k1_v0_4_0_ecdsa_signature, as conversion is very fast. - * Note that rustsecp256k1_v0_4_0_ecdsa_signature_save must use the same representation. */ + if (sizeof(rustsecp256k1_v0_4_1_scalar) == 32) { + /* When the rustsecp256k1_v0_4_1_scalar type is exactly 32 bytes, use its + * representation inside rustsecp256k1_v0_4_1_ecdsa_signature, as conversion is very fast. + * Note that rustsecp256k1_v0_4_1_ecdsa_signature_save must use the same representation.
*/ memcpy(r, &sig->data[0], 32); memcpy(s, &sig->data[32], 32); } else { - rustsecp256k1_v0_4_0_scalar_set_b32(r, &sig->data[0], NULL); - rustsecp256k1_v0_4_0_scalar_set_b32(s, &sig->data[32], NULL); + rustsecp256k1_v0_4_1_scalar_set_b32(r, &sig->data[0], NULL); + rustsecp256k1_v0_4_1_scalar_set_b32(s, &sig->data[32], NULL); } } -static void rustsecp256k1_v0_4_0_ecdsa_signature_save(rustsecp256k1_v0_4_0_ecdsa_signature* sig, const rustsecp256k1_v0_4_0_scalar* r, const rustsecp256k1_v0_4_0_scalar* s) { - if (sizeof(rustsecp256k1_v0_4_0_scalar) == 32) { +static void rustsecp256k1_v0_4_1_ecdsa_signature_save(rustsecp256k1_v0_4_1_ecdsa_signature* sig, const rustsecp256k1_v0_4_1_scalar* r, const rustsecp256k1_v0_4_1_scalar* s) { + if (sizeof(rustsecp256k1_v0_4_1_scalar) == 32) { memcpy(&sig->data[0], r, 32); memcpy(&sig->data[32], s, 32); } else { - rustsecp256k1_v0_4_0_scalar_get_b32(&sig->data[0], r); - rustsecp256k1_v0_4_0_scalar_get_b32(&sig->data[32], s); + rustsecp256k1_v0_4_1_scalar_get_b32(&sig->data[0], r); + rustsecp256k1_v0_4_1_scalar_get_b32(&sig->data[32], s); } } -int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { - rustsecp256k1_v0_4_0_scalar r, s; +int rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { + rustsecp256k1_v0_4_1_scalar r, s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(sig != NULL); ARG_CHECK(input != NULL); - if (rustsecp256k1_v0_4_0_ecdsa_sig_parse(&r, &s, input, inputlen)) { - rustsecp256k1_v0_4_0_ecdsa_signature_save(sig, &r, &s); + if (rustsecp256k1_v0_4_1_ecdsa_sig_parse(&r, &s, input, inputlen)) { + rustsecp256k1_v0_4_1_ecdsa_signature_save(sig, &r, &s); return 1; } else { memset(sig, 0, sizeof(*sig)); @@ -318,8 +349,8 @@ int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(const rustsecp256k1_v0_4_0_co } } -int rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_signature* sig, const unsigned char *input64) { - rustsecp256k1_v0_4_0_scalar r, s; +int rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_signature* sig, const unsigned char *input64) { + rustsecp256k1_v0_4_1_scalar r, s; int ret = 1; int overflow = 0; @@ -327,77 +358,77 @@ int rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(const rustsecp256k1_v0_4_ ARG_CHECK(sig != NULL); ARG_CHECK(input64 != NULL); - rustsecp256k1_v0_4_0_scalar_set_b32(&r, &input64[0], &overflow); + rustsecp256k1_v0_4_1_scalar_set_b32(&r, &input64[0], &overflow); ret &= !overflow; - rustsecp256k1_v0_4_0_scalar_set_b32(&s, &input64[32], &overflow); + rustsecp256k1_v0_4_1_scalar_set_b32(&s, &input64[32], &overflow); ret &= !overflow; if (ret) { - rustsecp256k1_v0_4_0_ecdsa_signature_save(sig, &r, &s); + rustsecp256k1_v0_4_1_ecdsa_signature_save(sig, &r, &s); } else { memset(sig, 0, sizeof(*sig)); } return ret; } -int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_4_0_ecdsa_signature* sig) { - rustsecp256k1_v0_4_0_scalar r, s; +int rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_4_1_ecdsa_signature* sig) { + rustsecp256k1_v0_4_1_scalar r, 
s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output != NULL); ARG_CHECK(outputlen != NULL); ARG_CHECK(sig != NULL); - rustsecp256k1_v0_4_0_ecdsa_signature_load(ctx, &r, &s, sig); - return rustsecp256k1_v0_4_0_ecdsa_sig_serialize(output, outputlen, &r, &s); + rustsecp256k1_v0_4_1_ecdsa_signature_load(ctx, &r, &s, sig); + return rustsecp256k1_v0_4_1_ecdsa_sig_serialize(output, outputlen, &r, &s); } -int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_compact(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *output64, const rustsecp256k1_v0_4_0_ecdsa_signature* sig) { - rustsecp256k1_v0_4_0_scalar r, s; +int rustsecp256k1_v0_4_1_ecdsa_signature_serialize_compact(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output64, const rustsecp256k1_v0_4_1_ecdsa_signature* sig) { + rustsecp256k1_v0_4_1_scalar r, s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output64 != NULL); ARG_CHECK(sig != NULL); - rustsecp256k1_v0_4_0_ecdsa_signature_load(ctx, &r, &s, sig); - rustsecp256k1_v0_4_0_scalar_get_b32(&output64[0], &r); - rustsecp256k1_v0_4_0_scalar_get_b32(&output64[32], &s); + rustsecp256k1_v0_4_1_ecdsa_signature_load(ctx, &r, &s, sig); + rustsecp256k1_v0_4_1_scalar_get_b32(&output64[0], &r); + rustsecp256k1_v0_4_1_scalar_get_b32(&output64[32], &s); return 1; } -int rustsecp256k1_v0_4_0_ecdsa_signature_normalize(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_signature *sigout, const rustsecp256k1_v0_4_0_ecdsa_signature *sigin) { - rustsecp256k1_v0_4_0_scalar r, s; +int rustsecp256k1_v0_4_1_ecdsa_signature_normalize(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_signature *sigout, const rustsecp256k1_v0_4_1_ecdsa_signature *sigin) { + rustsecp256k1_v0_4_1_scalar r, s; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(sigin != NULL); - rustsecp256k1_v0_4_0_ecdsa_signature_load(ctx, &r, &s, sigin); - ret = rustsecp256k1_v0_4_0_scalar_is_high(&s); + rustsecp256k1_v0_4_1_ecdsa_signature_load(ctx, &r, &s, sigin); + ret = rustsecp256k1_v0_4_1_scalar_is_high(&s); if (sigout != NULL) { if (ret) { - rustsecp256k1_v0_4_0_scalar_negate(&s, &s); + rustsecp256k1_v0_4_1_scalar_negate(&s, &s); } - rustsecp256k1_v0_4_0_ecdsa_signature_save(sigout, &r, &s); + rustsecp256k1_v0_4_1_ecdsa_signature_save(sigout, &r, &s); } return ret; } -int rustsecp256k1_v0_4_0_ecdsa_verify(const rustsecp256k1_v0_4_0_context* ctx, const rustsecp256k1_v0_4_0_ecdsa_signature *sig, const unsigned char *msghash32, const rustsecp256k1_v0_4_0_pubkey *pubkey) { - rustsecp256k1_v0_4_0_ge q; - rustsecp256k1_v0_4_0_scalar r, s; - rustsecp256k1_v0_4_0_scalar m; +int rustsecp256k1_v0_4_1_ecdsa_verify(const rustsecp256k1_v0_4_1_context* ctx, const rustsecp256k1_v0_4_1_ecdsa_signature *sig, const unsigned char *msghash32, const rustsecp256k1_v0_4_1_pubkey *pubkey) { + rustsecp256k1_v0_4_1_ge q; + rustsecp256k1_v0_4_1_scalar r, s; + rustsecp256k1_v0_4_1_scalar m; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)); + ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx)); ARG_CHECK(msghash32 != NULL); ARG_CHECK(sig != NULL); ARG_CHECK(pubkey != NULL); - rustsecp256k1_v0_4_0_scalar_set_b32(&m, msghash32, NULL); - rustsecp256k1_v0_4_0_ecdsa_signature_load(ctx, &r, &s, sig); - return (!rustsecp256k1_v0_4_0_scalar_is_high(&s) && - rustsecp256k1_v0_4_0_pubkey_load(ctx, &q, pubkey) && - rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &r, &s, &q, &m)); + rustsecp256k1_v0_4_1_scalar_set_b32(&m, msghash32, NULL); + 
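/* [editor's note -- annotation, not part of the upstream patch] A usage sketch
 * for the parse/normalize/verify path in the surrounding hunk;
 * `verify_compact_sketch` is a hypothetical helper, not library API, and `ctx`
 * is assumed to have been created with the VERIFY flag. Since
 * rustsecp256k1_v0_4_1_ecdsa_verify rejects high-S signatures, the parsed
 * signature is normalized first. */
static int verify_compact_sketch(const rustsecp256k1_v0_4_1_context *ctx, const unsigned char *msg32, const unsigned char *sig64, const unsigned char *pub33) {
    rustsecp256k1_v0_4_1_ecdsa_signature sig;
    rustsecp256k1_v0_4_1_pubkey pub;
    if (!rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact(ctx, &sig, sig64)) {
        return 0; /* r or s overflowed the group order */
    }
    /* Fold a possible high-S encoding into the canonical low-S form. */
    rustsecp256k1_v0_4_1_ecdsa_signature_normalize(ctx, &sig, &sig);
    if (!rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, &pub, pub33, 33)) {
        return 0; /* not a valid compressed public key */
    }
    return rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pub);
}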
rustsecp256k1_v0_4_1_ecdsa_signature_load(ctx, &r, &s, sig); + return (!rustsecp256k1_v0_4_1_scalar_is_high(&s) && + rustsecp256k1_v0_4_1_pubkey_load(ctx, &q, pubkey) && + rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &r, &s, &q, &m)); } static SECP256K1_INLINE void buffer_append(unsigned char *buf, unsigned int *offset, const void *data, unsigned int len) { @@ -408,7 +439,7 @@ static SECP256K1_INLINE void buffer_append(unsigned char *buf, unsigned int *off static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) { unsigned char keydata[112]; unsigned int offset = 0; - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 rng; + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 rng; unsigned int i; /* We feed a byte array to the PRNG as input, consisting of: * - the private key (32 bytes) and message (32 bytes), see RFC 6979 3.2d. @@ -426,51 +457,51 @@ static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *m if (algo16 != NULL) { buffer_append(keydata, &offset, algo16, 16); } - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_initialize(&rng, keydata, offset); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_initialize(&rng, keydata, offset); memset(keydata, 0, sizeof(keydata)); for (i = 0; i <= counter; i++) { - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); } - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_finalize(&rng); return 1; } -const rustsecp256k1_v0_4_0_nonce_function rustsecp256k1_v0_4_0_nonce_function_rfc6979 = nonce_function_rfc6979; -const rustsecp256k1_v0_4_0_nonce_function rustsecp256k1_v0_4_0_nonce_function_default = nonce_function_rfc6979; +const rustsecp256k1_v0_4_1_nonce_function rustsecp256k1_v0_4_1_nonce_function_rfc6979 = nonce_function_rfc6979; +const rustsecp256k1_v0_4_1_nonce_function rustsecp256k1_v0_4_1_nonce_function_default = nonce_function_rfc6979; -static int rustsecp256k1_v0_4_0_ecdsa_sign_inner(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_scalar* r, rustsecp256k1_v0_4_0_scalar* s, int* recid, const unsigned char *msg32, const unsigned char *seckey, rustsecp256k1_v0_4_0_nonce_function noncefp, const void* noncedata) { - rustsecp256k1_v0_4_0_scalar sec, non, msg; +static int rustsecp256k1_v0_4_1_ecdsa_sign_inner(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_scalar* r, rustsecp256k1_v0_4_1_scalar* s, int* recid, const unsigned char *msg32, const unsigned char *seckey, rustsecp256k1_v0_4_1_nonce_function noncefp, const void* noncedata) { + rustsecp256k1_v0_4_1_scalar sec, non, msg; int ret = 0; int is_sec_valid; unsigned char nonce32[32]; unsigned int count = 0; /* Default initialization here is important so we won't pass uninit values to the cmov in the end */ - *r = rustsecp256k1_v0_4_0_scalar_zero; - *s = rustsecp256k1_v0_4_0_scalar_zero; + *r = rustsecp256k1_v0_4_1_scalar_zero; + *s = rustsecp256k1_v0_4_1_scalar_zero; if (recid) { *recid = 0; } if (noncefp == NULL) { - noncefp = rustsecp256k1_v0_4_0_nonce_function_default; + noncefp = rustsecp256k1_v0_4_1_nonce_function_default; } /* Fail if the secret key is invalid. 
*/ - is_sec_valid = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&sec, seckey); - rustsecp256k1_v0_4_0_scalar_cmov(&sec, &rustsecp256k1_v0_4_0_scalar_one, !is_sec_valid); - rustsecp256k1_v0_4_0_scalar_set_b32(&msg, msg32, NULL); + is_sec_valid = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&sec, seckey); + rustsecp256k1_v0_4_1_scalar_cmov(&sec, &rustsecp256k1_v0_4_1_scalar_one, !is_sec_valid); + rustsecp256k1_v0_4_1_scalar_set_b32(&msg, msg32, NULL); while (1) { int is_nonce_valid; ret = !!noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count); if (!ret) { break; } - is_nonce_valid = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&non, nonce32); /* The nonce is still secret here, but it being invalid is less likely than 1:2^255. */ - rustsecp256k1_v0_4_0_declassify(ctx, &is_nonce_valid, sizeof(is_nonce_valid)); + is_nonce_valid = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&non, nonce32); + rustsecp256k1_v0_4_1_declassify(ctx, &is_nonce_valid, sizeof(is_nonce_valid)); if (is_nonce_valid) { - ret = rustsecp256k1_v0_4_0_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, r, s, &sec, &msg, &non, recid); /* The final signature is no longer a secret, nor is the fact that we were successful or not. */ - rustsecp256k1_v0_4_0_declassify(ctx, &ret, sizeof(ret)); + ret = rustsecp256k1_v0_4_1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, r, s, &sec, &msg, &non, recid); /* The final signature is no longer a secret, nor is the fact that we were successful or not. */ + rustsecp256k1_v0_4_1_declassify(ctx, &ret, sizeof(ret)); if (ret) { break; } @@ -482,204 +513,204 @@ static int rustsecp256k1_v0_4_0_ecdsa_sign_inner(const rustsecp256k1_v0_4_0_cont * used as a branching variable. */ ret &= is_sec_valid; memset(nonce32, 0, 32); - rustsecp256k1_v0_4_0_scalar_clear(&msg); - rustsecp256k1_v0_4_0_scalar_clear(&non); - rustsecp256k1_v0_4_0_scalar_clear(&sec); - rustsecp256k1_v0_4_0_scalar_cmov(r, &rustsecp256k1_v0_4_0_scalar_zero, !ret); - rustsecp256k1_v0_4_0_scalar_cmov(s, &rustsecp256k1_v0_4_0_scalar_zero, !ret); + rustsecp256k1_v0_4_1_scalar_clear(&msg); + rustsecp256k1_v0_4_1_scalar_clear(&non); + rustsecp256k1_v0_4_1_scalar_clear(&sec); + rustsecp256k1_v0_4_1_scalar_cmov(r, &rustsecp256k1_v0_4_1_scalar_zero, !ret); + rustsecp256k1_v0_4_1_scalar_cmov(s, &rustsecp256k1_v0_4_1_scalar_zero, !ret); if (recid) { const int zero = 0; - rustsecp256k1_v0_4_0_int_cmov(recid, &zero, !ret); + rustsecp256k1_v0_4_1_int_cmov(recid, &zero, !ret); } return ret; } -int rustsecp256k1_v0_4_0_ecdsa_sign(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_4_0_nonce_function noncefp, const void* noncedata) { - rustsecp256k1_v0_4_0_scalar r, s; +int rustsecp256k1_v0_4_1_ecdsa_sign(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_4_1_nonce_function noncefp, const void* noncedata) { + rustsecp256k1_v0_4_1_scalar r, s; int ret; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(msghash32 != NULL); ARG_CHECK(signature != NULL); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_4_0_ecdsa_sign_inner(ctx, &r, &s, NULL, msghash32, seckey, noncefp, noncedata); - rustsecp256k1_v0_4_0_ecdsa_signature_save(signature, &r, &s); + ret = rustsecp256k1_v0_4_1_ecdsa_sign_inner(ctx, &r, &s, NULL, msghash32, seckey, noncefp, noncedata); +
rustsecp256k1_v0_4_1_ecdsa_signature_save(signature, &r, &s); return ret; } -int rustsecp256k1_v0_4_0_ec_seckey_verify(const rustsecp256k1_v0_4_0_context* ctx, const unsigned char *seckey) { - rustsecp256k1_v0_4_0_scalar sec; +int rustsecp256k1_v0_4_1_ec_seckey_verify(const rustsecp256k1_v0_4_1_context* ctx, const unsigned char *seckey) { + rustsecp256k1_v0_4_1_scalar sec; int ret; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&sec, seckey); - rustsecp256k1_v0_4_0_scalar_clear(&sec); + ret = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&sec, seckey); + rustsecp256k1_v0_4_1_scalar_clear(&sec); return ret; } -static int rustsecp256k1_v0_4_0_ec_pubkey_create_helper(const rustsecp256k1_v0_4_0_ecmult_gen_context *ecmult_gen_ctx, rustsecp256k1_v0_4_0_scalar *seckey_scalar, rustsecp256k1_v0_4_0_ge *p, const unsigned char *seckey) { - rustsecp256k1_v0_4_0_gej pj; +static int rustsecp256k1_v0_4_1_ec_pubkey_create_helper(const rustsecp256k1_v0_4_1_ecmult_gen_context *ecmult_gen_ctx, rustsecp256k1_v0_4_1_scalar *seckey_scalar, rustsecp256k1_v0_4_1_ge *p, const unsigned char *seckey) { + rustsecp256k1_v0_4_1_gej pj; int ret; - ret = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(seckey_scalar, seckey); - rustsecp256k1_v0_4_0_scalar_cmov(seckey_scalar, &rustsecp256k1_v0_4_0_scalar_one, !ret); + ret = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(seckey_scalar, seckey); + rustsecp256k1_v0_4_1_scalar_cmov(seckey_scalar, &rustsecp256k1_v0_4_1_scalar_one, !ret); - rustsecp256k1_v0_4_0_ecmult_gen(ecmult_gen_ctx, &pj, seckey_scalar); - rustsecp256k1_v0_4_0_ge_set_gej(p, &pj); + rustsecp256k1_v0_4_1_ecmult_gen(ecmult_gen_ctx, &pj, seckey_scalar); + rustsecp256k1_v0_4_1_ge_set_gej(p, &pj); return ret; } -int rustsecp256k1_v0_4_0_ec_pubkey_create(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *pubkey, const unsigned char *seckey) { - rustsecp256k1_v0_4_0_ge p; - rustsecp256k1_v0_4_0_scalar seckey_scalar; +int rustsecp256k1_v0_4_1_ec_pubkey_create(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *pubkey, const unsigned char *seckey) { + rustsecp256k1_v0_4_1_ge p; + rustsecp256k1_v0_4_1_scalar seckey_scalar; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); memset(pubkey, 0, sizeof(*pubkey)); - ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_4_0_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &seckey_scalar, &p, seckey); - rustsecp256k1_v0_4_0_pubkey_save(pubkey, &p); - rustsecp256k1_v0_4_0_memczero(pubkey, sizeof(*pubkey), !ret); + ret = rustsecp256k1_v0_4_1_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &seckey_scalar, &p, seckey); + rustsecp256k1_v0_4_1_pubkey_save(pubkey, &p); + rustsecp256k1_v0_4_1_memczero(pubkey, sizeof(*pubkey), !ret); - rustsecp256k1_v0_4_0_scalar_clear(&seckey_scalar); + rustsecp256k1_v0_4_1_scalar_clear(&seckey_scalar); return ret; } -int rustsecp256k1_v0_4_0_ec_seckey_negate(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *seckey) { - rustsecp256k1_v0_4_0_scalar sec; +int rustsecp256k1_v0_4_1_ec_seckey_negate(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey) { + rustsecp256k1_v0_4_1_scalar sec; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&sec, seckey); - rustsecp256k1_v0_4_0_scalar_cmov(&sec, 
&rustsecp256k1_v0_4_0_scalar_zero, !ret); - rustsecp256k1_v0_4_0_scalar_negate(&sec, &sec); - rustsecp256k1_v0_4_0_scalar_get_b32(seckey, &sec); + ret = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&sec, seckey); + rustsecp256k1_v0_4_1_scalar_cmov(&sec, &rustsecp256k1_v0_4_1_scalar_zero, !ret); + rustsecp256k1_v0_4_1_scalar_negate(&sec, &sec); + rustsecp256k1_v0_4_1_scalar_get_b32(seckey, &sec); - rustsecp256k1_v0_4_0_scalar_clear(&sec); + rustsecp256k1_v0_4_1_scalar_clear(&sec); return ret; } -int rustsecp256k1_v0_4_0_ec_privkey_negate(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *seckey) { - return rustsecp256k1_v0_4_0_ec_seckey_negate(ctx, seckey); +int rustsecp256k1_v0_4_1_ec_privkey_negate(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey) { + return rustsecp256k1_v0_4_1_ec_seckey_negate(ctx, seckey); } -int rustsecp256k1_v0_4_0_ec_pubkey_negate(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *pubkey) { +int rustsecp256k1_v0_4_1_ec_pubkey_negate(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *pubkey) { int ret = 0; - rustsecp256k1_v0_4_0_ge p; + rustsecp256k1_v0_4_1_ge p; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); - ret = rustsecp256k1_v0_4_0_pubkey_load(ctx, &p, pubkey); + ret = rustsecp256k1_v0_4_1_pubkey_load(ctx, &p, pubkey); memset(pubkey, 0, sizeof(*pubkey)); if (ret) { - rustsecp256k1_v0_4_0_ge_neg(&p, &p); - rustsecp256k1_v0_4_0_pubkey_save(pubkey, &p); + rustsecp256k1_v0_4_1_ge_neg(&p, &p); + rustsecp256k1_v0_4_1_pubkey_save(pubkey, &p); } return ret; } -static int rustsecp256k1_v0_4_0_ec_seckey_tweak_add_helper(rustsecp256k1_v0_4_0_scalar *sec, const unsigned char *tweak32) { - rustsecp256k1_v0_4_0_scalar term; +static int rustsecp256k1_v0_4_1_ec_seckey_tweak_add_helper(rustsecp256k1_v0_4_1_scalar *sec, const unsigned char *tweak32) { + rustsecp256k1_v0_4_1_scalar term; int overflow = 0; int ret = 0; - rustsecp256k1_v0_4_0_scalar_set_b32(&term, tweak32, &overflow); - ret = (!overflow) & rustsecp256k1_v0_4_0_eckey_privkey_tweak_add(sec, &term); - rustsecp256k1_v0_4_0_scalar_clear(&term); + rustsecp256k1_v0_4_1_scalar_set_b32(&term, tweak32, &overflow); + ret = (!overflow) & rustsecp256k1_v0_4_1_eckey_privkey_tweak_add(sec, &term); + rustsecp256k1_v0_4_1_scalar_clear(&term); return ret; } -int rustsecp256k1_v0_4_0_ec_seckey_tweak_add(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { - rustsecp256k1_v0_4_0_scalar sec; +int rustsecp256k1_v0_4_1_ec_seckey_tweak_add(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + rustsecp256k1_v0_4_1_scalar sec; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); ARG_CHECK(tweak32 != NULL); - ret = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&sec, seckey); - ret &= rustsecp256k1_v0_4_0_ec_seckey_tweak_add_helper(&sec, tweak32); - rustsecp256k1_v0_4_0_scalar_cmov(&sec, &rustsecp256k1_v0_4_0_scalar_zero, !ret); - rustsecp256k1_v0_4_0_scalar_get_b32(seckey, &sec); + ret = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&sec, seckey); + ret &= rustsecp256k1_v0_4_1_ec_seckey_tweak_add_helper(&sec, tweak32); + rustsecp256k1_v0_4_1_scalar_cmov(&sec, &rustsecp256k1_v0_4_1_scalar_zero, !ret); + rustsecp256k1_v0_4_1_scalar_get_b32(seckey, &sec); - rustsecp256k1_v0_4_0_scalar_clear(&sec); + rustsecp256k1_v0_4_1_scalar_clear(&sec); return ret; } -int rustsecp256k1_v0_4_0_ec_privkey_tweak_add(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *seckey, const unsigned char 
*tweak32) { - return rustsecp256k1_v0_4_0_ec_seckey_tweak_add(ctx, seckey, tweak32); +int rustsecp256k1_v0_4_1_ec_privkey_tweak_add(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + return rustsecp256k1_v0_4_1_ec_seckey_tweak_add(ctx, seckey, tweak32); } -static int rustsecp256k1_v0_4_0_ec_pubkey_tweak_add_helper(const rustsecp256k1_v0_4_0_ecmult_context* ecmult_ctx, rustsecp256k1_v0_4_0_ge *p, const unsigned char *tweak32) { - rustsecp256k1_v0_4_0_scalar term; +static int rustsecp256k1_v0_4_1_ec_pubkey_tweak_add_helper(const rustsecp256k1_v0_4_1_ecmult_context* ecmult_ctx, rustsecp256k1_v0_4_1_ge *p, const unsigned char *tweak32) { + rustsecp256k1_v0_4_1_scalar term; int overflow = 0; - rustsecp256k1_v0_4_0_scalar_set_b32(&term, tweak32, &overflow); - return !overflow && rustsecp256k1_v0_4_0_eckey_pubkey_tweak_add(ecmult_ctx, p, &term); + rustsecp256k1_v0_4_1_scalar_set_b32(&term, tweak32, &overflow); + return !overflow && rustsecp256k1_v0_4_1_eckey_pubkey_tweak_add(ecmult_ctx, p, &term); } -int rustsecp256k1_v0_4_0_ec_pubkey_tweak_add(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *pubkey, const unsigned char *tweak32) { - rustsecp256k1_v0_4_0_ge p; +int rustsecp256k1_v0_4_1_ec_pubkey_tweak_add(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *pubkey, const unsigned char *tweak32) { + rustsecp256k1_v0_4_1_ge p; int ret = 0; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)); + ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx)); ARG_CHECK(pubkey != NULL); ARG_CHECK(tweak32 != NULL); - ret = rustsecp256k1_v0_4_0_pubkey_load(ctx, &p, pubkey); + ret = rustsecp256k1_v0_4_1_pubkey_load(ctx, &p, pubkey); memset(pubkey, 0, sizeof(*pubkey)); - ret = ret && rustsecp256k1_v0_4_0_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &p, tweak32); + ret = ret && rustsecp256k1_v0_4_1_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &p, tweak32); if (ret) { - rustsecp256k1_v0_4_0_pubkey_save(pubkey, &p); + rustsecp256k1_v0_4_1_pubkey_save(pubkey, &p); } return ret; } -int rustsecp256k1_v0_4_0_ec_seckey_tweak_mul(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { - rustsecp256k1_v0_4_0_scalar factor; - rustsecp256k1_v0_4_0_scalar sec; +int rustsecp256k1_v0_4_1_ec_seckey_tweak_mul(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + rustsecp256k1_v0_4_1_scalar factor; + rustsecp256k1_v0_4_1_scalar sec; int ret = 0; int overflow = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); ARG_CHECK(tweak32 != NULL); - rustsecp256k1_v0_4_0_scalar_set_b32(&factor, tweak32, &overflow); - ret = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&sec, seckey); - ret &= (!overflow) & rustsecp256k1_v0_4_0_eckey_privkey_tweak_mul(&sec, &factor); - rustsecp256k1_v0_4_0_scalar_cmov(&sec, &rustsecp256k1_v0_4_0_scalar_zero, !ret); - rustsecp256k1_v0_4_0_scalar_get_b32(seckey, &sec); + rustsecp256k1_v0_4_1_scalar_set_b32(&factor, tweak32, &overflow); + ret = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&sec, seckey); + ret &= (!overflow) & rustsecp256k1_v0_4_1_eckey_privkey_tweak_mul(&sec, &factor); + rustsecp256k1_v0_4_1_scalar_cmov(&sec, &rustsecp256k1_v0_4_1_scalar_zero, !ret); + rustsecp256k1_v0_4_1_scalar_get_b32(seckey, &sec); - rustsecp256k1_v0_4_0_scalar_clear(&sec); - rustsecp256k1_v0_4_0_scalar_clear(&factor); + rustsecp256k1_v0_4_1_scalar_clear(&sec); + 
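/* [editor's note -- annotation, not part of the upstream patch] A sketch of
 * the invariant behind the tweak functions in this hunk: applying the same
 * additive tweak to a secret key and to its public key must yield a matching
 * key pair. `tweak_add_consistency_sketch` is a hypothetical helper; `ctx` is
 * assumed to support both signing and verification. */
static int tweak_add_consistency_sketch(const rustsecp256k1_v0_4_1_context *ctx, unsigned char *seckey32, const unsigned char *tweak32) {
    rustsecp256k1_v0_4_1_pubkey pub, pub_check;
    if (!rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pub, seckey32)) return 0;
    /* Tweak the secret key in place and the public key independently. */
    if (!rustsecp256k1_v0_4_1_ec_seckey_tweak_add(ctx, seckey32, tweak32)) return 0;
    if (!rustsecp256k1_v0_4_1_ec_pubkey_tweak_add(ctx, &pub, tweak32)) return 0;
    /* Re-derive the public key from the tweaked secret key and compare. */
    if (!rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pub_check, seckey32)) return 0;
    return rustsecp256k1_v0_4_1_ec_pubkey_cmp(ctx, &pub, &pub_check) == 0;
}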
rustsecp256k1_v0_4_1_scalar_clear(&factor); return ret; } -int rustsecp256k1_v0_4_0_ec_privkey_tweak_mul(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { - return rustsecp256k1_v0_4_0_ec_seckey_tweak_mul(ctx, seckey, tweak32); +int rustsecp256k1_v0_4_1_ec_privkey_tweak_mul(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + return rustsecp256k1_v0_4_1_ec_seckey_tweak_mul(ctx, seckey, tweak32); } -int rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *pubkey, const unsigned char *tweak32) { - rustsecp256k1_v0_4_0_ge p; - rustsecp256k1_v0_4_0_scalar factor; +int rustsecp256k1_v0_4_1_ec_pubkey_tweak_mul(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *pubkey, const unsigned char *tweak32) { + rustsecp256k1_v0_4_1_ge p; + rustsecp256k1_v0_4_1_scalar factor; int ret = 0; int overflow = 0; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)); + ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx)); ARG_CHECK(pubkey != NULL); ARG_CHECK(tweak32 != NULL); - rustsecp256k1_v0_4_0_scalar_set_b32(&factor, tweak32, &overflow); - ret = !overflow && rustsecp256k1_v0_4_0_pubkey_load(ctx, &p, pubkey); + rustsecp256k1_v0_4_1_scalar_set_b32(&factor, tweak32, &overflow); + ret = !overflow && rustsecp256k1_v0_4_1_pubkey_load(ctx, &p, pubkey); memset(pubkey, 0, sizeof(*pubkey)); if (ret) { - if (rustsecp256k1_v0_4_0_eckey_pubkey_tweak_mul(&ctx->ecmult_ctx, &p, &factor)) { - rustsecp256k1_v0_4_0_pubkey_save(pubkey, &p); + if (rustsecp256k1_v0_4_1_eckey_pubkey_tweak_mul(&ctx->ecmult_ctx, &p, &factor)) { + rustsecp256k1_v0_4_1_pubkey_save(pubkey, &p); } else { ret = 0; } @@ -688,35 +719,35 @@ int rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul(const rustsecp256k1_v0_4_0_context* return ret; } -int rustsecp256k1_v0_4_0_context_randomize(rustsecp256k1_v0_4_0_context* ctx, const unsigned char *seed32) { +int rustsecp256k1_v0_4_1_context_randomize(rustsecp256k1_v0_4_1_context* ctx, const unsigned char *seed32) { VERIFY_CHECK(ctx != NULL); - if (rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { - rustsecp256k1_v0_4_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); + if (rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { + rustsecp256k1_v0_4_1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); } return 1; } -int rustsecp256k1_v0_4_0_ec_pubkey_combine(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *pubnonce, const rustsecp256k1_v0_4_0_pubkey * const *pubnonces, size_t n) { +int rustsecp256k1_v0_4_1_ec_pubkey_combine(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *pubnonce, const rustsecp256k1_v0_4_1_pubkey * const *pubnonces, size_t n) { size_t i; - rustsecp256k1_v0_4_0_gej Qj; - rustsecp256k1_v0_4_0_ge Q; + rustsecp256k1_v0_4_1_gej Qj; + rustsecp256k1_v0_4_1_ge Q; ARG_CHECK(pubnonce != NULL); memset(pubnonce, 0, sizeof(*pubnonce)); ARG_CHECK(n >= 1); ARG_CHECK(pubnonces != NULL); - rustsecp256k1_v0_4_0_gej_set_infinity(&Qj); + rustsecp256k1_v0_4_1_gej_set_infinity(&Qj); for (i = 0; i < n; i++) { - rustsecp256k1_v0_4_0_pubkey_load(ctx, &Q, pubnonces[i]); - rustsecp256k1_v0_4_0_gej_add_ge(&Qj, &Qj, &Q); + rustsecp256k1_v0_4_1_pubkey_load(ctx, &Q, pubnonces[i]); + rustsecp256k1_v0_4_1_gej_add_ge(&Qj, &Qj, &Q); } - if (rustsecp256k1_v0_4_0_gej_is_infinity(&Qj)) { + if 
(rustsecp256k1_v0_4_1_gej_is_infinity(&Qj)) { return 0; } - rustsecp256k1_v0_4_0_ge_set_gej(&Q, &Qj); - rustsecp256k1_v0_4_0_pubkey_save(pubnonce, &Q); + rustsecp256k1_v0_4_1_ge_set_gej(&Q, &Qj); + rustsecp256k1_v0_4_1_pubkey_save(pubnonce, &Q); return 1; } diff --git a/secp256k1-sys/depend/secp256k1/src/secp256k1.c.orig b/secp256k1-sys/depend/secp256k1/src/secp256k1.c.orig index dcd6a0d..251c708 100644 --- a/secp256k1-sys/depend/secp256k1/src/secp256k1.c.orig +++ b/secp256k1-sys/depend/secp256k1/src/secp256k1.c.orig @@ -4,12 +4,13 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -#include "include/secp256k1.h" -#include "include/secp256k1_preallocated.h" +#define SECP256K1_BUILD + +#include "../include/secp256k1.h" +#include "../include/secp256k1_preallocated.h" #include "assumptions.h" #include "util.h" -#include "num_impl.h" #include "field_impl.h" #include "scalar_impl.h" #include "group_impl.h" @@ -22,75 +23,79 @@ #include "scratch_impl.h" #include "selftest.h" +#ifdef SECP256K1_NO_BUILD +# error "secp256k1.h processed without SECP256K1_BUILD defined while building secp256k1.c" +#endif + #if defined(VALGRIND) # include <valgrind/memcheck.h> #endif #define ARG_CHECK(cond) do { \ if (EXPECT(!(cond), 0)) { \ - rustsecp256k1_v0_4_0_callback_call(&ctx->illegal_callback, #cond); \ + rustsecp256k1_v0_4_1_callback_call(&ctx->illegal_callback, #cond); \ return 0; \ } \ } while(0) #define ARG_CHECK_NO_RETURN(cond) do { \ if (EXPECT(!(cond), 0)) { \ - rustsecp256k1_v0_4_0_callback_call(&ctx->illegal_callback, #cond); \ + rustsecp256k1_v0_4_1_callback_call(&ctx->illegal_callback, #cond); \ } \ } while(0) #ifndef USE_EXTERNAL_DEFAULT_CALLBACKS #include <stdlib.h> #include <stdio.h> -static void rustsecp256k1_v0_4_0_default_illegal_callback_fn(const char* str, void* data) { +static void rustsecp256k1_v0_4_1_default_illegal_callback_fn(const char* str, void* data) { (void)data; fprintf(stderr, "[libsecp256k1] illegal argument: %s\n", str); abort(); } -static void rustsecp256k1_v0_4_0_default_error_callback_fn(const char* str, void* data) { +static void rustsecp256k1_v0_4_1_default_error_callback_fn(const char* str, void* data) { (void)data; fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str); abort(); } #else -void rustsecp256k1_v0_4_0_default_illegal_callback_fn(const char* str, void* data); -void rustsecp256k1_v0_4_0_default_error_callback_fn(const char* str, void* data); +void rustsecp256k1_v0_4_1_default_illegal_callback_fn(const char* str, void* data); +void rustsecp256k1_v0_4_1_default_error_callback_fn(const char* str, void* data); #endif -static const rustsecp256k1_v0_4_0_callback default_illegal_callback = { - rustsecp256k1_v0_4_0_default_illegal_callback_fn, +static const rustsecp256k1_v0_4_1_callback default_illegal_callback = { + rustsecp256k1_v0_4_1_default_illegal_callback_fn, NULL }; -static const rustsecp256k1_v0_4_0_callback default_error_callback = { - rustsecp256k1_v0_4_0_default_error_callback_fn, +static const rustsecp256k1_v0_4_1_callback default_error_callback = { + rustsecp256k1_v0_4_1_default_error_callback_fn, NULL }; -struct rustsecp256k1_v0_4_0_context_struct { - rustsecp256k1_v0_4_0_ecmult_context ecmult_ctx; - rustsecp256k1_v0_4_0_ecmult_gen_context ecmult_gen_ctx; - rustsecp256k1_v0_4_0_callback illegal_callback; - rustsecp256k1_v0_4_0_callback error_callback; +struct rustsecp256k1_v0_4_1_context_struct { + rustsecp256k1_v0_4_1_ecmult_context ecmult_ctx; +
rustsecp256k1_v0_4_1_ecmult_gen_context ecmult_gen_ctx; + rustsecp256k1_v0_4_1_callback illegal_callback; + rustsecp256k1_v0_4_1_callback error_callback; int declassify; }; -static const rustsecp256k1_v0_4_0_context rustsecp256k1_v0_4_0_context_no_precomp_ = { +static const rustsecp256k1_v0_4_1_context rustsecp256k1_v0_4_1_context_no_precomp_ = { { 0 }, { 0 }, - { rustsecp256k1_v0_4_0_default_illegal_callback_fn, 0 }, - { rustsecp256k1_v0_4_0_default_error_callback_fn, 0 }, + { rustsecp256k1_v0_4_1_default_illegal_callback_fn, 0 }, + { rustsecp256k1_v0_4_1_default_error_callback_fn, 0 }, 0 }; -const rustsecp256k1_v0_4_0_context *rustsecp256k1_v0_4_0_context_no_precomp = &rustsecp256k1_v0_4_0_context_no_precomp_; +const rustsecp256k1_v0_4_1_context *rustsecp256k1_v0_4_1_context_no_precomp = &rustsecp256k1_v0_4_1_context_no_precomp_; -size_t rustsecp256k1_v0_4_0_context_preallocated_size(unsigned int flags) { - size_t ret = ROUND_TO_ALIGN(sizeof(rustsecp256k1_v0_4_0_context)); +size_t rustsecp256k1_v0_4_1_context_preallocated_size(unsigned int flags) { + size_t ret = ROUND_TO_ALIGN(sizeof(rustsecp256k1_v0_4_1_context)); /* A return value of 0 is reserved as an indicator for errors when we call this function internally. */ VERIFY_CHECK(ret != 0); if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) { - rustsecp256k1_v0_4_0_callback_call(&default_illegal_callback, + rustsecp256k1_v0_4_1_callback_call(&default_illegal_callback, "Invalid flags"); return 0; } @@ -104,56 +109,56 @@ size_t rustsecp256k1_v0_4_0_context_preallocated_size(unsigned int flags) { return ret; } -size_t rustsecp256k1_v0_4_0_context_preallocated_clone_size(const rustsecp256k1_v0_4_0_context* ctx) { - size_t ret = ROUND_TO_ALIGN(sizeof(rustsecp256k1_v0_4_0_context)); +size_t rustsecp256k1_v0_4_1_context_preallocated_clone_size(const rustsecp256k1_v0_4_1_context* ctx) { + size_t ret = ROUND_TO_ALIGN(sizeof(rustsecp256k1_v0_4_1_context)); VERIFY_CHECK(ctx != NULL); - if (rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { + if (rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { ret += SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE; } - if (rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)) { + if (rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx)) { ret += SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE; } return ret; } -rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_preallocated_create(void* prealloc, unsigned int flags) { +rustsecp256k1_v0_4_1_context* rustsecp256k1_v0_4_1_context_preallocated_create(void* prealloc, unsigned int flags) { void* const base = prealloc; size_t prealloc_size; - rustsecp256k1_v0_4_0_context* ret; + rustsecp256k1_v0_4_1_context* ret; - if (!rustsecp256k1_v0_4_0_selftest()) { - rustsecp256k1_v0_4_0_callback_call(&default_error_callback, "self test failed"); + if (!rustsecp256k1_v0_4_1_selftest()) { + rustsecp256k1_v0_4_1_callback_call(&default_error_callback, "self test failed"); } - prealloc_size = rustsecp256k1_v0_4_0_context_preallocated_size(flags); + prealloc_size = rustsecp256k1_v0_4_1_context_preallocated_size(flags); if (prealloc_size == 0) { return NULL; } VERIFY_CHECK(prealloc != NULL); - ret = (rustsecp256k1_v0_4_0_context*)manual_alloc(&prealloc, sizeof(rustsecp256k1_v0_4_0_context), base, prealloc_size); + ret = (rustsecp256k1_v0_4_1_context*)manual_alloc(&prealloc, sizeof(rustsecp256k1_v0_4_1_context), base, prealloc_size); ret->illegal_callback = default_illegal_callback; 
ret->error_callback = default_error_callback; - rustsecp256k1_v0_4_0_ecmult_context_init(&ret->ecmult_ctx); - rustsecp256k1_v0_4_0_ecmult_gen_context_init(&ret->ecmult_gen_ctx); + rustsecp256k1_v0_4_1_ecmult_context_init(&ret->ecmult_ctx); + rustsecp256k1_v0_4_1_ecmult_gen_context_init(&ret->ecmult_gen_ctx); - /* Flags have been checked by rustsecp256k1_v0_4_0_context_preallocated_size. */ + /* Flags have been checked by rustsecp256k1_v0_4_1_context_preallocated_size. */ VERIFY_CHECK((flags & SECP256K1_FLAGS_TYPE_MASK) == SECP256K1_FLAGS_TYPE_CONTEXT); if (flags & SECP256K1_FLAGS_BIT_CONTEXT_SIGN) { - rustsecp256k1_v0_4_0_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &prealloc); + rustsecp256k1_v0_4_1_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &prealloc); } if (flags & SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) { - rustsecp256k1_v0_4_0_ecmult_context_build(&ret->ecmult_ctx, &prealloc); + rustsecp256k1_v0_4_1_ecmult_context_build(&ret->ecmult_ctx, &prealloc); } ret->declassify = !!(flags & SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY); - return (rustsecp256k1_v0_4_0_context*) ret; + return (rustsecp256k1_v0_4_1_context*) ret; } -rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_create(unsigned int flags) { - size_t const prealloc_size = rustsecp256k1_v0_4_0_context_preallocated_size(flags); - rustsecp256k1_v0_4_0_context* ctx = (rustsecp256k1_v0_4_0_context*)checked_malloc(&default_error_callback, prealloc_size); - if (EXPECT(rustsecp256k1_v0_4_0_context_preallocated_create(ctx, flags) == NULL, 0)) { +rustsecp256k1_v0_4_1_context* rustsecp256k1_v0_4_1_context_create(unsigned int flags) { + size_t const prealloc_size = rustsecp256k1_v0_4_1_context_preallocated_size(flags); + rustsecp256k1_v0_4_1_context* ctx = (rustsecp256k1_v0_4_1_context*)checked_malloc(&default_error_callback, prealloc_size); + if (EXPECT(rustsecp256k1_v0_4_1_context_preallocated_create(ctx, flags) == NULL, 0)) { free(ctx); return NULL; } @@ -161,79 +166,79 @@ rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_create(unsigned int f return ctx; } -rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_preallocated_clone(const rustsecp256k1_v0_4_0_context* ctx, void* prealloc) { +rustsecp256k1_v0_4_1_context* rustsecp256k1_v0_4_1_context_preallocated_clone(const rustsecp256k1_v0_4_1_context* ctx, void* prealloc) { size_t prealloc_size; - rustsecp256k1_v0_4_0_context* ret; + rustsecp256k1_v0_4_1_context* ret; VERIFY_CHECK(ctx != NULL); ARG_CHECK(prealloc != NULL); - prealloc_size = rustsecp256k1_v0_4_0_context_preallocated_clone_size(ctx); - ret = (rustsecp256k1_v0_4_0_context*)prealloc; + prealloc_size = rustsecp256k1_v0_4_1_context_preallocated_clone_size(ctx); + ret = (rustsecp256k1_v0_4_1_context*)prealloc; memcpy(ret, ctx, prealloc_size); - rustsecp256k1_v0_4_0_ecmult_gen_context_finalize_memcpy(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx); - rustsecp256k1_v0_4_0_ecmult_context_finalize_memcpy(&ret->ecmult_ctx, &ctx->ecmult_ctx); + rustsecp256k1_v0_4_1_ecmult_gen_context_finalize_memcpy(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx); + rustsecp256k1_v0_4_1_ecmult_context_finalize_memcpy(&ret->ecmult_ctx, &ctx->ecmult_ctx); return ret; } -rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_clone(const rustsecp256k1_v0_4_0_context* ctx) { - rustsecp256k1_v0_4_0_context* ret; +rustsecp256k1_v0_4_1_context* rustsecp256k1_v0_4_1_context_clone(const rustsecp256k1_v0_4_1_context* ctx) { + rustsecp256k1_v0_4_1_context* ret; size_t prealloc_size; VERIFY_CHECK(ctx != NULL); - prealloc_size = 
rustsecp256k1_v0_4_0_context_preallocated_clone_size(ctx); - ret = (rustsecp256k1_v0_4_0_context*)checked_malloc(&ctx->error_callback, prealloc_size); - ret = rustsecp256k1_v0_4_0_context_preallocated_clone(ctx, ret); + prealloc_size = rustsecp256k1_v0_4_1_context_preallocated_clone_size(ctx); + ret = (rustsecp256k1_v0_4_1_context*)checked_malloc(&ctx->error_callback, prealloc_size); + ret = rustsecp256k1_v0_4_1_context_preallocated_clone(ctx, ret); return ret; } -void rustsecp256k1_v0_4_0_context_preallocated_destroy(rustsecp256k1_v0_4_0_context* ctx) { - ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_4_0_context_no_precomp); +void rustsecp256k1_v0_4_1_context_preallocated_destroy(rustsecp256k1_v0_4_1_context* ctx) { + ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_4_1_context_no_precomp); if (ctx != NULL) { - rustsecp256k1_v0_4_0_ecmult_context_clear(&ctx->ecmult_ctx); - rustsecp256k1_v0_4_0_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx); + rustsecp256k1_v0_4_1_ecmult_context_clear(&ctx->ecmult_ctx); + rustsecp256k1_v0_4_1_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx); } } -void rustsecp256k1_v0_4_0_context_destroy(rustsecp256k1_v0_4_0_context* ctx) { +void rustsecp256k1_v0_4_1_context_destroy(rustsecp256k1_v0_4_1_context* ctx) { if (ctx != NULL) { - rustsecp256k1_v0_4_0_context_preallocated_destroy(ctx); + rustsecp256k1_v0_4_1_context_preallocated_destroy(ctx); free(ctx); } } -void rustsecp256k1_v0_4_0_context_set_illegal_callback(rustsecp256k1_v0_4_0_context* ctx, void (*fun)(const char* message, void* data), const void* data) { - ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_4_0_context_no_precomp); +void rustsecp256k1_v0_4_1_context_set_illegal_callback(rustsecp256k1_v0_4_1_context* ctx, void (*fun)(const char* message, void* data), const void* data) { + ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_4_1_context_no_precomp); if (fun == NULL) { - fun = rustsecp256k1_v0_4_0_default_illegal_callback_fn; + fun = rustsecp256k1_v0_4_1_default_illegal_callback_fn; } ctx->illegal_callback.fn = fun; ctx->illegal_callback.data = data; } -void rustsecp256k1_v0_4_0_context_set_error_callback(rustsecp256k1_v0_4_0_context* ctx, void (*fun)(const char* message, void* data), const void* data) { - ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_4_0_context_no_precomp); +void rustsecp256k1_v0_4_1_context_set_error_callback(rustsecp256k1_v0_4_1_context* ctx, void (*fun)(const char* message, void* data), const void* data) { + ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_4_1_context_no_precomp); if (fun == NULL) { - fun = rustsecp256k1_v0_4_0_default_error_callback_fn; + fun = rustsecp256k1_v0_4_1_default_error_callback_fn; } ctx->error_callback.fn = fun; ctx->error_callback.data = data; } -rustsecp256k1_v0_4_0_scratch_space* rustsecp256k1_v0_4_0_scratch_space_create(const rustsecp256k1_v0_4_0_context* ctx, size_t max_size) { +rustsecp256k1_v0_4_1_scratch_space* rustsecp256k1_v0_4_1_scratch_space_create(const rustsecp256k1_v0_4_1_context* ctx, size_t max_size) { VERIFY_CHECK(ctx != NULL); - return rustsecp256k1_v0_4_0_scratch_create(&ctx->error_callback, max_size); + return rustsecp256k1_v0_4_1_scratch_create(&ctx->error_callback, max_size); } -void rustsecp256k1_v0_4_0_scratch_space_destroy(const rustsecp256k1_v0_4_0_context *ctx, rustsecp256k1_v0_4_0_scratch_space* scratch) { +void rustsecp256k1_v0_4_1_scratch_space_destroy(const rustsecp256k1_v0_4_1_context *ctx, rustsecp256k1_v0_4_1_scratch_space* scratch) { VERIFY_CHECK(ctx != NULL); - rustsecp256k1_v0_4_0_scratch_destroy(&ctx->error_callback, scratch); + 
rustsecp256k1_v0_4_1_scratch_destroy(&ctx->error_callback, scratch); } /* Mark memory as no-longer-secret for the purpose of analysing constant-time behaviour * of the software. This is setup for use with valgrind but could be substituted with * the appropriate instrumentation for other analysis tools. */ -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_declassify(const rustsecp256k1_v0_4_0_context* ctx, const void *p, size_t len) { +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_declassify(const rustsecp256k1_v0_4_1_context* ctx, const void *p, size_t len) { #if defined(VALGRIND) if (EXPECT(ctx->declassify,0)) VALGRIND_MAKE_MEM_DEFINED(p, len); #else @@ -243,59 +248,59 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_declassify(const rustsecp256k1 #endif } -static int rustsecp256k1_v0_4_0_pubkey_load(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ge* ge, const rustsecp256k1_v0_4_0_pubkey* pubkey) { - if (sizeof(rustsecp256k1_v0_4_0_ge_storage) == 64) { - /* When the rustsecp256k1_v0_4_0_ge_storage type is exactly 64 byte, use its - * representation inside rustsecp256k1_v0_4_0_pubkey, as conversion is very fast. - * Note that rustsecp256k1_v0_4_0_pubkey_save must use the same representation. */ - rustsecp256k1_v0_4_0_ge_storage s; +static int rustsecp256k1_v0_4_1_pubkey_load(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ge* ge, const rustsecp256k1_v0_4_1_pubkey* pubkey) { + if (sizeof(rustsecp256k1_v0_4_1_ge_storage) == 64) { + /* When the rustsecp256k1_v0_4_1_ge_storage type is exactly 64 byte, use its + * representation inside rustsecp256k1_v0_4_1_pubkey, as conversion is very fast. + * Note that rustsecp256k1_v0_4_1_pubkey_save must use the same representation. */ + rustsecp256k1_v0_4_1_ge_storage s; memcpy(&s, &pubkey->data[0], sizeof(s)); - rustsecp256k1_v0_4_0_ge_from_storage(ge, &s); + rustsecp256k1_v0_4_1_ge_from_storage(ge, &s); } else { /* Otherwise, fall back to 32-byte big endian for X and Y. 
*/ - rustsecp256k1_v0_4_0_fe x, y; - rustsecp256k1_v0_4_0_fe_set_b32(&x, pubkey->data); - rustsecp256k1_v0_4_0_fe_set_b32(&y, pubkey->data + 32); - rustsecp256k1_v0_4_0_ge_set_xy(ge, &x, &y); + rustsecp256k1_v0_4_1_fe x, y; + rustsecp256k1_v0_4_1_fe_set_b32(&x, pubkey->data); + rustsecp256k1_v0_4_1_fe_set_b32(&y, pubkey->data + 32); + rustsecp256k1_v0_4_1_ge_set_xy(ge, &x, &y); } - ARG_CHECK(!rustsecp256k1_v0_4_0_fe_is_zero(&ge->x)); + ARG_CHECK(!rustsecp256k1_v0_4_1_fe_is_zero(&ge->x)); return 1; } -static void rustsecp256k1_v0_4_0_pubkey_save(rustsecp256k1_v0_4_0_pubkey* pubkey, rustsecp256k1_v0_4_0_ge* ge) { - if (sizeof(rustsecp256k1_v0_4_0_ge_storage) == 64) { - rustsecp256k1_v0_4_0_ge_storage s; - rustsecp256k1_v0_4_0_ge_to_storage(&s, ge); +static void rustsecp256k1_v0_4_1_pubkey_save(rustsecp256k1_v0_4_1_pubkey* pubkey, rustsecp256k1_v0_4_1_ge* ge) { + if (sizeof(rustsecp256k1_v0_4_1_ge_storage) == 64) { + rustsecp256k1_v0_4_1_ge_storage s; + rustsecp256k1_v0_4_1_ge_to_storage(&s, ge); memcpy(&pubkey->data[0], &s, sizeof(s)); } else { - VERIFY_CHECK(!rustsecp256k1_v0_4_0_ge_is_infinity(ge)); - rustsecp256k1_v0_4_0_fe_normalize_var(&ge->x); - rustsecp256k1_v0_4_0_fe_normalize_var(&ge->y); - rustsecp256k1_v0_4_0_fe_get_b32(pubkey->data, &ge->x); - rustsecp256k1_v0_4_0_fe_get_b32(pubkey->data + 32, &ge->y); + VERIFY_CHECK(!rustsecp256k1_v0_4_1_ge_is_infinity(ge)); + rustsecp256k1_v0_4_1_fe_normalize_var(&ge->x); + rustsecp256k1_v0_4_1_fe_normalize_var(&ge->y); + rustsecp256k1_v0_4_1_fe_get_b32(pubkey->data, &ge->x); + rustsecp256k1_v0_4_1_fe_get_b32(pubkey->data + 32, &ge->y); } } -int rustsecp256k1_v0_4_0_ec_pubkey_parse(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey* pubkey, const unsigned char *input, size_t inputlen) { - rustsecp256k1_v0_4_0_ge Q; +int rustsecp256k1_v0_4_1_ec_pubkey_parse(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey* pubkey, const unsigned char *input, size_t inputlen) { + rustsecp256k1_v0_4_1_ge Q; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); memset(pubkey, 0, sizeof(*pubkey)); ARG_CHECK(input != NULL); - if (!rustsecp256k1_v0_4_0_eckey_pubkey_parse(&Q, input, inputlen)) { + if (!rustsecp256k1_v0_4_1_eckey_pubkey_parse(&Q, input, inputlen)) { return 0; } - if (!rustsecp256k1_v0_4_0_ge_is_in_correct_subgroup(&Q)) { + if (!rustsecp256k1_v0_4_1_ge_is_in_correct_subgroup(&Q)) { return 0; } - rustsecp256k1_v0_4_0_pubkey_save(pubkey, &Q); - rustsecp256k1_v0_4_0_ge_clear(&Q); + rustsecp256k1_v0_4_1_pubkey_save(pubkey, &Q); + rustsecp256k1_v0_4_1_ge_clear(&Q); return 1; } -int rustsecp256k1_v0_4_0_ec_pubkey_serialize(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_4_0_pubkey* pubkey, unsigned int flags) { - rustsecp256k1_v0_4_0_ge Q; +int rustsecp256k1_v0_4_1_ec_pubkey_serialize(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_4_1_pubkey* pubkey, unsigned int flags) { + rustsecp256k1_v0_4_1_ge Q; size_t len; int ret = 0; @@ -308,8 +313,8 @@ int rustsecp256k1_v0_4_0_ec_pubkey_serialize(const rustsecp256k1_v0_4_0_context* memset(output, 0, len); ARG_CHECK(pubkey != NULL); ARG_CHECK((flags & SECP256K1_FLAGS_TYPE_MASK) == SECP256K1_FLAGS_TYPE_COMPRESSION); - if (rustsecp256k1_v0_4_0_pubkey_load(ctx, &Q, pubkey)) { - ret = rustsecp256k1_v0_4_0_eckey_pubkey_serialize(&Q, output, &len, flags & SECP256K1_FLAGS_BIT_COMPRESSION); + if (rustsecp256k1_v0_4_1_pubkey_load(ctx, &Q, pubkey)) { + ret = 
rustsecp256k1_v0_4_1_eckey_pubkey_serialize(&Q, output, &len, flags & SECP256K1_FLAGS_BIT_COMPRESSION); if (ret) { *outputlen = len; } @@ -317,39 +322,65 @@ int rustsecp256k1_v0_4_0_ec_pubkey_serialize(const rustsecp256k1_v0_4_0_context* return ret; } -static void rustsecp256k1_v0_4_0_ecdsa_signature_load(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_scalar* r, rustsecp256k1_v0_4_0_scalar* s, const rustsecp256k1_v0_4_0_ecdsa_signature* sig) { +int rustsecp256k1_v0_4_1_ec_pubkey_cmp(const rustsecp256k1_v0_4_1_context* ctx, const rustsecp256k1_v0_4_1_pubkey* pubkey0, const rustsecp256k1_v0_4_1_pubkey* pubkey1) { + unsigned char out[2][33]; + const rustsecp256k1_v0_4_1_pubkey* pk[2]; + int i; + + VERIFY_CHECK(ctx != NULL); + pk[0] = pubkey0; pk[1] = pubkey1; + for (i = 0; i < 2; i++) { + size_t out_size = sizeof(out[i]); + /* If the public key is NULL or invalid, ec_pubkey_serialize will call + * the illegal_callback and return 0. In that case we will serialize the + * key as all zeros which is less than any valid public key. This + * results in consistent comparisons even if NULL or invalid pubkeys are + * involved and prevents edge cases such as sorting algorithms that use + * this function and do not terminate as a result. */ + if (!rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, out[i], &out_size, pk[i], SECP256K1_EC_COMPRESSED)) { + /* Note that ec_pubkey_serialize should already set the output to + * zero in that case, but it's not guaranteed by the API, we can't + * test it and writing a VERIFY_CHECK is more complex than + * explicitly memsetting (again). */ + memset(out[i], 0, sizeof(out[i])); + } + } + return rustsecp256k1_v0_4_1_memcmp_var(out[0], out[1], sizeof(out[0])); +} + +static void rustsecp256k1_v0_4_1_ecdsa_signature_load(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_scalar* r, rustsecp256k1_v0_4_1_scalar* s, const rustsecp256k1_v0_4_1_ecdsa_signature* sig) { (void)ctx; - if (sizeof(rustsecp256k1_v0_4_0_scalar) == 32) { - /* When the rustsecp256k1_v0_4_0_scalar type is exactly 32 byte, use its - * representation inside rustsecp256k1_v0_4_0_ecdsa_signature, as conversion is very fast. - * Note that rustsecp256k1_v0_4_0_ecdsa_signature_save must use the same representation. */ + if (sizeof(rustsecp256k1_v0_4_1_scalar) == 32) { + /* When the rustsecp256k1_v0_4_1_scalar type is exactly 32 byte, use its + * representation inside rustsecp256k1_v0_4_1_ecdsa_signature, as conversion is very fast. + * Note that rustsecp256k1_v0_4_1_ecdsa_signature_save must use the same representation. 
*/ memcpy(r, &sig->data[0], 32); memcpy(s, &sig->data[32], 32); } else { - rustsecp256k1_v0_4_0_scalar_set_b32(r, &sig->data[0], NULL); - rustsecp256k1_v0_4_0_scalar_set_b32(s, &sig->data[32], NULL); + rustsecp256k1_v0_4_1_scalar_set_b32(r, &sig->data[0], NULL); + rustsecp256k1_v0_4_1_scalar_set_b32(s, &sig->data[32], NULL); } } -static void rustsecp256k1_v0_4_0_ecdsa_signature_save(rustsecp256k1_v0_4_0_ecdsa_signature* sig, const rustsecp256k1_v0_4_0_scalar* r, const rustsecp256k1_v0_4_0_scalar* s) { - if (sizeof(rustsecp256k1_v0_4_0_scalar) == 32) { +static void rustsecp256k1_v0_4_1_ecdsa_signature_save(rustsecp256k1_v0_4_1_ecdsa_signature* sig, const rustsecp256k1_v0_4_1_scalar* r, const rustsecp256k1_v0_4_1_scalar* s) { + if (sizeof(rustsecp256k1_v0_4_1_scalar) == 32) { memcpy(&sig->data[0], r, 32); memcpy(&sig->data[32], s, 32); } else { - rustsecp256k1_v0_4_0_scalar_get_b32(&sig->data[0], r); - rustsecp256k1_v0_4_0_scalar_get_b32(&sig->data[32], s); + rustsecp256k1_v0_4_1_scalar_get_b32(&sig->data[0], r); + rustsecp256k1_v0_4_1_scalar_get_b32(&sig->data[32], s); } } -int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { - rustsecp256k1_v0_4_0_scalar r, s; +int rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { + rustsecp256k1_v0_4_1_scalar r, s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(sig != NULL); ARG_CHECK(input != NULL); - if (rustsecp256k1_v0_4_0_ecdsa_sig_parse(&r, &s, input, inputlen)) { - rustsecp256k1_v0_4_0_ecdsa_signature_save(sig, &r, &s); + if (rustsecp256k1_v0_4_1_ecdsa_sig_parse(&r, &s, input, inputlen)) { + rustsecp256k1_v0_4_1_ecdsa_signature_save(sig, &r, &s); return 1; } else { memset(sig, 0, sizeof(*sig)); @@ -357,8 +388,8 @@ int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(const rustsecp256k1_v0_4_0_co } } -int rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_signature* sig, const unsigned char *input64) { - rustsecp256k1_v0_4_0_scalar r, s; +int rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_signature* sig, const unsigned char *input64) { + rustsecp256k1_v0_4_1_scalar r, s; int ret = 1; int overflow = 0; @@ -366,77 +397,77 @@ int rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(const rustsecp256k1_v0_4_ ARG_CHECK(sig != NULL); ARG_CHECK(input64 != NULL); - rustsecp256k1_v0_4_0_scalar_set_b32(&r, &input64[0], &overflow); + rustsecp256k1_v0_4_1_scalar_set_b32(&r, &input64[0], &overflow); ret &= !overflow; - rustsecp256k1_v0_4_0_scalar_set_b32(&s, &input64[32], &overflow); + rustsecp256k1_v0_4_1_scalar_set_b32(&s, &input64[32], &overflow); ret &= !overflow; if (ret) { - rustsecp256k1_v0_4_0_ecdsa_signature_save(sig, &r, &s); + rustsecp256k1_v0_4_1_ecdsa_signature_save(sig, &r, &s); } else { memset(sig, 0, sizeof(*sig)); } return ret; } -int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_4_0_ecdsa_signature* sig) { - rustsecp256k1_v0_4_0_scalar r, s; +int rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_4_1_ecdsa_signature* sig) { + rustsecp256k1_v0_4_1_scalar r, 
s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output != NULL); ARG_CHECK(outputlen != NULL); ARG_CHECK(sig != NULL); - rustsecp256k1_v0_4_0_ecdsa_signature_load(ctx, &r, &s, sig); - return rustsecp256k1_v0_4_0_ecdsa_sig_serialize(output, outputlen, &r, &s); + rustsecp256k1_v0_4_1_ecdsa_signature_load(ctx, &r, &s, sig); + return rustsecp256k1_v0_4_1_ecdsa_sig_serialize(output, outputlen, &r, &s); } -int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_compact(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *output64, const rustsecp256k1_v0_4_0_ecdsa_signature* sig) { - rustsecp256k1_v0_4_0_scalar r, s; +int rustsecp256k1_v0_4_1_ecdsa_signature_serialize_compact(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output64, const rustsecp256k1_v0_4_1_ecdsa_signature* sig) { + rustsecp256k1_v0_4_1_scalar r, s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output64 != NULL); ARG_CHECK(sig != NULL); - rustsecp256k1_v0_4_0_ecdsa_signature_load(ctx, &r, &s, sig); - rustsecp256k1_v0_4_0_scalar_get_b32(&output64[0], &r); - rustsecp256k1_v0_4_0_scalar_get_b32(&output64[32], &s); + rustsecp256k1_v0_4_1_ecdsa_signature_load(ctx, &r, &s, sig); + rustsecp256k1_v0_4_1_scalar_get_b32(&output64[0], &r); + rustsecp256k1_v0_4_1_scalar_get_b32(&output64[32], &s); return 1; } -int rustsecp256k1_v0_4_0_ecdsa_signature_normalize(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_signature *sigout, const rustsecp256k1_v0_4_0_ecdsa_signature *sigin) { - rustsecp256k1_v0_4_0_scalar r, s; +int rustsecp256k1_v0_4_1_ecdsa_signature_normalize(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_signature *sigout, const rustsecp256k1_v0_4_1_ecdsa_signature *sigin) { + rustsecp256k1_v0_4_1_scalar r, s; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(sigin != NULL); - rustsecp256k1_v0_4_0_ecdsa_signature_load(ctx, &r, &s, sigin); - ret = rustsecp256k1_v0_4_0_scalar_is_high(&s); + rustsecp256k1_v0_4_1_ecdsa_signature_load(ctx, &r, &s, sigin); + ret = rustsecp256k1_v0_4_1_scalar_is_high(&s); if (sigout != NULL) { if (ret) { - rustsecp256k1_v0_4_0_scalar_negate(&s, &s); + rustsecp256k1_v0_4_1_scalar_negate(&s, &s); } - rustsecp256k1_v0_4_0_ecdsa_signature_save(sigout, &r, &s); + rustsecp256k1_v0_4_1_ecdsa_signature_save(sigout, &r, &s); } return ret; } -int rustsecp256k1_v0_4_0_ecdsa_verify(const rustsecp256k1_v0_4_0_context* ctx, const rustsecp256k1_v0_4_0_ecdsa_signature *sig, const unsigned char *msghash32, const rustsecp256k1_v0_4_0_pubkey *pubkey) { - rustsecp256k1_v0_4_0_ge q; - rustsecp256k1_v0_4_0_scalar r, s; - rustsecp256k1_v0_4_0_scalar m; +int rustsecp256k1_v0_4_1_ecdsa_verify(const rustsecp256k1_v0_4_1_context* ctx, const rustsecp256k1_v0_4_1_ecdsa_signature *sig, const unsigned char *msghash32, const rustsecp256k1_v0_4_1_pubkey *pubkey) { + rustsecp256k1_v0_4_1_ge q; + rustsecp256k1_v0_4_1_scalar r, s; + rustsecp256k1_v0_4_1_scalar m; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)); + ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx)); ARG_CHECK(msghash32 != NULL); ARG_CHECK(sig != NULL); ARG_CHECK(pubkey != NULL); - rustsecp256k1_v0_4_0_scalar_set_b32(&m, msghash32, NULL); - rustsecp256k1_v0_4_0_ecdsa_signature_load(ctx, &r, &s, sig); - return (!rustsecp256k1_v0_4_0_scalar_is_high(&s) && - rustsecp256k1_v0_4_0_pubkey_load(ctx, &q, pubkey) && - rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &r, &s, &q, &m)); + rustsecp256k1_v0_4_1_scalar_set_b32(&m, msghash32, NULL); + 
rustsecp256k1_v0_4_1_ecdsa_signature_load(ctx, &r, &s, sig); + return (!rustsecp256k1_v0_4_1_scalar_is_high(&s) && + rustsecp256k1_v0_4_1_pubkey_load(ctx, &q, pubkey) && + rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &r, &s, &q, &m)); } static SECP256K1_INLINE void buffer_append(unsigned char *buf, unsigned int *offset, const void *data, unsigned int len) { @@ -447,7 +478,7 @@ static SECP256K1_INLINE void buffer_append(unsigned char *buf, unsigned int *off static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) { unsigned char keydata[112]; unsigned int offset = 0; - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 rng; + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 rng; unsigned int i; /* We feed a byte array to the PRNG as input, consisting of: * - the private key (32 bytes) and message (32 bytes), see RFC 6979 3.2d. @@ -465,51 +496,51 @@ static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *m if (algo16 != NULL) { buffer_append(keydata, &offset, algo16, 16); } - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_initialize(&rng, keydata, offset); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_initialize(&rng, keydata, offset); memset(keydata, 0, sizeof(keydata)); for (i = 0; i <= counter; i++) { - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); } - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_finalize(&rng); return 1; } -const rustsecp256k1_v0_4_0_nonce_function rustsecp256k1_v0_4_0_nonce_function_rfc6979 = nonce_function_rfc6979; -const rustsecp256k1_v0_4_0_nonce_function rustsecp256k1_v0_4_0_nonce_function_default = nonce_function_rfc6979; +const rustsecp256k1_v0_4_1_nonce_function rustsecp256k1_v0_4_1_nonce_function_rfc6979 = nonce_function_rfc6979; +const rustsecp256k1_v0_4_1_nonce_function rustsecp256k1_v0_4_1_nonce_function_default = nonce_function_rfc6979; -static int rustsecp256k1_v0_4_0_ecdsa_sign_inner(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_scalar* r, rustsecp256k1_v0_4_0_scalar* s, int* recid, const unsigned char *msg32, const unsigned char *seckey, rustsecp256k1_v0_4_0_nonce_function noncefp, const void* noncedata) { - rustsecp256k1_v0_4_0_scalar sec, non, msg; +static int rustsecp256k1_v0_4_1_ecdsa_sign_inner(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_scalar* r, rustsecp256k1_v0_4_1_scalar* s, int* recid, const unsigned char *msg32, const unsigned char *seckey, rustsecp256k1_v0_4_1_nonce_function noncefp, const void* noncedata) { + rustsecp256k1_v0_4_1_scalar sec, non, msg; int ret = 0; int is_sec_valid; unsigned char nonce32[32]; unsigned int count = 0; /* Default initialization here is important so we won't pass uninit values to the cmov in the end */ - *r = rustsecp256k1_v0_4_0_scalar_zero; - *s = rustsecp256k1_v0_4_0_scalar_zero; + *r = rustsecp256k1_v0_4_1_scalar_zero; + *s = rustsecp256k1_v0_4_1_scalar_zero; if (recid) { *recid = 0; } if (noncefp == NULL) { - noncefp = rustsecp256k1_v0_4_0_nonce_function_default; + noncefp = rustsecp256k1_v0_4_1_nonce_function_default; } /* Fail if the secret key is invalid. 
*/ - is_sec_valid = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&sec, seckey); - rustsecp256k1_v0_4_0_scalar_cmov(&sec, &rustsecp256k1_v0_4_0_scalar_one, !is_sec_valid); - rustsecp256k1_v0_4_0_scalar_set_b32(&msg, msg32, NULL); + is_sec_valid = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&sec, seckey); + rustsecp256k1_v0_4_1_scalar_cmov(&sec, &rustsecp256k1_v0_4_1_scalar_one, !is_sec_valid); + rustsecp256k1_v0_4_1_scalar_set_b32(&msg, msg32, NULL); while (1) { int is_nonce_valid; ret = !!noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count); if (!ret) { break; } - is_nonce_valid = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&non, nonce32); + is_nonce_valid = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&non, nonce32); /* The nonce is still secret here, but it being invalid is less likely than 1:2^255. */ - rustsecp256k1_v0_4_0_declassify(ctx, &is_nonce_valid, sizeof(is_nonce_valid)); + rustsecp256k1_v0_4_1_declassify(ctx, &is_nonce_valid, sizeof(is_nonce_valid)); if (is_nonce_valid) { - ret = rustsecp256k1_v0_4_0_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, r, s, &sec, &msg, &non, recid); + ret = rustsecp256k1_v0_4_1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, r, s, &sec, &msg, &non, recid); /* The final signature is no longer a secret, nor is the fact that we were successful or not. */ - rustsecp256k1_v0_4_0_declassify(ctx, &ret, sizeof(ret)); + rustsecp256k1_v0_4_1_declassify(ctx, &ret, sizeof(ret)); if (ret) { break; } @@ -521,204 +552,204 @@ static int rustsecp256k1_v0_4_0_ecdsa_sign_inner(const rustsecp256k1_v0_4_0_cont * used as a branching variable. */ ret &= is_sec_valid; memset(nonce32, 0, 32); - rustsecp256k1_v0_4_0_scalar_clear(&msg); - rustsecp256k1_v0_4_0_scalar_clear(&non); - rustsecp256k1_v0_4_0_scalar_clear(&sec); - rustsecp256k1_v0_4_0_scalar_cmov(r, &rustsecp256k1_v0_4_0_scalar_zero, !ret); - rustsecp256k1_v0_4_0_scalar_cmov(s, &rustsecp256k1_v0_4_0_scalar_zero, !ret); + rustsecp256k1_v0_4_1_scalar_clear(&msg); + rustsecp256k1_v0_4_1_scalar_clear(&non); + rustsecp256k1_v0_4_1_scalar_clear(&sec); + rustsecp256k1_v0_4_1_scalar_cmov(r, &rustsecp256k1_v0_4_1_scalar_zero, !ret); + rustsecp256k1_v0_4_1_scalar_cmov(s, &rustsecp256k1_v0_4_1_scalar_zero, !ret); if (recid) { const int zero = 0; - rustsecp256k1_v0_4_0_int_cmov(recid, &zero, !ret); + rustsecp256k1_v0_4_1_int_cmov(recid, &zero, !ret); } return ret; } -int rustsecp256k1_v0_4_0_ecdsa_sign(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_4_0_nonce_function noncefp, const void* noncedata) { - rustsecp256k1_v0_4_0_scalar r, s; +int rustsecp256k1_v0_4_1_ecdsa_sign(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_4_1_nonce_function noncefp, const void* noncedata) { + rustsecp256k1_v0_4_1_scalar r, s; int ret; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(msghash32 != NULL); ARG_CHECK(signature != NULL); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_4_0_ecdsa_sign_inner(ctx, &r, &s, NULL, msghash32, seckey, noncefp, noncedata); - rustsecp256k1_v0_4_0_ecdsa_signature_save(signature, &r, &s); + ret = rustsecp256k1_v0_4_1_ecdsa_sign_inner(ctx, &r, &s, NULL, msghash32, seckey, noncefp, noncedata); + 
rustsecp256k1_v0_4_1_ecdsa_signature_save(signature, &r, &s); return ret; } -int rustsecp256k1_v0_4_0_ec_seckey_verify(const rustsecp256k1_v0_4_0_context* ctx, const unsigned char *seckey) { - rustsecp256k1_v0_4_0_scalar sec; +int rustsecp256k1_v0_4_1_ec_seckey_verify(const rustsecp256k1_v0_4_1_context* ctx, const unsigned char *seckey) { + rustsecp256k1_v0_4_1_scalar sec; int ret; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&sec, seckey); - rustsecp256k1_v0_4_0_scalar_clear(&sec); + ret = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&sec, seckey); + rustsecp256k1_v0_4_1_scalar_clear(&sec); return ret; } -static int rustsecp256k1_v0_4_0_ec_pubkey_create_helper(const rustsecp256k1_v0_4_0_ecmult_gen_context *ecmult_gen_ctx, rustsecp256k1_v0_4_0_scalar *seckey_scalar, rustsecp256k1_v0_4_0_ge *p, const unsigned char *seckey) { - rustsecp256k1_v0_4_0_gej pj; +static int rustsecp256k1_v0_4_1_ec_pubkey_create_helper(const rustsecp256k1_v0_4_1_ecmult_gen_context *ecmult_gen_ctx, rustsecp256k1_v0_4_1_scalar *seckey_scalar, rustsecp256k1_v0_4_1_ge *p, const unsigned char *seckey) { + rustsecp256k1_v0_4_1_gej pj; int ret; - ret = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(seckey_scalar, seckey); - rustsecp256k1_v0_4_0_scalar_cmov(seckey_scalar, &rustsecp256k1_v0_4_0_scalar_one, !ret); + ret = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(seckey_scalar, seckey); + rustsecp256k1_v0_4_1_scalar_cmov(seckey_scalar, &rustsecp256k1_v0_4_1_scalar_one, !ret); - rustsecp256k1_v0_4_0_ecmult_gen(ecmult_gen_ctx, &pj, seckey_scalar); - rustsecp256k1_v0_4_0_ge_set_gej(p, &pj); + rustsecp256k1_v0_4_1_ecmult_gen(ecmult_gen_ctx, &pj, seckey_scalar); + rustsecp256k1_v0_4_1_ge_set_gej(p, &pj); return ret; } -int rustsecp256k1_v0_4_0_ec_pubkey_create(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *pubkey, const unsigned char *seckey) { - rustsecp256k1_v0_4_0_ge p; - rustsecp256k1_v0_4_0_scalar seckey_scalar; +int rustsecp256k1_v0_4_1_ec_pubkey_create(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *pubkey, const unsigned char *seckey) { + rustsecp256k1_v0_4_1_ge p; + rustsecp256k1_v0_4_1_scalar seckey_scalar; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); memset(pubkey, 0, sizeof(*pubkey)); - ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_4_0_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &seckey_scalar, &p, seckey); - rustsecp256k1_v0_4_0_pubkey_save(pubkey, &p); - rustsecp256k1_v0_4_0_memczero(pubkey, sizeof(*pubkey), !ret); + ret = rustsecp256k1_v0_4_1_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &seckey_scalar, &p, seckey); + rustsecp256k1_v0_4_1_pubkey_save(pubkey, &p); + rustsecp256k1_v0_4_1_memczero(pubkey, sizeof(*pubkey), !ret); - rustsecp256k1_v0_4_0_scalar_clear(&seckey_scalar); + rustsecp256k1_v0_4_1_scalar_clear(&seckey_scalar); return ret; } -int rustsecp256k1_v0_4_0_ec_seckey_negate(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *seckey) { - rustsecp256k1_v0_4_0_scalar sec; +int rustsecp256k1_v0_4_1_ec_seckey_negate(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey) { + rustsecp256k1_v0_4_1_scalar sec; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&sec, seckey); - rustsecp256k1_v0_4_0_scalar_cmov(&sec, 
&rustsecp256k1_v0_4_0_scalar_zero, !ret); - rustsecp256k1_v0_4_0_scalar_negate(&sec, &sec); - rustsecp256k1_v0_4_0_scalar_get_b32(seckey, &sec); + ret = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&sec, seckey); + rustsecp256k1_v0_4_1_scalar_cmov(&sec, &rustsecp256k1_v0_4_1_scalar_zero, !ret); + rustsecp256k1_v0_4_1_scalar_negate(&sec, &sec); + rustsecp256k1_v0_4_1_scalar_get_b32(seckey, &sec); - rustsecp256k1_v0_4_0_scalar_clear(&sec); + rustsecp256k1_v0_4_1_scalar_clear(&sec); return ret; } -int rustsecp256k1_v0_4_0_ec_privkey_negate(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *seckey) { - return rustsecp256k1_v0_4_0_ec_seckey_negate(ctx, seckey); +int rustsecp256k1_v0_4_1_ec_privkey_negate(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey) { + return rustsecp256k1_v0_4_1_ec_seckey_negate(ctx, seckey); } -int rustsecp256k1_v0_4_0_ec_pubkey_negate(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *pubkey) { +int rustsecp256k1_v0_4_1_ec_pubkey_negate(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *pubkey) { int ret = 0; - rustsecp256k1_v0_4_0_ge p; + rustsecp256k1_v0_4_1_ge p; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); - ret = rustsecp256k1_v0_4_0_pubkey_load(ctx, &p, pubkey); + ret = rustsecp256k1_v0_4_1_pubkey_load(ctx, &p, pubkey); memset(pubkey, 0, sizeof(*pubkey)); if (ret) { - rustsecp256k1_v0_4_0_ge_neg(&p, &p); - rustsecp256k1_v0_4_0_pubkey_save(pubkey, &p); + rustsecp256k1_v0_4_1_ge_neg(&p, &p); + rustsecp256k1_v0_4_1_pubkey_save(pubkey, &p); } return ret; } -static int rustsecp256k1_v0_4_0_ec_seckey_tweak_add_helper(rustsecp256k1_v0_4_0_scalar *sec, const unsigned char *tweak32) { - rustsecp256k1_v0_4_0_scalar term; +static int rustsecp256k1_v0_4_1_ec_seckey_tweak_add_helper(rustsecp256k1_v0_4_1_scalar *sec, const unsigned char *tweak32) { + rustsecp256k1_v0_4_1_scalar term; int overflow = 0; int ret = 0; - rustsecp256k1_v0_4_0_scalar_set_b32(&term, tweak32, &overflow); - ret = (!overflow) & rustsecp256k1_v0_4_0_eckey_privkey_tweak_add(sec, &term); - rustsecp256k1_v0_4_0_scalar_clear(&term); + rustsecp256k1_v0_4_1_scalar_set_b32(&term, tweak32, &overflow); + ret = (!overflow) & rustsecp256k1_v0_4_1_eckey_privkey_tweak_add(sec, &term); + rustsecp256k1_v0_4_1_scalar_clear(&term); return ret; } -int rustsecp256k1_v0_4_0_ec_seckey_tweak_add(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { - rustsecp256k1_v0_4_0_scalar sec; +int rustsecp256k1_v0_4_1_ec_seckey_tweak_add(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + rustsecp256k1_v0_4_1_scalar sec; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); ARG_CHECK(tweak32 != NULL); - ret = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&sec, seckey); - ret &= rustsecp256k1_v0_4_0_ec_seckey_tweak_add_helper(&sec, tweak32); - rustsecp256k1_v0_4_0_scalar_cmov(&sec, &rustsecp256k1_v0_4_0_scalar_zero, !ret); - rustsecp256k1_v0_4_0_scalar_get_b32(seckey, &sec); + ret = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&sec, seckey); + ret &= rustsecp256k1_v0_4_1_ec_seckey_tweak_add_helper(&sec, tweak32); + rustsecp256k1_v0_4_1_scalar_cmov(&sec, &rustsecp256k1_v0_4_1_scalar_zero, !ret); + rustsecp256k1_v0_4_1_scalar_get_b32(seckey, &sec); - rustsecp256k1_v0_4_0_scalar_clear(&sec); + rustsecp256k1_v0_4_1_scalar_clear(&sec); return ret; } -int rustsecp256k1_v0_4_0_ec_privkey_tweak_add(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *seckey, const unsigned char 
*tweak32) { - return rustsecp256k1_v0_4_0_ec_seckey_tweak_add(ctx, seckey, tweak32); +int rustsecp256k1_v0_4_1_ec_privkey_tweak_add(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + return rustsecp256k1_v0_4_1_ec_seckey_tweak_add(ctx, seckey, tweak32); } -static int rustsecp256k1_v0_4_0_ec_pubkey_tweak_add_helper(const rustsecp256k1_v0_4_0_ecmult_context* ecmult_ctx, rustsecp256k1_v0_4_0_ge *p, const unsigned char *tweak32) { - rustsecp256k1_v0_4_0_scalar term; +static int rustsecp256k1_v0_4_1_ec_pubkey_tweak_add_helper(const rustsecp256k1_v0_4_1_ecmult_context* ecmult_ctx, rustsecp256k1_v0_4_1_ge *p, const unsigned char *tweak32) { + rustsecp256k1_v0_4_1_scalar term; int overflow = 0; - rustsecp256k1_v0_4_0_scalar_set_b32(&term, tweak32, &overflow); - return !overflow && rustsecp256k1_v0_4_0_eckey_pubkey_tweak_add(ecmult_ctx, p, &term); + rustsecp256k1_v0_4_1_scalar_set_b32(&term, tweak32, &overflow); + return !overflow && rustsecp256k1_v0_4_1_eckey_pubkey_tweak_add(ecmult_ctx, p, &term); } -int rustsecp256k1_v0_4_0_ec_pubkey_tweak_add(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *pubkey, const unsigned char *tweak32) { - rustsecp256k1_v0_4_0_ge p; +int rustsecp256k1_v0_4_1_ec_pubkey_tweak_add(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *pubkey, const unsigned char *tweak32) { + rustsecp256k1_v0_4_1_ge p; int ret = 0; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)); + ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx)); ARG_CHECK(pubkey != NULL); ARG_CHECK(tweak32 != NULL); - ret = rustsecp256k1_v0_4_0_pubkey_load(ctx, &p, pubkey); + ret = rustsecp256k1_v0_4_1_pubkey_load(ctx, &p, pubkey); memset(pubkey, 0, sizeof(*pubkey)); - ret = ret && rustsecp256k1_v0_4_0_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &p, tweak32); + ret = ret && rustsecp256k1_v0_4_1_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &p, tweak32); if (ret) { - rustsecp256k1_v0_4_0_pubkey_save(pubkey, &p); + rustsecp256k1_v0_4_1_pubkey_save(pubkey, &p); } return ret; } -int rustsecp256k1_v0_4_0_ec_seckey_tweak_mul(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { - rustsecp256k1_v0_4_0_scalar factor; - rustsecp256k1_v0_4_0_scalar sec; +int rustsecp256k1_v0_4_1_ec_seckey_tweak_mul(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + rustsecp256k1_v0_4_1_scalar factor; + rustsecp256k1_v0_4_1_scalar sec; int ret = 0; int overflow = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); ARG_CHECK(tweak32 != NULL); - rustsecp256k1_v0_4_0_scalar_set_b32(&factor, tweak32, &overflow); - ret = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&sec, seckey); - ret &= (!overflow) & rustsecp256k1_v0_4_0_eckey_privkey_tweak_mul(&sec, &factor); - rustsecp256k1_v0_4_0_scalar_cmov(&sec, &rustsecp256k1_v0_4_0_scalar_zero, !ret); - rustsecp256k1_v0_4_0_scalar_get_b32(seckey, &sec); + rustsecp256k1_v0_4_1_scalar_set_b32(&factor, tweak32, &overflow); + ret = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&sec, seckey); + ret &= (!overflow) & rustsecp256k1_v0_4_1_eckey_privkey_tweak_mul(&sec, &factor); + rustsecp256k1_v0_4_1_scalar_cmov(&sec, &rustsecp256k1_v0_4_1_scalar_zero, !ret); + rustsecp256k1_v0_4_1_scalar_get_b32(seckey, &sec); - rustsecp256k1_v0_4_0_scalar_clear(&sec); - rustsecp256k1_v0_4_0_scalar_clear(&factor); + rustsecp256k1_v0_4_1_scalar_clear(&sec); + 
rustsecp256k1_v0_4_1_scalar_clear(&factor); return ret; } -int rustsecp256k1_v0_4_0_ec_privkey_tweak_mul(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { - return rustsecp256k1_v0_4_0_ec_seckey_tweak_mul(ctx, seckey, tweak32); +int rustsecp256k1_v0_4_1_ec_privkey_tweak_mul(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + return rustsecp256k1_v0_4_1_ec_seckey_tweak_mul(ctx, seckey, tweak32); } -int rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *pubkey, const unsigned char *tweak32) { - rustsecp256k1_v0_4_0_ge p; - rustsecp256k1_v0_4_0_scalar factor; +int rustsecp256k1_v0_4_1_ec_pubkey_tweak_mul(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *pubkey, const unsigned char *tweak32) { + rustsecp256k1_v0_4_1_ge p; + rustsecp256k1_v0_4_1_scalar factor; int ret = 0; int overflow = 0; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)); + ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx)); ARG_CHECK(pubkey != NULL); ARG_CHECK(tweak32 != NULL); - rustsecp256k1_v0_4_0_scalar_set_b32(&factor, tweak32, &overflow); - ret = !overflow && rustsecp256k1_v0_4_0_pubkey_load(ctx, &p, pubkey); + rustsecp256k1_v0_4_1_scalar_set_b32(&factor, tweak32, &overflow); + ret = !overflow && rustsecp256k1_v0_4_1_pubkey_load(ctx, &p, pubkey); memset(pubkey, 0, sizeof(*pubkey)); if (ret) { - if (rustsecp256k1_v0_4_0_eckey_pubkey_tweak_mul(&ctx->ecmult_ctx, &p, &factor)) { - rustsecp256k1_v0_4_0_pubkey_save(pubkey, &p); + if (rustsecp256k1_v0_4_1_eckey_pubkey_tweak_mul(&ctx->ecmult_ctx, &p, &factor)) { + rustsecp256k1_v0_4_1_pubkey_save(pubkey, &p); } else { ret = 0; } @@ -727,35 +758,35 @@ int rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul(const rustsecp256k1_v0_4_0_context* return ret; } -int rustsecp256k1_v0_4_0_context_randomize(rustsecp256k1_v0_4_0_context* ctx, const unsigned char *seed32) { +int rustsecp256k1_v0_4_1_context_randomize(rustsecp256k1_v0_4_1_context* ctx, const unsigned char *seed32) { VERIFY_CHECK(ctx != NULL); - if (rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { - rustsecp256k1_v0_4_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); + if (rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { + rustsecp256k1_v0_4_1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); } return 1; } -int rustsecp256k1_v0_4_0_ec_pubkey_combine(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *pubnonce, const rustsecp256k1_v0_4_0_pubkey * const *pubnonces, size_t n) { +int rustsecp256k1_v0_4_1_ec_pubkey_combine(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *pubnonce, const rustsecp256k1_v0_4_1_pubkey * const *pubnonces, size_t n) { size_t i; - rustsecp256k1_v0_4_0_gej Qj; - rustsecp256k1_v0_4_0_ge Q; + rustsecp256k1_v0_4_1_gej Qj; + rustsecp256k1_v0_4_1_ge Q; ARG_CHECK(pubnonce != NULL); memset(pubnonce, 0, sizeof(*pubnonce)); ARG_CHECK(n >= 1); ARG_CHECK(pubnonces != NULL); - rustsecp256k1_v0_4_0_gej_set_infinity(&Qj); + rustsecp256k1_v0_4_1_gej_set_infinity(&Qj); for (i = 0; i < n; i++) { - rustsecp256k1_v0_4_0_pubkey_load(ctx, &Q, pubnonces[i]); - rustsecp256k1_v0_4_0_gej_add_ge(&Qj, &Qj, &Q); + rustsecp256k1_v0_4_1_pubkey_load(ctx, &Q, pubnonces[i]); + rustsecp256k1_v0_4_1_gej_add_ge(&Qj, &Qj, &Q); } - if (rustsecp256k1_v0_4_0_gej_is_infinity(&Qj)) { + if 
(rustsecp256k1_v0_4_1_gej_is_infinity(&Qj)) { return 0; } - rustsecp256k1_v0_4_0_ge_set_gej(&Q, &Qj); - rustsecp256k1_v0_4_0_pubkey_save(pubnonce, &Q); + rustsecp256k1_v0_4_1_ge_set_gej(&Q, &Qj); + rustsecp256k1_v0_4_1_pubkey_save(pubnonce, &Q); return 1; } diff --git a/secp256k1-sys/depend/secp256k1/src/selftest.h b/secp256k1-sys/depend/secp256k1/src/selftest.h index 1ba337d..33c24be 100644 --- a/secp256k1-sys/depend/secp256k1/src/selftest.h +++ b/secp256k1-sys/depend/secp256k1/src/selftest.h @@ -11,22 +11,22 @@ #include <string.h> -static int rustsecp256k1_v0_4_0_selftest_sha256(void) { +static int rustsecp256k1_v0_4_1_selftest_sha256(void) { static const char *input63 = "For this sample, this 63-byte string will be used as input data"; static const unsigned char output32[32] = { 0xf0, 0x8a, 0x78, 0xcb, 0xba, 0xee, 0x08, 0x2b, 0x05, 0x2a, 0xe0, 0x70, 0x8f, 0x32, 0xfa, 0x1e, 0x50, 0xc5, 0xc4, 0x21, 0xaa, 0x77, 0x2b, 0xa5, 0xdb, 0xb4, 0x06, 0xa2, 0xea, 0x6b, 0xe3, 0x42, }; unsigned char out[32]; - rustsecp256k1_v0_4_0_sha256 hasher; - rustsecp256k1_v0_4_0_sha256_initialize(&hasher); - rustsecp256k1_v0_4_0_sha256_write(&hasher, (const unsigned char*)input63, 63); - rustsecp256k1_v0_4_0_sha256_finalize(&hasher, out); - return rustsecp256k1_v0_4_0_memcmp_var(out, output32, 32) == 0; + rustsecp256k1_v0_4_1_sha256 hasher; + rustsecp256k1_v0_4_1_sha256_initialize(&hasher); + rustsecp256k1_v0_4_1_sha256_write(&hasher, (const unsigned char*)input63, 63); + rustsecp256k1_v0_4_1_sha256_finalize(&hasher, out); + return rustsecp256k1_v0_4_1_memcmp_var(out, output32, 32) == 0; } -static int rustsecp256k1_v0_4_0_selftest(void) { - return rustsecp256k1_v0_4_0_selftest_sha256(); +static int rustsecp256k1_v0_4_1_selftest(void) { + return rustsecp256k1_v0_4_1_selftest_sha256(); } #endif /* SECP256K1_SELFTEST_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/testrand.h b/secp256k1-sys/depend/secp256k1/src/testrand.h index 59fa289..210faef 100644 --- a/secp256k1-sys/depend/secp256k1/src/testrand.h +++ b/secp256k1-sys/depend/secp256k1/src/testrand.h @@ -14,34 +14,34 @@ /* A non-cryptographic RNG used only for test infrastructure. */ /** Seed the pseudorandom number generator for testing. */ -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_testrand_seed(const unsigned char *seed16); +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_testrand_seed(const unsigned char *seed16); /** Generate a pseudorandom number in the range [0..2**32-1]. */ -static uint32_t rustsecp256k1_v0_4_0_testrand32(void); +static uint32_t rustsecp256k1_v0_4_1_testrand32(void); /** Generate a pseudorandom number in the range [0..2**bits-1]. Bits must be 1 or * more. */ -static uint32_t rustsecp256k1_v0_4_0_testrand_bits(int bits); +static uint32_t rustsecp256k1_v0_4_1_testrand_bits(int bits); /** Generate a pseudorandom number in the range [0..range-1]. */ -static uint32_t rustsecp256k1_v0_4_0_testrand_int(uint32_t range); +static uint32_t rustsecp256k1_v0_4_1_testrand_int(uint32_t range); /** Generate a pseudorandom 32-byte array. */ -static void rustsecp256k1_v0_4_0_testrand256(unsigned char *b32); +static void rustsecp256k1_v0_4_1_testrand256(unsigned char *b32); /** Generate a pseudorandom 32-byte array with long sequences of zero and one bits. */ -static void rustsecp256k1_v0_4_0_testrand256_test(unsigned char *b32); +static void rustsecp256k1_v0_4_1_testrand256_test(unsigned char *b32); /** Generate pseudorandom bytes with long sequences of zero and one bits. 
*/ -static void rustsecp256k1_v0_4_0_testrand_bytes_test(unsigned char *bytes, size_t len); +static void rustsecp256k1_v0_4_1_testrand_bytes_test(unsigned char *bytes, size_t len); /** Flip a single random bit in a byte array */ -static void rustsecp256k1_v0_4_0_testrand_flip(unsigned char *b, size_t len); +static void rustsecp256k1_v0_4_1_testrand_flip(unsigned char *b, size_t len); /** Initialize the test RNG using (hex encoded) array up to 16 bytes, or randomly if hexseed is NULL. */ -static void rustsecp256k1_v0_4_0_testrand_init(const char* hexseed); +static void rustsecp256k1_v0_4_1_testrand_init(const char* hexseed); /** Print final test information. */ -static void rustsecp256k1_v0_4_0_testrand_finish(void); +static void rustsecp256k1_v0_4_1_testrand_finish(void); #endif /* SECP256K1_TESTRAND_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/testrand_impl.h b/secp256k1-sys/depend/secp256k1/src/testrand_impl.h index 2159f2f..1b1c016 100644 --- a/secp256k1-sys/depend/secp256k1/src/testrand_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/testrand_impl.h @@ -14,38 +14,38 @@ #include "testrand.h" #include "hash.h" -static rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 rustsecp256k1_v0_4_0_test_rng; -static uint32_t rustsecp256k1_v0_4_0_test_rng_precomputed[8]; -static int rustsecp256k1_v0_4_0_test_rng_precomputed_used = 8; -static uint64_t rustsecp256k1_v0_4_0_test_rng_integer; -static int rustsecp256k1_v0_4_0_test_rng_integer_bits_left = 0; +static rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 rustsecp256k1_v0_4_1_test_rng; +static uint32_t rustsecp256k1_v0_4_1_test_rng_precomputed[8]; +static int rustsecp256k1_v0_4_1_test_rng_precomputed_used = 8; +static uint64_t rustsecp256k1_v0_4_1_test_rng_integer; +static int rustsecp256k1_v0_4_1_test_rng_integer_bits_left = 0; -SECP256K1_INLINE static void rustsecp256k1_v0_4_0_testrand_seed(const unsigned char *seed16) { - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_initialize(&rustsecp256k1_v0_4_0_test_rng, seed16, 16); +SECP256K1_INLINE static void rustsecp256k1_v0_4_1_testrand_seed(const unsigned char *seed16) { + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_initialize(&rustsecp256k1_v0_4_1_test_rng, seed16, 16); } -SECP256K1_INLINE static uint32_t rustsecp256k1_v0_4_0_testrand32(void) { - if (rustsecp256k1_v0_4_0_test_rng_precomputed_used == 8) { - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(&rustsecp256k1_v0_4_0_test_rng, (unsigned char*)(&rustsecp256k1_v0_4_0_test_rng_precomputed[0]), sizeof(rustsecp256k1_v0_4_0_test_rng_precomputed)); - rustsecp256k1_v0_4_0_test_rng_precomputed_used = 0; +SECP256K1_INLINE static uint32_t rustsecp256k1_v0_4_1_testrand32(void) { + if (rustsecp256k1_v0_4_1_test_rng_precomputed_used == 8) { + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(&rustsecp256k1_v0_4_1_test_rng, (unsigned char*)(&rustsecp256k1_v0_4_1_test_rng_precomputed[0]), sizeof(rustsecp256k1_v0_4_1_test_rng_precomputed)); + rustsecp256k1_v0_4_1_test_rng_precomputed_used = 0; } - return rustsecp256k1_v0_4_0_test_rng_precomputed[rustsecp256k1_v0_4_0_test_rng_precomputed_used++]; + return rustsecp256k1_v0_4_1_test_rng_precomputed[rustsecp256k1_v0_4_1_test_rng_precomputed_used++]; } -static uint32_t rustsecp256k1_v0_4_0_testrand_bits(int bits) { +static uint32_t rustsecp256k1_v0_4_1_testrand_bits(int bits) { uint32_t ret; - if (rustsecp256k1_v0_4_0_test_rng_integer_bits_left < bits) { - rustsecp256k1_v0_4_0_test_rng_integer |= (((uint64_t)rustsecp256k1_v0_4_0_testrand32()) << rustsecp256k1_v0_4_0_test_rng_integer_bits_left); - 
rustsecp256k1_v0_4_0_test_rng_integer_bits_left += 32; + if (rustsecp256k1_v0_4_1_test_rng_integer_bits_left < bits) { + rustsecp256k1_v0_4_1_test_rng_integer |= (((uint64_t)rustsecp256k1_v0_4_1_testrand32()) << rustsecp256k1_v0_4_1_test_rng_integer_bits_left); + rustsecp256k1_v0_4_1_test_rng_integer_bits_left += 32; } - ret = rustsecp256k1_v0_4_0_test_rng_integer; - rustsecp256k1_v0_4_0_test_rng_integer >>= bits; - rustsecp256k1_v0_4_0_test_rng_integer_bits_left -= bits; + ret = rustsecp256k1_v0_4_1_test_rng_integer; + rustsecp256k1_v0_4_1_test_rng_integer >>= bits; + rustsecp256k1_v0_4_1_test_rng_integer_bits_left -= bits; ret &= ((~((uint32_t)0)) >> (32 - bits)); return ret; } -static uint32_t rustsecp256k1_v0_4_0_testrand_int(uint32_t range) { +static uint32_t rustsecp256k1_v0_4_1_testrand_int(uint32_t range) { /* We want a uniform integer between 0 and range-1, inclusive. * B is the smallest number such that range <= 2**B. * two mechanisms implemented here: @@ -77,25 +77,25 @@ static uint32_t rustsecp256k1_v0_4_0_testrand_int(uint32_t range) { mult = 1; } while(1) { - uint32_t x = rustsecp256k1_v0_4_0_testrand_bits(bits); + uint32_t x = rustsecp256k1_v0_4_1_testrand_bits(bits); if (x < trange) { return (mult == 1) ? x : (x % range); } } } -static void rustsecp256k1_v0_4_0_testrand256(unsigned char *b32) { - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(&rustsecp256k1_v0_4_0_test_rng, b32, 32); +static void rustsecp256k1_v0_4_1_testrand256(unsigned char *b32) { + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(&rustsecp256k1_v0_4_1_test_rng, b32, 32); } -static void rustsecp256k1_v0_4_0_testrand_bytes_test(unsigned char *bytes, size_t len) { +static void rustsecp256k1_v0_4_1_testrand_bytes_test(unsigned char *bytes, size_t len) { size_t bits = 0; memset(bytes, 0, len); while (bits < len * 8) { int now; uint32_t val; - now = 1 + (rustsecp256k1_v0_4_0_testrand_bits(6) * rustsecp256k1_v0_4_0_testrand_bits(5) + 16) / 31; - val = rustsecp256k1_v0_4_0_testrand_bits(1); + now = 1 + (rustsecp256k1_v0_4_1_testrand_bits(6) * rustsecp256k1_v0_4_1_testrand_bits(5) + 16) / 31; + val = rustsecp256k1_v0_4_1_testrand_bits(1); while (now > 0 && bits < len * 8) { bytes[bits / 8] |= val << (bits % 8); now--; @@ -104,15 +104,15 @@ static void rustsecp256k1_v0_4_0_testrand_bytes_test(unsigned char *bytes, size_ } } -static void rustsecp256k1_v0_4_0_testrand256_test(unsigned char *b32) { - rustsecp256k1_v0_4_0_testrand_bytes_test(b32, 32); +static void rustsecp256k1_v0_4_1_testrand256_test(unsigned char *b32) { + rustsecp256k1_v0_4_1_testrand_bytes_test(b32, 32); } -static void rustsecp256k1_v0_4_0_testrand_flip(unsigned char *b, size_t len) { - b[rustsecp256k1_v0_4_0_testrand_int(len)] ^= (1 << rustsecp256k1_v0_4_0_testrand_int(8)); +static void rustsecp256k1_v0_4_1_testrand_flip(unsigned char *b, size_t len) { + b[rustsecp256k1_v0_4_1_testrand_int(len)] ^= (1 << rustsecp256k1_v0_4_1_testrand_int(8)); } -static void rustsecp256k1_v0_4_0_testrand_init(const char* hexseed) { +static void rustsecp256k1_v0_4_1_testrand_init(const char* hexseed) { unsigned char seed16[16] = {0}; if (hexseed && strlen(hexseed) != 0) { int pos = 0; @@ -127,7 +127,7 @@ static void rustsecp256k1_v0_4_0_testrand_init(const char* hexseed) { pos++; } } else { - FILE *frand = fopen("/dev/urandom", "r"); + FILE *frand = fopen("/dev/urandom", "rb"); if ((frand == NULL) || fread(&seed16, 1, sizeof(seed16), frand) != sizeof(seed16)) { uint64_t t = time(NULL) * (uint64_t)1337; fprintf(stderr, "WARNING: could not read 16 bytes from 
/dev/urandom; falling back to insecure PRNG\n"); @@ -146,12 +146,12 @@ static void rustsecp256k1_v0_4_0_testrand_init(const char* hexseed) { } printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], seed16[13], seed16[14], seed16[15]); - rustsecp256k1_v0_4_0_testrand_seed(seed16); + rustsecp256k1_v0_4_1_testrand_seed(seed16); } -static void rustsecp256k1_v0_4_0_testrand_finish(void) { +static void rustsecp256k1_v0_4_1_testrand_finish(void) { unsigned char run32[32]; - rustsecp256k1_v0_4_0_testrand256(run32); + rustsecp256k1_v0_4_1_testrand256(run32); printf("random run = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", run32[0], run32[1], run32[2], run32[3], run32[4], run32[5], run32[6], run32[7], run32[8], run32[9], run32[10], run32[11], run32[12], run32[13], run32[14], run32[15]); } diff --git a/secp256k1-sys/depend/secp256k1/src/tests.c b/secp256k1-sys/depend/secp256k1/src/tests.c index cb7b168..a4987b7 100644 --- a/secp256k1-sys/depend/secp256k1/src/tests.c +++ b/secp256k1-sys/depend/secp256k1/src/tests.c @@ -15,25 +15,31 @@ #include <time.h> #include "secp256k1.c" -#include "include/secp256k1.h" -#include "include/secp256k1_preallocated.h" +#include "../include/secp256k1.h" +#include "../include/secp256k1_preallocated.h" #include "testrand_impl.h" +#include "util.h" #ifdef ENABLE_OPENSSL_TESTS -#include "openssl/bn.h" -#include "openssl/ec.h" -#include "openssl/ecdsa.h" -#include "openssl/obj_mac.h" +#include <openssl/bn.h> +#include <openssl/ec.h> +#include <openssl/ecdsa.h> +#include <openssl/obj_mac.h> # if OPENSSL_VERSION_NUMBER < 0x10100000L void ECDSA_SIG_get0(const ECDSA_SIG *sig, const BIGNUM **pr, const BIGNUM **ps) {*pr = sig->r; *ps = sig->s;} # endif #endif -#include "contrib/lax_der_parsing.c" -#include "contrib/lax_der_privatekey_parsing.c" +#include "../contrib/lax_der_parsing.c" +#include "../contrib/lax_der_privatekey_parsing.c" + +#include "modinv32_impl.h" +#ifdef SECP256K1_WIDEMUL_INT128 +#include "modinv64_impl.h" +#endif static int count = 64; -static rustsecp256k1_v0_4_0_context *ctx = NULL; +static rustsecp256k1_v0_4_1_context *ctx = NULL; static void counting_illegal_callback_fn(const char* str, void* data) { /* Dummy callback function that just counts.
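Each illegal-argument test below installs this callback with rustsecp256k1_v0_4_1_context_set_illegal_callback, passing a pointer to an int32_t counter (the ecount/ecount2 variables in run_context_tests), then asserts that the counter advanced by exactly the number of API misuses the test provoked.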
*/ @@ -51,79 +57,79 @@ static void uncounting_illegal_callback_fn(const char* str, void* data) { (*p)--; } -void random_field_element_test(rustsecp256k1_v0_4_0_fe *fe) { +void random_field_element_test(rustsecp256k1_v0_4_1_fe *fe) { do { unsigned char b32[32]; - rustsecp256k1_v0_4_0_testrand256_test(b32); - if (rustsecp256k1_v0_4_0_fe_set_b32(fe, b32)) { + rustsecp256k1_v0_4_1_testrand256_test(b32); + if (rustsecp256k1_v0_4_1_fe_set_b32(fe, b32)) { break; } } while(1); } -void random_field_element_magnitude(rustsecp256k1_v0_4_0_fe *fe) { - rustsecp256k1_v0_4_0_fe zero; - int n = rustsecp256k1_v0_4_0_testrand_int(9); - rustsecp256k1_v0_4_0_fe_normalize(fe); +void random_field_element_magnitude(rustsecp256k1_v0_4_1_fe *fe) { + rustsecp256k1_v0_4_1_fe zero; + int n = rustsecp256k1_v0_4_1_testrand_int(9); + rustsecp256k1_v0_4_1_fe_normalize(fe); if (n == 0) { return; } - rustsecp256k1_v0_4_0_fe_clear(&zero); - rustsecp256k1_v0_4_0_fe_negate(&zero, &zero, 0); - rustsecp256k1_v0_4_0_fe_mul_int(&zero, n - 1); - rustsecp256k1_v0_4_0_fe_add(fe, &zero); + rustsecp256k1_v0_4_1_fe_clear(&zero); + rustsecp256k1_v0_4_1_fe_negate(&zero, &zero, 0); + rustsecp256k1_v0_4_1_fe_mul_int(&zero, n - 1); + rustsecp256k1_v0_4_1_fe_add(fe, &zero); #ifdef VERIFY CHECK(fe->magnitude == n); #endif } -void random_group_element_test(rustsecp256k1_v0_4_0_ge *ge) { - rustsecp256k1_v0_4_0_fe fe; +void random_group_element_test(rustsecp256k1_v0_4_1_ge *ge) { + rustsecp256k1_v0_4_1_fe fe; do { random_field_element_test(&fe); - if (rustsecp256k1_v0_4_0_ge_set_xo_var(ge, &fe, rustsecp256k1_v0_4_0_testrand_bits(1))) { - rustsecp256k1_v0_4_0_fe_normalize(&ge->y); + if (rustsecp256k1_v0_4_1_ge_set_xo_var(ge, &fe, rustsecp256k1_v0_4_1_testrand_bits(1))) { + rustsecp256k1_v0_4_1_fe_normalize(&ge->y); break; } } while(1); ge->infinity = 0; } -void random_group_element_jacobian_test(rustsecp256k1_v0_4_0_gej *gej, const rustsecp256k1_v0_4_0_ge *ge) { - rustsecp256k1_v0_4_0_fe z2, z3; +void random_group_element_jacobian_test(rustsecp256k1_v0_4_1_gej *gej, const rustsecp256k1_v0_4_1_ge *ge) { + rustsecp256k1_v0_4_1_fe z2, z3; do { random_field_element_test(&gej->z); - if (!rustsecp256k1_v0_4_0_fe_is_zero(&gej->z)) { + if (!rustsecp256k1_v0_4_1_fe_is_zero(&gej->z)) { break; } } while(1); - rustsecp256k1_v0_4_0_fe_sqr(&z2, &gej->z); - rustsecp256k1_v0_4_0_fe_mul(&z3, &z2, &gej->z); - rustsecp256k1_v0_4_0_fe_mul(&gej->x, &ge->x, &z2); - rustsecp256k1_v0_4_0_fe_mul(&gej->y, &ge->y, &z3); + rustsecp256k1_v0_4_1_fe_sqr(&z2, &gej->z); + rustsecp256k1_v0_4_1_fe_mul(&z3, &z2, &gej->z); + rustsecp256k1_v0_4_1_fe_mul(&gej->x, &ge->x, &z2); + rustsecp256k1_v0_4_1_fe_mul(&gej->y, &ge->y, &z3); gej->infinity = ge->infinity; } -void random_scalar_order_test(rustsecp256k1_v0_4_0_scalar *num) { +void random_scalar_order_test(rustsecp256k1_v0_4_1_scalar *num) { do { unsigned char b32[32]; int overflow = 0; - rustsecp256k1_v0_4_0_testrand256_test(b32); - rustsecp256k1_v0_4_0_scalar_set_b32(num, b32, &overflow); - if (overflow || rustsecp256k1_v0_4_0_scalar_is_zero(num)) { + rustsecp256k1_v0_4_1_testrand256_test(b32); + rustsecp256k1_v0_4_1_scalar_set_b32(num, b32, &overflow); + if (overflow || rustsecp256k1_v0_4_1_scalar_is_zero(num)) { continue; } break; } while(1); } -void random_scalar_order(rustsecp256k1_v0_4_0_scalar *num) { +void random_scalar_order(rustsecp256k1_v0_4_1_scalar *num) { do { unsigned char b32[32]; int overflow = 0; - rustsecp256k1_v0_4_0_testrand256(b32); - rustsecp256k1_v0_4_0_scalar_set_b32(num, b32, &overflow); - if (overflow || 
rustsecp256k1_v0_4_0_scalar_is_zero(num)) { + rustsecp256k1_v0_4_1_testrand256(b32); + rustsecp256k1_v0_4_1_scalar_set_b32(num, b32, &overflow); + if (overflow || rustsecp256k1_v0_4_1_scalar_is_zero(num)) { continue; } break; @@ -131,205 +137,205 @@ void random_scalar_order(rustsecp256k1_v0_4_0_scalar *num) { } void random_scalar_order_b32(unsigned char *b32) { - rustsecp256k1_v0_4_0_scalar num; + rustsecp256k1_v0_4_1_scalar num; random_scalar_order(&num); - rustsecp256k1_v0_4_0_scalar_get_b32(b32, &num); + rustsecp256k1_v0_4_1_scalar_get_b32(b32, &num); } void run_context_tests(int use_prealloc) { - rustsecp256k1_v0_4_0_pubkey pubkey; - rustsecp256k1_v0_4_0_pubkey zero_pubkey; - rustsecp256k1_v0_4_0_ecdsa_signature sig; + rustsecp256k1_v0_4_1_pubkey pubkey; + rustsecp256k1_v0_4_1_pubkey zero_pubkey; + rustsecp256k1_v0_4_1_ecdsa_signature sig; unsigned char ctmp[32]; int32_t ecount; int32_t ecount2; - rustsecp256k1_v0_4_0_context *none; - rustsecp256k1_v0_4_0_context *sign; - rustsecp256k1_v0_4_0_context *vrfy; - rustsecp256k1_v0_4_0_context *both; + rustsecp256k1_v0_4_1_context *none; + rustsecp256k1_v0_4_1_context *sign; + rustsecp256k1_v0_4_1_context *vrfy; + rustsecp256k1_v0_4_1_context *both; void *none_prealloc = NULL; void *sign_prealloc = NULL; void *vrfy_prealloc = NULL; void *both_prealloc = NULL; - rustsecp256k1_v0_4_0_gej pubj; - rustsecp256k1_v0_4_0_ge pub; - rustsecp256k1_v0_4_0_scalar msg, key, nonce; - rustsecp256k1_v0_4_0_scalar sigr, sigs; + rustsecp256k1_v0_4_1_gej pubj; + rustsecp256k1_v0_4_1_ge pub; + rustsecp256k1_v0_4_1_scalar msg, key, nonce; + rustsecp256k1_v0_4_1_scalar sigr, sigs; if (use_prealloc) { - none_prealloc = malloc(rustsecp256k1_v0_4_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); - sign_prealloc = malloc(rustsecp256k1_v0_4_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); - vrfy_prealloc = malloc(rustsecp256k1_v0_4_0_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); - both_prealloc = malloc(rustsecp256k1_v0_4_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); + none_prealloc = malloc(rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_CONTEXT_NONE)); + sign_prealloc = malloc(rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); + vrfy_prealloc = malloc(rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); + both_prealloc = malloc(rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(none_prealloc != NULL); CHECK(sign_prealloc != NULL); CHECK(vrfy_prealloc != NULL); CHECK(both_prealloc != NULL); - none = rustsecp256k1_v0_4_0_context_preallocated_create(none_prealloc, SECP256K1_CONTEXT_NONE); - sign = rustsecp256k1_v0_4_0_context_preallocated_create(sign_prealloc, SECP256K1_CONTEXT_SIGN); - vrfy = rustsecp256k1_v0_4_0_context_preallocated_create(vrfy_prealloc, SECP256K1_CONTEXT_VERIFY); - both = rustsecp256k1_v0_4_0_context_preallocated_create(both_prealloc, SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + none = rustsecp256k1_v0_4_1_context_preallocated_create(none_prealloc, SECP256K1_CONTEXT_NONE); + sign = rustsecp256k1_v0_4_1_context_preallocated_create(sign_prealloc, SECP256K1_CONTEXT_SIGN); + vrfy = rustsecp256k1_v0_4_1_context_preallocated_create(vrfy_prealloc, SECP256K1_CONTEXT_VERIFY); + both = rustsecp256k1_v0_4_1_context_preallocated_create(both_prealloc, SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); } else { - none = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_NONE); - sign = 
rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN); - vrfy = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_VERIFY); - both = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + none = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_NONE); + sign = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN); + vrfy = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_VERIFY); + both = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); } memset(&zero_pubkey, 0, sizeof(zero_pubkey)); ecount = 0; ecount2 = 10; - rustsecp256k1_v0_4_0_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_4_0_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount2); - /* set error callback (to a function that still aborts in case malloc() fails in rustsecp256k1_v0_4_0_context_clone() below) */ - rustsecp256k1_v0_4_0_context_set_error_callback(sign, rustsecp256k1_v0_4_0_default_illegal_callback_fn, NULL); + rustsecp256k1_v0_4_1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount2); + /* set error callback (to a function that still aborts in case malloc() fails in rustsecp256k1_v0_4_1_context_clone() below) */ + rustsecp256k1_v0_4_1_context_set_error_callback(sign, rustsecp256k1_v0_4_1_default_illegal_callback_fn, NULL); CHECK(sign->error_callback.fn != vrfy->error_callback.fn); - CHECK(sign->error_callback.fn == rustsecp256k1_v0_4_0_default_illegal_callback_fn); + CHECK(sign->error_callback.fn == rustsecp256k1_v0_4_1_default_illegal_callback_fn); /* check if sizes for cloning are consistent */ - CHECK(rustsecp256k1_v0_4_0_context_preallocated_clone_size(none) == rustsecp256k1_v0_4_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); - CHECK(rustsecp256k1_v0_4_0_context_preallocated_clone_size(sign) == rustsecp256k1_v0_4_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); - CHECK(rustsecp256k1_v0_4_0_context_preallocated_clone_size(vrfy) == rustsecp256k1_v0_4_0_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); - CHECK(rustsecp256k1_v0_4_0_context_preallocated_clone_size(both) == rustsecp256k1_v0_4_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); + CHECK(rustsecp256k1_v0_4_1_context_preallocated_clone_size(none) == rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_CONTEXT_NONE)); + CHECK(rustsecp256k1_v0_4_1_context_preallocated_clone_size(sign) == rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); + CHECK(rustsecp256k1_v0_4_1_context_preallocated_clone_size(vrfy) == rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); + CHECK(rustsecp256k1_v0_4_1_context_preallocated_clone_size(both) == rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); /*** clone and destroy all of them to make sure cloning was complete ***/ { - rustsecp256k1_v0_4_0_context *ctx_tmp; + rustsecp256k1_v0_4_1_context *ctx_tmp; if (use_prealloc) { /* clone into a non-preallocated context and then again into a new preallocated one. 
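Destroying the source context after each clone means every context that survives this block was produced by a clone, so any state that rustsecp256k1_v0_4_1_context_clone or the preallocated variant failed to copy would surface in the signing and verification checks further down.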
*/ - ctx_tmp = none; none = rustsecp256k1_v0_4_0_context_clone(none); rustsecp256k1_v0_4_0_context_preallocated_destroy(ctx_tmp); - free(none_prealloc); none_prealloc = malloc(rustsecp256k1_v0_4_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(none_prealloc != NULL); - ctx_tmp = none; none = rustsecp256k1_v0_4_0_context_preallocated_clone(none, none_prealloc); rustsecp256k1_v0_4_0_context_destroy(ctx_tmp); + ctx_tmp = none; none = rustsecp256k1_v0_4_1_context_clone(none); rustsecp256k1_v0_4_1_context_preallocated_destroy(ctx_tmp); + free(none_prealloc); none_prealloc = malloc(rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(none_prealloc != NULL); + ctx_tmp = none; none = rustsecp256k1_v0_4_1_context_preallocated_clone(none, none_prealloc); rustsecp256k1_v0_4_1_context_destroy(ctx_tmp); - ctx_tmp = sign; sign = rustsecp256k1_v0_4_0_context_clone(sign); rustsecp256k1_v0_4_0_context_preallocated_destroy(ctx_tmp); - free(sign_prealloc); sign_prealloc = malloc(rustsecp256k1_v0_4_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); CHECK(sign_prealloc != NULL); - ctx_tmp = sign; sign = rustsecp256k1_v0_4_0_context_preallocated_clone(sign, sign_prealloc); rustsecp256k1_v0_4_0_context_destroy(ctx_tmp); + ctx_tmp = sign; sign = rustsecp256k1_v0_4_1_context_clone(sign); rustsecp256k1_v0_4_1_context_preallocated_destroy(ctx_tmp); + free(sign_prealloc); sign_prealloc = malloc(rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); CHECK(sign_prealloc != NULL); + ctx_tmp = sign; sign = rustsecp256k1_v0_4_1_context_preallocated_clone(sign, sign_prealloc); rustsecp256k1_v0_4_1_context_destroy(ctx_tmp); - ctx_tmp = vrfy; vrfy = rustsecp256k1_v0_4_0_context_clone(vrfy); rustsecp256k1_v0_4_0_context_preallocated_destroy(ctx_tmp); - free(vrfy_prealloc); vrfy_prealloc = malloc(rustsecp256k1_v0_4_0_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); CHECK(vrfy_prealloc != NULL); - ctx_tmp = vrfy; vrfy = rustsecp256k1_v0_4_0_context_preallocated_clone(vrfy, vrfy_prealloc); rustsecp256k1_v0_4_0_context_destroy(ctx_tmp); + ctx_tmp = vrfy; vrfy = rustsecp256k1_v0_4_1_context_clone(vrfy); rustsecp256k1_v0_4_1_context_preallocated_destroy(ctx_tmp); + free(vrfy_prealloc); vrfy_prealloc = malloc(rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); CHECK(vrfy_prealloc != NULL); + ctx_tmp = vrfy; vrfy = rustsecp256k1_v0_4_1_context_preallocated_clone(vrfy, vrfy_prealloc); rustsecp256k1_v0_4_1_context_destroy(ctx_tmp); - ctx_tmp = both; both = rustsecp256k1_v0_4_0_context_clone(both); rustsecp256k1_v0_4_0_context_preallocated_destroy(ctx_tmp); - free(both_prealloc); both_prealloc = malloc(rustsecp256k1_v0_4_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(both_prealloc != NULL); - ctx_tmp = both; both = rustsecp256k1_v0_4_0_context_preallocated_clone(both, both_prealloc); rustsecp256k1_v0_4_0_context_destroy(ctx_tmp); + ctx_tmp = both; both = rustsecp256k1_v0_4_1_context_clone(both); rustsecp256k1_v0_4_1_context_preallocated_destroy(ctx_tmp); + free(both_prealloc); both_prealloc = malloc(rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(both_prealloc != NULL); + ctx_tmp = both; both = rustsecp256k1_v0_4_1_context_preallocated_clone(both, both_prealloc); rustsecp256k1_v0_4_1_context_destroy(ctx_tmp); } else { /* clone into a preallocated context and then again into a new non-preallocated one. 
*/ void *prealloc_tmp; - prealloc_tmp = malloc(rustsecp256k1_v0_4_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(prealloc_tmp != NULL); - ctx_tmp = none; none = rustsecp256k1_v0_4_0_context_preallocated_clone(none, prealloc_tmp); rustsecp256k1_v0_4_0_context_destroy(ctx_tmp); - ctx_tmp = none; none = rustsecp256k1_v0_4_0_context_clone(none); rustsecp256k1_v0_4_0_context_preallocated_destroy(ctx_tmp); + prealloc_tmp = malloc(rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(prealloc_tmp != NULL); + ctx_tmp = none; none = rustsecp256k1_v0_4_1_context_preallocated_clone(none, prealloc_tmp); rustsecp256k1_v0_4_1_context_destroy(ctx_tmp); + ctx_tmp = none; none = rustsecp256k1_v0_4_1_context_clone(none); rustsecp256k1_v0_4_1_context_preallocated_destroy(ctx_tmp); free(prealloc_tmp); - prealloc_tmp = malloc(rustsecp256k1_v0_4_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); CHECK(prealloc_tmp != NULL); - ctx_tmp = sign; sign = rustsecp256k1_v0_4_0_context_preallocated_clone(sign, prealloc_tmp); rustsecp256k1_v0_4_0_context_destroy(ctx_tmp); - ctx_tmp = sign; sign = rustsecp256k1_v0_4_0_context_clone(sign); rustsecp256k1_v0_4_0_context_preallocated_destroy(ctx_tmp); + prealloc_tmp = malloc(rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); CHECK(prealloc_tmp != NULL); + ctx_tmp = sign; sign = rustsecp256k1_v0_4_1_context_preallocated_clone(sign, prealloc_tmp); rustsecp256k1_v0_4_1_context_destroy(ctx_tmp); + ctx_tmp = sign; sign = rustsecp256k1_v0_4_1_context_clone(sign); rustsecp256k1_v0_4_1_context_preallocated_destroy(ctx_tmp); free(prealloc_tmp); - prealloc_tmp = malloc(rustsecp256k1_v0_4_0_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); CHECK(prealloc_tmp != NULL); - ctx_tmp = vrfy; vrfy = rustsecp256k1_v0_4_0_context_preallocated_clone(vrfy, prealloc_tmp); rustsecp256k1_v0_4_0_context_destroy(ctx_tmp); - ctx_tmp = vrfy; vrfy = rustsecp256k1_v0_4_0_context_clone(vrfy); rustsecp256k1_v0_4_0_context_preallocated_destroy(ctx_tmp); + prealloc_tmp = malloc(rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); CHECK(prealloc_tmp != NULL); + ctx_tmp = vrfy; vrfy = rustsecp256k1_v0_4_1_context_preallocated_clone(vrfy, prealloc_tmp); rustsecp256k1_v0_4_1_context_destroy(ctx_tmp); + ctx_tmp = vrfy; vrfy = rustsecp256k1_v0_4_1_context_clone(vrfy); rustsecp256k1_v0_4_1_context_preallocated_destroy(ctx_tmp); free(prealloc_tmp); - prealloc_tmp = malloc(rustsecp256k1_v0_4_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(prealloc_tmp != NULL); - ctx_tmp = both; both = rustsecp256k1_v0_4_0_context_preallocated_clone(both, prealloc_tmp); rustsecp256k1_v0_4_0_context_destroy(ctx_tmp); - ctx_tmp = both; both = rustsecp256k1_v0_4_0_context_clone(both); rustsecp256k1_v0_4_0_context_preallocated_destroy(ctx_tmp); + prealloc_tmp = malloc(rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(prealloc_tmp != NULL); + ctx_tmp = both; both = rustsecp256k1_v0_4_1_context_preallocated_clone(both, prealloc_tmp); rustsecp256k1_v0_4_1_context_destroy(ctx_tmp); + ctx_tmp = both; both = rustsecp256k1_v0_4_1_context_clone(both); rustsecp256k1_v0_4_1_context_preallocated_destroy(ctx_tmp); free(prealloc_tmp); } } /* Verify that the error callback makes it across the clone. 
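The clone functions copy the callback pointers along with the rest of the context state, so the custom handler installed on the sign context before the cloning block must still be in place here.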
*/ CHECK(sign->error_callback.fn != vrfy->error_callback.fn); - CHECK(sign->error_callback.fn == rustsecp256k1_v0_4_0_default_illegal_callback_fn); + CHECK(sign->error_callback.fn == rustsecp256k1_v0_4_1_default_illegal_callback_fn); /* And that it resets back to default. */ - rustsecp256k1_v0_4_0_context_set_error_callback(sign, NULL, NULL); + rustsecp256k1_v0_4_1_context_set_error_callback(sign, NULL, NULL); CHECK(vrfy->error_callback.fn == sign->error_callback.fn); /*** attempt to use them ***/ random_scalar_order_test(&msg); random_scalar_order_test(&key); - rustsecp256k1_v0_4_0_ecmult_gen(&both->ecmult_gen_ctx, &pubj, &key); - rustsecp256k1_v0_4_0_ge_set_gej(&pub, &pubj); + rustsecp256k1_v0_4_1_ecmult_gen(&both->ecmult_gen_ctx, &pubj, &key); + rustsecp256k1_v0_4_1_ge_set_gej(&pub, &pubj); /* Verify context-type checking illegal-argument errors. */ memset(ctmp, 1, 32); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(vrfy, &pubkey, ctmp) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(vrfy, &pubkey, ctmp) == 0); CHECK(ecount == 1); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(sign, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(sign, &pubkey, ctmp) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(vrfy, &sig, ctmp, ctmp, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(vrfy, &sig, ctmp, ctmp, NULL, NULL) == 0); CHECK(ecount == 2); VG_UNDEF(&sig, sizeof(sig)); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(sign, &sig, ctmp, ctmp, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(sign, &sig, ctmp, ctmp, NULL, NULL) == 1); VG_CHECK(&sig, sizeof(sig)); CHECK(ecount2 == 10); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(sign, &sig, ctmp, &pubkey) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(sign, &sig, ctmp, &pubkey) == 0); CHECK(ecount2 == 11); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(vrfy, &sig, ctmp, &pubkey) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(vrfy, &sig, ctmp, &pubkey) == 1); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_tweak_add(sign, &pubkey, ctmp) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_tweak_add(sign, &pubkey, ctmp) == 0); CHECK(ecount2 == 12); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_tweak_add(vrfy, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_tweak_add(vrfy, &pubkey, ctmp) == 1); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul(sign, &pubkey, ctmp) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_tweak_mul(sign, &pubkey, ctmp) == 0); CHECK(ecount2 == 13); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_negate(vrfy, &pubkey) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_negate(vrfy, &pubkey) == 1); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_negate(sign, &pubkey) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_negate(sign, &pubkey) == 1); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_negate(sign, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_negate(sign, NULL) == 0); CHECK(ecount2 == 14); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_negate(vrfy, &zero_pubkey) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_negate(vrfy, &zero_pubkey) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul(vrfy, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_tweak_mul(vrfy, &pubkey, ctmp) == 1); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_4_0_context_randomize(vrfy, ctmp) == 1); + CHECK(rustsecp256k1_v0_4_1_context_randomize(vrfy, ctmp) == 1); CHECK(ecount == 3); - 
CHECK(rustsecp256k1_v0_4_0_context_randomize(vrfy, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_context_randomize(vrfy, NULL) == 1); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_4_0_context_randomize(sign, ctmp) == 1); + CHECK(rustsecp256k1_v0_4_1_context_randomize(sign, ctmp) == 1); CHECK(ecount2 == 14); - CHECK(rustsecp256k1_v0_4_0_context_randomize(sign, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_context_randomize(sign, NULL) == 1); CHECK(ecount2 == 14); - rustsecp256k1_v0_4_0_context_set_illegal_callback(vrfy, NULL, NULL); - rustsecp256k1_v0_4_0_context_set_illegal_callback(sign, NULL, NULL); + rustsecp256k1_v0_4_1_context_set_illegal_callback(vrfy, NULL, NULL); + rustsecp256k1_v0_4_1_context_set_illegal_callback(sign, NULL, NULL); /* obtain a working nonce */ do { random_scalar_order_test(&nonce); - } while(!rustsecp256k1_v0_4_0_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); + } while(!rustsecp256k1_v0_4_1_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); /* try signing */ - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_sign(&sign->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_sign(&sign->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); /* try verifying */ - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&vrfy->ecmult_ctx, &sigr, &sigs, &pub, &msg)); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&both->ecmult_ctx, &sigr, &sigs, &pub, &msg)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&vrfy->ecmult_ctx, &sigr, &sigs, &pub, &msg)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&both->ecmult_ctx, &sigr, &sigs, &pub, &msg)); /* cleanup */ if (use_prealloc) { - rustsecp256k1_v0_4_0_context_preallocated_destroy(none); - rustsecp256k1_v0_4_0_context_preallocated_destroy(sign); - rustsecp256k1_v0_4_0_context_preallocated_destroy(vrfy); - rustsecp256k1_v0_4_0_context_preallocated_destroy(both); + rustsecp256k1_v0_4_1_context_preallocated_destroy(none); + rustsecp256k1_v0_4_1_context_preallocated_destroy(sign); + rustsecp256k1_v0_4_1_context_preallocated_destroy(vrfy); + rustsecp256k1_v0_4_1_context_preallocated_destroy(both); free(none_prealloc); free(sign_prealloc); free(vrfy_prealloc); free(both_prealloc); } else { - rustsecp256k1_v0_4_0_context_destroy(none); - rustsecp256k1_v0_4_0_context_destroy(sign); - rustsecp256k1_v0_4_0_context_destroy(vrfy); - rustsecp256k1_v0_4_0_context_destroy(both); + rustsecp256k1_v0_4_1_context_destroy(none); + rustsecp256k1_v0_4_1_context_destroy(sign); + rustsecp256k1_v0_4_1_context_destroy(vrfy); + rustsecp256k1_v0_4_1_context_destroy(both); } /* Defined as no-op. 
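Passing NULL to either destroy function is explicitly allowed, so callers can invoke them unconditionally on cleanup paths without guarding against contexts that were never created.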
*/ - rustsecp256k1_v0_4_0_context_destroy(NULL); - rustsecp256k1_v0_4_0_context_preallocated_destroy(NULL); + rustsecp256k1_v0_4_1_context_destroy(NULL); + rustsecp256k1_v0_4_1_context_preallocated_destroy(NULL); } @@ -339,81 +345,100 @@ void run_scratch_tests(void) { int32_t ecount = 0; size_t checkpoint; size_t checkpoint_2; - rustsecp256k1_v0_4_0_context *none = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_NONE); - rustsecp256k1_v0_4_0_scratch_space *scratch; - rustsecp256k1_v0_4_0_scratch_space local_scratch; + rustsecp256k1_v0_4_1_context *none = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_NONE); + rustsecp256k1_v0_4_1_scratch_space *scratch; + rustsecp256k1_v0_4_1_scratch_space local_scratch; /* Test public API */ - rustsecp256k1_v0_4_0_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_4_0_context_set_error_callback(none, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount); - scratch = rustsecp256k1_v0_4_0_scratch_space_create(none, 1000); + scratch = rustsecp256k1_v0_4_1_scratch_space_create(none, 1000); CHECK(scratch != NULL); CHECK(ecount == 0); /* Test internal API */ - CHECK(rustsecp256k1_v0_4_0_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000); - CHECK(rustsecp256k1_v0_4_0_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - (ALIGNMENT - 1)); + CHECK(rustsecp256k1_v0_4_1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000); + CHECK(rustsecp256k1_v0_4_1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - (ALIGNMENT - 1)); CHECK(scratch->alloc_size == 0); CHECK(scratch->alloc_size % ALIGNMENT == 0); /* Allocating 500 bytes succeeds */ - checkpoint = rustsecp256k1_v0_4_0_scratch_checkpoint(&none->error_callback, scratch); - CHECK(rustsecp256k1_v0_4_0_scratch_alloc(&none->error_callback, scratch, 500) != NULL); - CHECK(rustsecp256k1_v0_4_0_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc); - CHECK(rustsecp256k1_v0_4_0_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1)); + checkpoint = rustsecp256k1_v0_4_1_scratch_checkpoint(&none->error_callback, scratch); + CHECK(rustsecp256k1_v0_4_1_scratch_alloc(&none->error_callback, scratch, 500) != NULL); + CHECK(rustsecp256k1_v0_4_1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc); + CHECK(rustsecp256k1_v0_4_1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1)); CHECK(scratch->alloc_size != 0); CHECK(scratch->alloc_size % ALIGNMENT == 0); /* Allocating another 501 bytes fails */ - CHECK(rustsecp256k1_v0_4_0_scratch_alloc(&none->error_callback, scratch, 501) == NULL); - CHECK(rustsecp256k1_v0_4_0_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc); - CHECK(rustsecp256k1_v0_4_0_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1)); + CHECK(rustsecp256k1_v0_4_1_scratch_alloc(&none->error_callback, scratch, 501) == NULL); + CHECK(rustsecp256k1_v0_4_1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc); + CHECK(rustsecp256k1_v0_4_1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1)); CHECK(scratch->alloc_size != 0); CHECK(scratch->alloc_size % ALIGNMENT == 0); 
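/* (adj_alloc is 500 rounded up to a multiple of ALIGNMENT, presumably defined earlier in this function as ((500 + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT, so fewer than 501 usable bytes remain of the 1000-byte budget after the first allocation) */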
/* ...but it succeeds once we apply the checkpoint to undo it */ - rustsecp256k1_v0_4_0_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint); + rustsecp256k1_v0_4_1_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint); CHECK(scratch->alloc_size == 0); - CHECK(rustsecp256k1_v0_4_0_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000); - CHECK(rustsecp256k1_v0_4_0_scratch_alloc(&none->error_callback, scratch, 500) != NULL); + CHECK(rustsecp256k1_v0_4_1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000); + CHECK(rustsecp256k1_v0_4_1_scratch_alloc(&none->error_callback, scratch, 500) != NULL); CHECK(scratch->alloc_size != 0); /* try to apply a bad checkpoint */ - checkpoint_2 = rustsecp256k1_v0_4_0_scratch_checkpoint(&none->error_callback, scratch); - rustsecp256k1_v0_4_0_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint); + checkpoint_2 = rustsecp256k1_v0_4_1_scratch_checkpoint(&none->error_callback, scratch); + rustsecp256k1_v0_4_1_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint); CHECK(ecount == 0); - rustsecp256k1_v0_4_0_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint_2); /* checkpoint_2 is after checkpoint */ + rustsecp256k1_v0_4_1_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint_2); /* checkpoint_2 is after checkpoint */ CHECK(ecount == 1); - rustsecp256k1_v0_4_0_scratch_apply_checkpoint(&none->error_callback, scratch, (size_t) -1); /* this is just wildly invalid */ + rustsecp256k1_v0_4_1_scratch_apply_checkpoint(&none->error_callback, scratch, (size_t) -1); /* this is just wildly invalid */ CHECK(ecount == 2); /* try to use badly initialized scratch space */ - rustsecp256k1_v0_4_0_scratch_space_destroy(none, scratch); + rustsecp256k1_v0_4_1_scratch_space_destroy(none, scratch); memset(&local_scratch, 0, sizeof(local_scratch)); scratch = &local_scratch; - CHECK(!rustsecp256k1_v0_4_0_scratch_max_allocation(&none->error_callback, scratch, 0)); + CHECK(!rustsecp256k1_v0_4_1_scratch_max_allocation(&none->error_callback, scratch, 0)); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_4_0_scratch_alloc(&none->error_callback, scratch, 500) == NULL); + CHECK(rustsecp256k1_v0_4_1_scratch_alloc(&none->error_callback, scratch, 500) == NULL); CHECK(ecount == 4); - rustsecp256k1_v0_4_0_scratch_space_destroy(none, scratch); + rustsecp256k1_v0_4_1_scratch_space_destroy(none, scratch); CHECK(ecount == 5); /* Test that large integers do not wrap around in a bad way */ - scratch = rustsecp256k1_v0_4_0_scratch_space_create(none, 1000); + scratch = rustsecp256k1_v0_4_1_scratch_space_create(none, 1000); /* Try max allocation with a large number of objects. Only makes sense if * ALIGNMENT is greater than 1 because otherwise the objects take no extra * space. */ - CHECK(ALIGNMENT <= 1 || !rustsecp256k1_v0_4_0_scratch_max_allocation(&none->error_callback, scratch, (SIZE_MAX / (ALIGNMENT - 1)) + 1)); + CHECK(ALIGNMENT <= 1 || !rustsecp256k1_v0_4_1_scratch_max_allocation(&none->error_callback, scratch, (SIZE_MAX / (ALIGNMENT - 1)) + 1)); /* Try allocating SIZE_MAX to test wrap around which only happens if * ALIGNMENT > 1, otherwise it returns NULL anyway because the scratch * space is too small. 
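With ALIGNMENT > 1, rounding a SIZE_MAX request up to the next ALIGNMENT boundary wraps size_t around to a small value; the CHECK below confirms that the allocator detects the wrap instead of returning an undersized block.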
*/ - CHECK(rustsecp256k1_v0_4_0_scratch_alloc(&none->error_callback, scratch, SIZE_MAX) == NULL); - rustsecp256k1_v0_4_0_scratch_space_destroy(none, scratch); + CHECK(rustsecp256k1_v0_4_1_scratch_alloc(&none->error_callback, scratch, SIZE_MAX) == NULL); + rustsecp256k1_v0_4_1_scratch_space_destroy(none, scratch); /* cleanup */ - rustsecp256k1_v0_4_0_scratch_space_destroy(none, NULL); /* no-op */ - rustsecp256k1_v0_4_0_context_destroy(none); + rustsecp256k1_v0_4_1_scratch_space_destroy(none, NULL); /* no-op */ + rustsecp256k1_v0_4_1_context_destroy(none); +} + +void run_ctz_tests(void) { + static const uint32_t b32[] = {1, 0xffffffff, 0x5e56968f, 0xe0d63129}; + static const uint64_t b64[] = {1, 0xffffffffffffffff, 0xbcd02462139b3fc3, 0x98b5f80c769693ef}; + int shift; + unsigned i; + for (i = 0; i < sizeof(b32) / sizeof(b32[0]); ++i) { + for (shift = 0; shift < 32; ++shift) { + CHECK(rustsecp256k1_v0_4_1_ctz32_var_debruijn(b32[i] << shift) == shift); + CHECK(rustsecp256k1_v0_4_1_ctz32_var(b32[i] << shift) == shift); + } + } + for (i = 0; i < sizeof(b64) / sizeof(b64[0]); ++i) { + for (shift = 0; shift < 64; ++shift) { + CHECK(rustsecp256k1_v0_4_1_ctz64_var_debruijn(b64[i] << shift) == shift); + CHECK(rustsecp256k1_v0_4_1_ctz64_var(b64[i] << shift) == shift); + } + } } /***** HASH TESTS *****/ @@ -438,18 +463,18 @@ void run_sha256_tests(void) { int i; for (i = 0; i < 8; i++) { unsigned char out[32]; - rustsecp256k1_v0_4_0_sha256 hasher; - rustsecp256k1_v0_4_0_sha256_initialize(&hasher); - rustsecp256k1_v0_4_0_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); - rustsecp256k1_v0_4_0_sha256_finalize(&hasher, out); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(out, outputs[i], 32) == 0); + rustsecp256k1_v0_4_1_sha256 hasher; + rustsecp256k1_v0_4_1_sha256_initialize(&hasher); + rustsecp256k1_v0_4_1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); + rustsecp256k1_v0_4_1_sha256_finalize(&hasher, out); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(out, outputs[i], 32) == 0); if (strlen(inputs[i]) > 0) { - int split = rustsecp256k1_v0_4_0_testrand_int(strlen(inputs[i])); - rustsecp256k1_v0_4_0_sha256_initialize(&hasher); - rustsecp256k1_v0_4_0_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); - rustsecp256k1_v0_4_0_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); - rustsecp256k1_v0_4_0_sha256_finalize(&hasher, out); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(out, outputs[i], 32) == 0); + int split = rustsecp256k1_v0_4_1_testrand_int(strlen(inputs[i])); + rustsecp256k1_v0_4_1_sha256_initialize(&hasher); + rustsecp256k1_v0_4_1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); + rustsecp256k1_v0_4_1_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); + rustsecp256k1_v0_4_1_sha256_finalize(&hasher, out); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(out, outputs[i], 32) == 0); } } } @@ -481,19 +506,19 @@ void run_hmac_sha256_tests(void) { }; int i; for (i = 0; i < 6; i++) { - rustsecp256k1_v0_4_0_hmac_sha256 hasher; + rustsecp256k1_v0_4_1_hmac_sha256 hasher; unsigned char out[32]; - rustsecp256k1_v0_4_0_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); - rustsecp256k1_v0_4_0_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); - rustsecp256k1_v0_4_0_hmac_sha256_finalize(&hasher, out); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(out, outputs[i], 32) == 0); + 
rustsecp256k1_v0_4_1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); + rustsecp256k1_v0_4_1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); + rustsecp256k1_v0_4_1_hmac_sha256_finalize(&hasher, out); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(out, outputs[i], 32) == 0); if (strlen(inputs[i]) > 0) { - int split = rustsecp256k1_v0_4_0_testrand_int(strlen(inputs[i])); - rustsecp256k1_v0_4_0_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); - rustsecp256k1_v0_4_0_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); - rustsecp256k1_v0_4_0_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); - rustsecp256k1_v0_4_0_hmac_sha256_finalize(&hasher, out); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(out, outputs[i], 32) == 0); + int split = rustsecp256k1_v0_4_1_testrand_int(strlen(inputs[i])); + rustsecp256k1_v0_4_1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); + rustsecp256k1_v0_4_1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); + rustsecp256k1_v0_4_1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); + rustsecp256k1_v0_4_1_hmac_sha256_finalize(&hasher, out); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(out, outputs[i], 32) == 0); } } } @@ -513,30 +538,30 @@ void run_rfc6979_hmac_sha256_tests(void) { {0x75, 0x97, 0x88, 0x7c, 0xbd, 0x76, 0x32, 0x1f, 0x32, 0xe3, 0x04, 0x40, 0x67, 0x9a, 0x22, 0xcf, 0x7f, 0x8d, 0x9d, 0x2e, 0xac, 0x39, 0x0e, 0x58, 0x1f, 0xea, 0x09, 0x1c, 0xe2, 0x02, 0xba, 0x94} }; - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 rng; + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 rng; unsigned char out[32]; int i; - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_initialize(&rng, key1, 64); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_initialize(&rng, key1, 64); for (i = 0; i < 3; i++) { - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(&rng, out, 32); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(out, out1[i], 32) == 0); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(&rng, out, 32); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(out, out1[i], 32) == 0); } - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_finalize(&rng); - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_initialize(&rng, key1, 65); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_initialize(&rng, key1, 65); for (i = 0; i < 3; i++) { - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(&rng, out, 32); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(out, out1[i], 32) != 0); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(&rng, out, 32); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(out, out1[i], 32) != 0); } - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_finalize(&rng); - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_initialize(&rng, key2, 64); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_initialize(&rng, key2, 64); for (i = 0; i < 3; i++) { - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(&rng, out, 32); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(out, out2[i], 32) == 0); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(&rng, out, 32); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(out, out2[i], 32) == 0); } - rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_finalize(&rng); } /***** RANDOM TESTS *****/ @@ -558,7 +583,7 @@ void test_rand_bits(int rand32, int bits) { /* Multiply 
the output of all rand calls with the odd number m, which should not change the uniformity of its distribution. */ for (i = 0; i < rounds[usebits]; i++) { - uint32_t r = (rand32 ? rustsecp256k1_v0_4_0_testrand32() : rustsecp256k1_v0_4_0_testrand_bits(bits)); + uint32_t r = (rand32 ? rustsecp256k1_v0_4_1_testrand32() : rustsecp256k1_v0_4_1_testrand_bits(bits)); CHECK((((uint64_t)r) >> bits) == 0); for (m = 0; m < sizeof(mults) / sizeof(mults[0]); m++) { uint32_t rm = r * mults[m]; @@ -583,7 +608,7 @@ void test_rand_int(uint32_t range, uint32_t subrange) { uint64_t x = 0; CHECK((range % subrange) == 0); for (i = 0; i < rounds; i++) { - uint32_t r = rustsecp256k1_v0_4_0_testrand_int(range); + uint32_t r = rustsecp256k1_v0_4_1_testrand_int(range); CHECK(r < range); r = r % subrange; x |= (((uint64_t)1) << r); @@ -611,202 +636,924 @@ void run_rand_int(void) { } } -/***** NUM TESTS *****/ +/***** MODINV TESTS *****/ -#ifndef USE_NUM_NONE -void random_num_negate(rustsecp256k1_v0_4_0_num *num) { - if (rustsecp256k1_v0_4_0_testrand_bits(1)) { - rustsecp256k1_v0_4_0_num_negate(num); +/* Compute the modular inverse of (odd) x mod 2^64. */ +uint64_t modinv2p64(uint64_t x) { + /* If w = 1/x mod 2^(2^L), then w*(2 - w*x) = 1/x mod 2^(2^(L+1)). See + * Hacker's Delight second edition, Henry S. Warren, Jr., pages 245-247 for + * why. Start with L=0, for which it is true for every odd x that + * 1/x=1 mod 2. Iterating 6 times gives us 1/x mod 2^64. */ + int l; + uint64_t w = 1; + CHECK(x & 1); + for (l = 0; l < 6; ++l) w *= (2 - w*x); + return w; +} + +/* compute out = (a*b) mod m; if b=NULL, treat b=1. + * + * Out is a 512-bit number (represented as 32 uint16_t's in LE order). The other + * arguments are 256-bit numbers (represented as 16 uint16_t's in LE order). */ +void mulmod256(uint16_t* out, const uint16_t* a, const uint16_t* b, const uint16_t* m) { + uint16_t mul[32]; + uint64_t c = 0; + int i, j; + int m_bitlen = 0; + int mul_bitlen = 0; + + if (b != NULL) { + /* Compute the product of a and b, and put it in mul. */ + for (i = 0; i < 32; ++i) { + for (j = i <= 15 ? 0 : i - 15; j <= i && j <= 15; j++) { + c += (uint64_t)a[j] * b[i - j]; + } + mul[i] = c & 0xFFFF; + c >>= 16; + } + CHECK(c == 0); + + /* compute the highest set bit in mul */ + for (i = 511; i >= 0; --i) { + if ((mul[i >> 4] >> (i & 15)) & 1) { + mul_bitlen = i; + break; + } + } + } else { + /* if b==NULL, set mul=a. */ + memcpy(mul, a, 32); + memset(mul + 16, 0, 32); + /* compute the highest set bit in mul */ + for (i = 255; i >= 0; --i) { + if ((mul[i >> 4] >> (i & 15)) & 1) { + mul_bitlen = i; + break; + } + } + } + + /* Compute the highest set bit in m. */ + for (i = 255; i >= 0; --i) { + if ((m[i >> 4] >> (i & 15)) & 1) { + m_bitlen = i; + break; + } + } + + /* Try do mul -= m<<i, for i going down to 0, whenever the result is not negative */ + for (i = mul_bitlen - m_bitlen; i >= 0; --i) { + uint16_t mul2[32]; + int64_t cs; + + /* Compute mul2 = mul - m<<i. */ + cs = 0; + for (j = 0; j < 32; ++j) { + uint16_t sub = 0; + int p; + for (p = 0; p < 16; ++p) { + int bitpos = j * 16 - i + p; + if (bitpos >= 0 && bitpos < 256) { + sub |= ((m[bitpos >> 4] >> (bitpos & 15)) & 1) << p; + } + } + /* Add mul[j]-sub to accumulator, and shift bottom 16 bits out to mul2[j]. */ + cs += mul[j]; + cs -= sub; + mul2[j] = (cs & 0xFFFF); + cs >>= 16; + } + /* If remainder of subtraction is 0, set mul = mul2.
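(cs holds the final borrow: cs == 0 means mul - (m << i) did not go negative, so the reduced value is kept; this is plain binary long division by conditional subtraction. A decimal analogue of the same loop shape: reducing 9731 mod 97 keeps 9731 - 97*100 = 31, then skips subtracting 97*10 and 97 because both would go negative, leaving 31.)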
*/ + if (cs == 0) { + memcpy(mul, mul2, sizeof(mul)); + } + } + /* Sanity check: test that all limbs higher than m's highest are zero */ + for (i = (m_bitlen >> 4) + 1; i < 32; ++i) { + CHECK(mul[i] == 0); + } + memcpy(out, mul, 32); } -void random_num_order_test(rustsecp256k1_v0_4_0_num *num) { - rustsecp256k1_v0_4_0_scalar sc; - random_scalar_order_test(&sc); - rustsecp256k1_v0_4_0_scalar_get_num(num, &sc); -} - -void random_num_order(rustsecp256k1_v0_4_0_num *num) { - rustsecp256k1_v0_4_0_scalar sc; - random_scalar_order(&sc); - rustsecp256k1_v0_4_0_scalar_get_num(num, &sc); -} - -void test_num_negate(void) { - rustsecp256k1_v0_4_0_num n1; - rustsecp256k1_v0_4_0_num n2; - random_num_order_test(&n1); /* n1 = R */ - random_num_negate(&n1); - rustsecp256k1_v0_4_0_num_copy(&n2, &n1); /* n2 = R */ - rustsecp256k1_v0_4_0_num_sub(&n1, &n2, &n1); /* n1 = n2-n1 = 0 */ - CHECK(rustsecp256k1_v0_4_0_num_is_zero(&n1)); - rustsecp256k1_v0_4_0_num_copy(&n1, &n2); /* n1 = R */ - rustsecp256k1_v0_4_0_num_negate(&n1); /* n1 = -R */ - CHECK(!rustsecp256k1_v0_4_0_num_is_zero(&n1)); - rustsecp256k1_v0_4_0_num_add(&n1, &n2, &n1); /* n1 = n2+n1 = 0 */ - CHECK(rustsecp256k1_v0_4_0_num_is_zero(&n1)); - rustsecp256k1_v0_4_0_num_copy(&n1, &n2); /* n1 = R */ - rustsecp256k1_v0_4_0_num_negate(&n1); /* n1 = -R */ - CHECK(rustsecp256k1_v0_4_0_num_is_neg(&n1) != rustsecp256k1_v0_4_0_num_is_neg(&n2)); - rustsecp256k1_v0_4_0_num_negate(&n1); /* n1 = R */ - CHECK(rustsecp256k1_v0_4_0_num_eq(&n1, &n2)); -} - -void test_num_add_sub(void) { +/* Convert a 256-bit number represented as 16 uint16_t's to signed30 notation. */ +void uint16_to_signed30(rustsecp256k1_v0_4_1_modinv32_signed30* out, const uint16_t* in) { int i; - rustsecp256k1_v0_4_0_scalar s; - rustsecp256k1_v0_4_0_num n1; - rustsecp256k1_v0_4_0_num n2; - rustsecp256k1_v0_4_0_num n1p2, n2p1, n1m2, n2m1; - random_num_order_test(&n1); /* n1 = R1 */ - if (rustsecp256k1_v0_4_0_testrand_bits(1)) { - random_num_negate(&n1); - } - random_num_order_test(&n2); /* n2 = R2 */ - if (rustsecp256k1_v0_4_0_testrand_bits(1)) { - random_num_negate(&n2); - } - rustsecp256k1_v0_4_0_num_add(&n1p2, &n1, &n2); /* n1p2 = R1 + R2 */ - rustsecp256k1_v0_4_0_num_add(&n2p1, &n2, &n1); /* n2p1 = R2 + R1 */ - rustsecp256k1_v0_4_0_num_sub(&n1m2, &n1, &n2); /* n1m2 = R1 - R2 */ - rustsecp256k1_v0_4_0_num_sub(&n2m1, &n2, &n1); /* n2m1 = R2 - R1 */ - CHECK(rustsecp256k1_v0_4_0_num_eq(&n1p2, &n2p1)); - CHECK(!rustsecp256k1_v0_4_0_num_eq(&n1p2, &n1m2)); - rustsecp256k1_v0_4_0_num_negate(&n2m1); /* n2m1 = -R2 + R1 */ - CHECK(rustsecp256k1_v0_4_0_num_eq(&n2m1, &n1m2)); - CHECK(!rustsecp256k1_v0_4_0_num_eq(&n2m1, &n1)); - rustsecp256k1_v0_4_0_num_add(&n2m1, &n2m1, &n2); /* n2m1 = -R2 + R1 + R2 = R1 */ - CHECK(rustsecp256k1_v0_4_0_num_eq(&n2m1, &n1)); - CHECK(!rustsecp256k1_v0_4_0_num_eq(&n2p1, &n1)); - rustsecp256k1_v0_4_0_num_sub(&n2p1, &n2p1, &n2); /* n2p1 = R2 + R1 - R2 = R1 */ - CHECK(rustsecp256k1_v0_4_0_num_eq(&n2p1, &n1)); - - /* check is_one */ - rustsecp256k1_v0_4_0_scalar_set_int(&s, 1); - rustsecp256k1_v0_4_0_scalar_get_num(&n1, &s); - CHECK(rustsecp256k1_v0_4_0_num_is_one(&n1)); - /* check that 2^n + 1 is never 1 */ - rustsecp256k1_v0_4_0_scalar_get_num(&n2, &s); - for (i = 0; i < 250; ++i) { - rustsecp256k1_v0_4_0_num_add(&n1, &n1, &n1); /* n1 *= 2 */ - rustsecp256k1_v0_4_0_num_add(&n1p2, &n1, &n2); /* n1p2 = n1 + 1 */ - CHECK(!rustsecp256k1_v0_4_0_num_is_one(&n1p2)); + memset(out->v, 0, sizeof(out->v)); + for (i = 0; i < 256; ++i) { + out->v[i / 30] |= (int32_t)(((in[i >> 4]) >> (i & 15)) 
& 1) << (i % 30); } } -void test_num_mod(void) { +/* Convert a 256-bit number in signed30 notation to a representation as 16 uint16_t's. */ +void signed30_to_uint16(uint16_t* out, const rustsecp256k1_v0_4_1_modinv32_signed30* in) { int i; - rustsecp256k1_v0_4_0_scalar s; - rustsecp256k1_v0_4_0_num order, n; + memset(out, 0, 32); + for (i = 0; i < 256; ++i) { + out[i >> 4] |= (((in->v[i / 30]) >> (i % 30)) & 1) << (i & 15); + } +} - /* check that 0 mod anything is 0 */ - random_scalar_order_test(&s); - rustsecp256k1_v0_4_0_scalar_get_num(&order, &s); - rustsecp256k1_v0_4_0_scalar_set_int(&s, 0); - rustsecp256k1_v0_4_0_scalar_get_num(&n, &s); - rustsecp256k1_v0_4_0_num_mod(&n, &order); - CHECK(rustsecp256k1_v0_4_0_num_is_zero(&n)); +/* Randomly mutate the sign of limbs in signed30 representation, without changing the value. */ +void mutate_sign_signed30(rustsecp256k1_v0_4_1_modinv32_signed30* x) { + int i; + for (i = 0; i < 16; ++i) { + int pos = rustsecp256k1_v0_4_1_testrand_int(8); + if (x->v[pos] > 0 && x->v[pos + 1] <= 0x3fffffff) { + x->v[pos] -= 0x40000000; + x->v[pos + 1] += 1; + } else if (x->v[pos] < 0 && x->v[pos + 1] >= 0x3fffffff) { + x->v[pos] += 0x40000000; + x->v[pos + 1] -= 1; + } + } +} - /* check that anything mod 1 is 0 */ - rustsecp256k1_v0_4_0_scalar_set_int(&s, 1); - rustsecp256k1_v0_4_0_scalar_get_num(&order, &s); - rustsecp256k1_v0_4_0_scalar_get_num(&n, &s); - rustsecp256k1_v0_4_0_num_mod(&n, &order); - CHECK(rustsecp256k1_v0_4_0_num_is_zero(&n)); +/* Test rustsecp256k1_v0_4_1_modinv32{_var}, using inputs in 16-bit limb format, and returning inverse. */ +void test_modinv32_uint16(uint16_t* out, const uint16_t* in, const uint16_t* mod) { + uint16_t tmp[16]; + rustsecp256k1_v0_4_1_modinv32_signed30 x; + rustsecp256k1_v0_4_1_modinv32_modinfo m; + int i, vartime, nonzero; - /* check that increasing the number past 2^256 does not break this */ - random_scalar_order_test(&s); - rustsecp256k1_v0_4_0_scalar_get_num(&n, &s); - /* multiply by 2^8, which'll test this case with high probability */ + uint16_to_signed30(&x, in); + nonzero = (x.v[0] | x.v[1] | x.v[2] | x.v[3] | x.v[4] | x.v[5] | x.v[6] | x.v[7] | x.v[8]) != 0; + uint16_to_signed30(&m.modulus, mod); + mutate_sign_signed30(&m.modulus); + + /* compute 1/modulus mod 2^30 */ + m.modulus_inv30 = modinv2p64(m.modulus.v[0]) & 0x3fffffff; + CHECK(((m.modulus_inv30 * m.modulus.v[0]) & 0x3fffffff) == 1); + + for (vartime = 0; vartime < 2; ++vartime) { + /* compute inverse */ + (vartime ? rustsecp256k1_v0_4_1_modinv32_var : rustsecp256k1_v0_4_1_modinv32)(&x, &m); + + /* produce output */ + signed30_to_uint16(out, &x); + + /* check if the inverse times the input is 1 (mod m), unless x is 0. */ + mulmod256(tmp, out, in, mod); + CHECK(tmp[0] == nonzero); + for (i = 1; i < 16; ++i) CHECK(tmp[i] == 0); + + /* invert again */ + (vartime ? rustsecp256k1_v0_4_1_modinv32_var : rustsecp256k1_v0_4_1_modinv32)(&x, &m); + + /* check if the result is equal to the input */ + signed30_to_uint16(tmp, &x); + for (i = 0; i < 16; ++i) CHECK(tmp[i] == in[i]); + } +} + +#ifdef SECP256K1_WIDEMUL_INT128 +/* Convert a 256-bit number represented as 16 uint16_t's to signed62 notation. */ +void uint16_to_signed62(rustsecp256k1_v0_4_1_modinv64_signed62* out, const uint16_t* in) { + int i; + memset(out->v, 0, sizeof(out->v)); + for (i = 0; i < 256; ++i) { + out->v[i / 62] |= (int64_t)(((in[i >> 4]) >> (i & 15)) & 1) << (i % 62); + } +} + +/* Convert a 256-bit number in signed62 notation to a representation as 16 uint16_t's. 
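A signed62 number stores a 256-bit value in five int64_t limbs of 62 bits each, the value being the weighted sum v[0] + v[1]*2^62 + v[2]*2^124 + v[3]*2^186 + v[4]*2^248; individual limbs may be negative as long as that sum is preserved, which is exactly the freedom mutate_sign_signed62 below exercises.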
*/ +void signed62_to_uint16(uint16_t* out, const rustsecp256k1_v0_4_1_modinv64_signed62* in) { + int i; + memset(out, 0, 32); + for (i = 0; i < 256; ++i) { + out[i >> 4] |= (((in->v[i / 62]) >> (i % 62)) & 1) << (i & 15); + } +} + +/* Randomly mutate the sign of limbs in signed62 representation, without changing the value. */ +void mutate_sign_signed62(rustsecp256k1_v0_4_1_modinv64_signed62* x) { + static const int64_t M62 = (int64_t)(UINT64_MAX >> 2); + int i; for (i = 0; i < 8; ++i) { - rustsecp256k1_v0_4_0_num_add(&n, &n, &n); + int pos = rustsecp256k1_v0_4_1_testrand_int(4); + if (x->v[pos] > 0 && x->v[pos + 1] <= M62) { + x->v[pos] -= (M62 + 1); + x->v[pos + 1] += 1; + } else if (x->v[pos] < 0 && x->v[pos + 1] >= -M62) { + x->v[pos] += (M62 + 1); + x->v[pos + 1] -= 1; + } } - rustsecp256k1_v0_4_0_num_mod(&n, &order); - CHECK(rustsecp256k1_v0_4_0_num_is_zero(&n)); } -void test_num_jacobi(void) { - rustsecp256k1_v0_4_0_scalar sqr; - rustsecp256k1_v0_4_0_scalar small; - rustsecp256k1_v0_4_0_scalar five; /* five is not a quadratic residue */ - rustsecp256k1_v0_4_0_num order, n; - int i; - /* squares mod 5 are 1, 4 */ - const int jacobi5[10] = { 0, 1, -1, -1, 1, 0, 1, -1, -1, 1 }; +/* Test rustsecp256k1_v0_4_1_modinv64{_var}, using inputs in 16-bit limb format, and returning inverse. */ +void test_modinv64_uint16(uint16_t* out, const uint16_t* in, const uint16_t* mod) { + static const int64_t M62 = (int64_t)(UINT64_MAX >> 2); + uint16_t tmp[16]; + rustsecp256k1_v0_4_1_modinv64_signed62 x; + rustsecp256k1_v0_4_1_modinv64_modinfo m; + int i, vartime, nonzero; - /* check some small values with 5 as the order */ - rustsecp256k1_v0_4_0_scalar_set_int(&five, 5); - rustsecp256k1_v0_4_0_scalar_get_num(&order, &five); - for (i = 0; i < 10; ++i) { - rustsecp256k1_v0_4_0_scalar_set_int(&small, i); - rustsecp256k1_v0_4_0_scalar_get_num(&n, &small); - CHECK(rustsecp256k1_v0_4_0_num_jacobi(&n, &order) == jacobi5[i]); - } + uint16_to_signed62(&x, in); + nonzero = (x.v[0] | x.v[1] | x.v[2] | x.v[3] | x.v[4]) != 0; + uint16_to_signed62(&m.modulus, mod); + mutate_sign_signed62(&m.modulus); - /** test large values with 5 as group order */ - rustsecp256k1_v0_4_0_scalar_get_num(&order, &five); - /* we first need a scalar which is not a multiple of 5 */ - do { - rustsecp256k1_v0_4_0_num fiven; - random_scalar_order_test(&sqr); - rustsecp256k1_v0_4_0_scalar_get_num(&fiven, &five); - rustsecp256k1_v0_4_0_scalar_get_num(&n, &sqr); - rustsecp256k1_v0_4_0_num_mod(&n, &fiven); - } while (rustsecp256k1_v0_4_0_num_is_zero(&n)); - /* next force it to be a residue. 2 is a nonresidue mod 5 so we can - * just multiply by two, i.e. add the number to itself */ - if (rustsecp256k1_v0_4_0_num_jacobi(&n, &order) == -1) { - rustsecp256k1_v0_4_0_num_add(&n, &n, &n); - } + /* compute 1/modulus mod 2^62 */ + m.modulus_inv62 = modinv2p64(m.modulus.v[0]) & M62; + CHECK(((m.modulus_inv62 * m.modulus.v[0]) & M62) == 1); - /* test residue */ - CHECK(rustsecp256k1_v0_4_0_num_jacobi(&n, &order) == 1); - /* test nonresidue */ - rustsecp256k1_v0_4_0_num_add(&n, &n, &n); - CHECK(rustsecp256k1_v0_4_0_num_jacobi(&n, &order) == -1); + for (vartime = 0; vartime < 2; ++vartime) { + /* compute inverse */ + (vartime ? 
rustsecp256k1_v0_4_1_modinv64_var : rustsecp256k1_v0_4_1_modinv64)(&x, &m); - /** test with secp group order as order */ - rustsecp256k1_v0_4_0_scalar_order_get_num(&order); - random_scalar_order_test(&sqr); - rustsecp256k1_v0_4_0_scalar_sqr(&sqr, &sqr); - /* test residue */ - rustsecp256k1_v0_4_0_scalar_get_num(&n, &sqr); - CHECK(rustsecp256k1_v0_4_0_num_jacobi(&n, &order) == 1); - /* test nonresidue */ - rustsecp256k1_v0_4_0_scalar_mul(&sqr, &sqr, &five); - rustsecp256k1_v0_4_0_scalar_get_num(&n, &sqr); - CHECK(rustsecp256k1_v0_4_0_num_jacobi(&n, &order) == -1); - /* test multiple of the order*/ - CHECK(rustsecp256k1_v0_4_0_num_jacobi(&order, &order) == 0); + /* produce output */ + signed62_to_uint16(out, &x); - /* check one less than the order */ - rustsecp256k1_v0_4_0_scalar_set_int(&small, 1); - rustsecp256k1_v0_4_0_scalar_get_num(&n, &small); - rustsecp256k1_v0_4_0_num_sub(&n, &order, &n); - CHECK(rustsecp256k1_v0_4_0_num_jacobi(&n, &order) == 1); /* sage confirms this is 1 */ -} + /* check if the inverse times the input is 1 (mod m), unless x is 0. */ + mulmod256(tmp, out, in, mod); + CHECK(tmp[0] == nonzero); + for (i = 1; i < 16; ++i) CHECK(tmp[i] == 0); -void run_num_smalltests(void) { - int i; - for (i = 0; i < 100*count; i++) { - test_num_negate(); - test_num_add_sub(); - test_num_mod(); - test_num_jacobi(); + /* invert again */ + (vartime ? rustsecp256k1_v0_4_1_modinv64_var : rustsecp256k1_v0_4_1_modinv64)(&x, &m); + + /* check if the result is equal to the input */ + signed62_to_uint16(tmp, &x); + for (i = 0; i < 16; ++i) CHECK(tmp[i] == in[i]); } } #endif +/* test if a and b are coprime */ +int coprime(const uint16_t* a, const uint16_t* b) { + uint16_t x[16], y[16], t[16]; + int i; + int iszero; + memcpy(x, a, 32); + memcpy(y, b, 32); + + /* simple gcd loop: while x!=0, (x,y)=(y%x,x) */ + while (1) { + iszero = 1; + for (i = 0; i < 16; ++i) { + if (x[i] != 0) { + iszero = 0; + break; + } + } + if (iszero) break; + mulmod256(t, y, NULL, x); + memcpy(y, x, 32); + memcpy(x, t, 32); + } + + /* return whether y=1 */ + if (y[0] != 1) return 0; + for (i = 1; i < 16; ++i) { + if (y[i] != 0) return 0; + } + return 1; +} + +void run_modinv_tests(void) { + /* Fixed test cases. Each tuple is (input, modulus, output), each as 16x16 bits in LE order. */ + static const uint16_t CASES[][3][16] = { + /* Test cases triggering edge cases in divsteps */ + + /* Test case known to need 713 divsteps */ + {{0x1513, 0x5389, 0x54e9, 0x2798, 0x1957, 0x66a0, 0x8057, 0x3477, + 0x7784, 0x1052, 0x326a, 0x9331, 0x6506, 0xa95c, 0x91f3, 0xfb5e}, + {0x2bdd, 0x8df4, 0xcc61, 0x481f, 0xdae5, 0x5ca7, 0xf43b, 0x7d54, + 0x13d6, 0x469b, 0x2294, 0x20f4, 0xb2a4, 0xa2d1, 0x3ff1, 0xfd4b}, + {0xffd8, 0xd9a0, 0x456e, 0x81bb, 0xbabd, 0x6cea, 0x6dbd, 0x73ab, + 0xbb94, 0x3d3c, 0xdf08, 0x31c4, 0x3e32, 0xc179, 0x2486, 0xb86b}}, + /* Test case known to need 589 divsteps, reaching delta=-140 and + delta=141. */ + {{0x3fb1, 0x903b, 0x4eb7, 0x4813, 0xd863, 0x26bf, 0xd89f, 0xa8a9, + 0x02fe, 0x57c6, 0x554a, 0x4eab, 0x165e, 0x3d61, 0xee1e, 0x456c}, + {0x9295, 0x823b, 0x5c1f, 0x5386, 0x48e0, 0x02ff, 0x4c2a, 0xa2da, + 0xe58f, 0x967c, 0xc97e, 0x3f5a, 0x69fb, 0x52d9, 0x0a86, 0xb4a3}, + {0x3d30, 0xb893, 0xa809, 0xa7a8, 0x26f5, 0x5b42, 0x55be, 0xf4d0, + 0x12c2, 0x7e6a, 0xe41a, 0x90c7, 0xebfa, 0xf920, 0x304e, 0x1419}}, + /* Test case known to need 650 divsteps, and doing 65 consecutive (f,g/2) steps. 
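In divstep terms, an (f,g/2) step is the branch taken when g is even: f is left unchanged and g is halved, so a long run of such steps stresses the even-g path of the safegcd iteration.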
*/ + {{0x8583, 0x5058, 0xbeae, 0xeb69, 0x48bc, 0x52bb, 0x6a9d, 0xcc94, + 0x2a21, 0x87d5, 0x5b0d, 0x42f6, 0x5b8a, 0x2214, 0xe9d6, 0xa040}, + {0x7531, 0x27cb, 0x7e53, 0xb739, 0x6a5f, 0x83f5, 0xa45c, 0xcb1d, + 0x8a87, 0x1c9c, 0x51d7, 0x851c, 0xb9d8, 0x1fbe, 0xc241, 0xd4a3}, + {0xcdb4, 0x275c, 0x7d22, 0xa906, 0x0173, 0xc054, 0x7fdf, 0x5005, + 0x7fb8, 0x9059, 0xdf51, 0x99df, 0x2654, 0x8f6e, 0x070f, 0xb347}}, + /* example needing 713 divsteps; delta=-2..3 */ + {{0xe2e9, 0xee91, 0x4345, 0xe5ad, 0xf3ec, 0x8f42, 0x0364, 0xd5c9, + 0xff49, 0xbef5, 0x4544, 0x4c7c, 0xae4b, 0xfd9d, 0xb35b, 0xda9d}, + {0x36e7, 0x8cca, 0x2ed0, 0x47b3, 0xaca4, 0xb374, 0x7d2a, 0x0772, + 0x6bdb, 0xe0a7, 0x900b, 0xfe10, 0x788c, 0x6f22, 0xd909, 0xf298}, + {0xd8c6, 0xba39, 0x13ed, 0x198c, 0x16c8, 0xb837, 0xa5f2, 0x9797, + 0x0113, 0x882a, 0x15b5, 0x324c, 0xabee, 0xe465, 0x8170, 0x85ac}}, + /* example needing 713 divsteps; delta=-2..3 */ + {{0xd5b7, 0x2966, 0x040e, 0xf59a, 0x0387, 0xd96d, 0xbfbc, 0xd850, + 0x2d96, 0x872a, 0xad81, 0xc03c, 0xbb39, 0xb7fa, 0xd904, 0xef78}, + {0x6279, 0x4314, 0xfdd3, 0x1568, 0x0982, 0x4d13, 0x625f, 0x010c, + 0x22b1, 0x0cc3, 0xf22d, 0x5710, 0x1109, 0x5751, 0x7714, 0xfcf2}, + {0xdb13, 0x5817, 0x232e, 0xe456, 0xbbbc, 0x6fbe, 0x4572, 0xa358, + 0xc76d, 0x928e, 0x0162, 0x5314, 0x8325, 0x5683, 0xe21b, 0xda88}}, + /* example needing 713 divsteps; delta=-2..3 */ + {{0xa06f, 0x71ee, 0x3bac, 0x9ebb, 0xdeaa, 0x09ed, 0x1cf7, 0x9ec9, + 0x7158, 0x8b72, 0x5d53, 0x5479, 0x5c75, 0xbb66, 0x9125, 0xeccc}, + {0x2941, 0xd46c, 0x3cd4, 0x4a9d, 0x5c4a, 0x256b, 0xbd6c, 0x9b8e, + 0x8fe0, 0x8a14, 0xffe8, 0x2496, 0x618d, 0xa9d7, 0x5018, 0xfb29}, + {0x437c, 0xbd60, 0x7590, 0x94bb, 0x0095, 0xd35e, 0xd4fe, 0xd6da, + 0x0d4e, 0x5342, 0x4cd2, 0x169b, 0x661c, 0x1380, 0xed2d, 0x85c1}}, + /* example reaching delta=-64..65; 661 divsteps */ + {{0xfde4, 0x68d6, 0x6c48, 0x7f77, 0x1c78, 0x96de, 0x2fd9, 0xa6c2, + 0xbbb5, 0xd319, 0x69cf, 0xd4b3, 0xa321, 0xcda0, 0x172e, 0xe530}, + {0xd9e3, 0x0f60, 0x3d86, 0xeeab, 0x25ee, 0x9582, 0x2d50, 0xfe16, + 0xd4e2, 0xe3ba, 0x94e2, 0x9833, 0x6c5e, 0x8982, 0x13b6, 0xe598}, + {0xe675, 0xf55a, 0x10f6, 0xabde, 0x5113, 0xecaa, 0x61ae, 0xad9f, + 0x0c27, 0xef33, 0x62e5, 0x211d, 0x08fa, 0xa78d, 0xc675, 0x8bae}}, + /* example reaching delta=-64..65; 661 divsteps */ + {{0x21bf, 0x52d5, 0x8fd4, 0xaa18, 0x156a, 0x7247, 0xebb8, 0x5717, + 0x4eb5, 0x1421, 0xb58f, 0x3b0b, 0x5dff, 0xe533, 0xb369, 0xd28a}, + {0x9f6b, 0xe463, 0x2563, 0xc74d, 0x6d81, 0x636a, 0x8fc8, 0x7a94, + 0x9429, 0x1585, 0xf35e, 0x7ff5, 0xb64f, 0x9720, 0xba74, 0xe108}, + {0xa5ab, 0xea7b, 0xfe5e, 0x8a85, 0x13be, 0x7934, 0xe8a0, 0xa187, + 0x86b5, 0xe477, 0xb9a4, 0x75d7, 0x538f, 0xdd70, 0xc781, 0xb67d}}, + /* example reaching delta=-64..65; 661 divsteps */ + {{0xa41a, 0x3e8d, 0xf1f5, 0x9493, 0x868c, 0x5103, 0x2725, 0x3ceb, + 0x6032, 0x3624, 0xdc6b, 0x9120, 0xbf4c, 0x8821, 0x91ad, 0xb31a}, + {0x5c0b, 0xdda5, 0x20f8, 0x32a1, 0xaf73, 0x6ec5, 0x4779, 0x43d6, + 0xd454, 0x9573, 0xbf84, 0x5a58, 0xe04e, 0x307e, 0xd1d5, 0xe230}, + {0xda15, 0xbcd6, 0x7180, 0xabd3, 0x04e6, 0x6986, 0xc0d7, 0x90bb, + 0x3a4d, 0x7c95, 0xaaab, 0x9ab3, 0xda34, 0xa7f6, 0x9636, 0x6273}}, + /* example doing 123 consecutive (f,g/2) steps; 615 divsteps */ + {{0xb4d6, 0xb38f, 0x00aa, 0xebda, 0xd4c2, 0x70b8, 0x9dad, 0x58ee, + 0x68f8, 0x48d3, 0xb5ff, 0xf422, 0x9e46, 0x2437, 0x18d0, 0xd9cc}, + {0x5c83, 0xfed7, 0x97f5, 0x3f07, 0xcaad, 0x95b1, 0xb4a4, 0xb005, + 0x23af, 0xdd27, 0x6c0d, 0x932c, 0xe2b2, 0xe3ae, 0xfb96, 0xdf67}, + {0x3105, 0x0127, 0xfd48, 0x039b, 0x35f1, 0xbc6f, 0x6c0a, 0xb572, + 0xe4df, 
0xebad, 0x8edc, 0xb89d, 0x9555, 0x4c26, 0x1fef, 0x997c}}, + /* example doing 123 consecutive (f,g/2) steps; 614 divsteps */ + {{0x5138, 0xd474, 0x385f, 0xc964, 0x00f2, 0x6df7, 0x862d, 0xb185, + 0xb264, 0xe9e1, 0x466c, 0xf39e, 0xafaf, 0x5f41, 0x47e2, 0xc89d}, + {0x8607, 0x9c81, 0x46a2, 0x7dcc, 0xcb0c, 0x9325, 0xe149, 0x2bde, + 0x6632, 0x2869, 0xa261, 0xb163, 0xccee, 0x22ae, 0x91e0, 0xcfd5}, + {0x831c, 0xda22, 0xb080, 0xba7a, 0x26e2, 0x54b0, 0x073b, 0x5ea0, + 0xed4b, 0xcb3d, 0xbba1, 0xbec8, 0xf2ad, 0xae0d, 0x349b, 0x17d1}}, + /* example doing 123 consecutive (f,g/2) steps; 614 divsteps */ + {{0xe9a5, 0xb4ad, 0xd995, 0x9953, 0xcdff, 0x50d7, 0xf715, 0x9dc7, + 0x3e28, 0x15a9, 0x95a3, 0x8554, 0x5b5e, 0xad1d, 0x6d57, 0x3d50}, + {0x3ad9, 0xbd60, 0x5cc7, 0x6b91, 0xadeb, 0x71f6, 0x7cc4, 0xa58a, + 0x2cce, 0xf17c, 0x38c9, 0x97ed, 0x65fb, 0x3fa6, 0xa6bc, 0xeb24}, + {0xf96c, 0x1963, 0x8151, 0xa0cc, 0x299b, 0xf277, 0x001a, 0x16bb, + 0xfd2e, 0x532d, 0x0410, 0xe117, 0x6b00, 0x44ec, 0xca6a, 0x1745}}, + /* example doing 446 (f,g/2) steps; 523 divsteps */ + {{0x3758, 0xa56c, 0xe41e, 0x4e47, 0x0975, 0xa82b, 0x107c, 0x89cf, + 0x2093, 0x5a0c, 0xda37, 0xe007, 0x6074, 0x4f68, 0x2f5a, 0xbb8a}, + {0x4beb, 0xa40f, 0x2c42, 0xd9d6, 0x97e8, 0xca7c, 0xd395, 0x894f, + 0x1f50, 0x8067, 0xa233, 0xb850, 0x1746, 0x1706, 0xbcda, 0xdf32}, + {0x762a, 0xceda, 0x4c45, 0x1ca0, 0x8c37, 0xd8c5, 0xef57, 0x7a2c, + 0x6e98, 0xe38a, 0xc50e, 0x2ca9, 0xcb85, 0x24d5, 0xc29c, 0x61f6}}, + /* example doing 446 (f,g/2) steps; 523 divsteps */ + {{0x6f38, 0x74ad, 0x7332, 0x4073, 0x6521, 0xb876, 0xa370, 0xa6bd, + 0xcea5, 0xbd06, 0x969f, 0x77c6, 0x1e69, 0x7c49, 0x7d51, 0xb6e7}, + {0x3f27, 0x4be4, 0xd81e, 0x1396, 0xb21f, 0x92aa, 0x6dc3, 0x6283, + 0x6ada, 0x3ca2, 0xc1e5, 0x8b9b, 0xd705, 0x5598, 0x8ba1, 0xe087}, + {0x6a22, 0xe834, 0xbc8d, 0xcee9, 0x42fc, 0xfc77, 0x9c45, 0x1ca8, + 0xeb66, 0xed74, 0xaaf9, 0xe75f, 0xfe77, 0x46d2, 0x179b, 0xbf3e}}, + /* example doing 336 (f,(f+g)/2) steps; 693 divsteps */ + {{0x7ea7, 0x444e, 0x84ea, 0xc447, 0x7c1f, 0xab97, 0x3de6, 0x5878, + 0x4e8b, 0xc017, 0x03e0, 0xdc40, 0xbbd0, 0x74ce, 0x0169, 0x7ab5}, + {0x4023, 0x154f, 0xfbe4, 0x8195, 0xfda0, 0xef54, 0x9e9a, 0xc703, + 0x2803, 0xf760, 0x6302, 0xed5b, 0x7157, 0x6456, 0xdd7d, 0xf14b}, + {0xb6fb, 0xe3b3, 0x0733, 0xa77e, 0x44c5, 0x3003, 0xc937, 0xdd4d, + 0x5355, 0x14e9, 0x184e, 0xcefe, 0xe6b5, 0xf2e0, 0x0a28, 0x5b74}}, + /* example doing 336 (f,(f+g)/2) steps; 687 divsteps */ + {{0xa893, 0xb5f4, 0x1ede, 0xa316, 0x242c, 0xbdcc, 0xb017, 0x0836, + 0x3a37, 0x27fb, 0xfb85, 0x251e, 0xa189, 0xb15d, 0xa4b8, 0xc24c}, + {0xb0b7, 0x57ba, 0xbb6d, 0x9177, 0xc896, 0xc7f2, 0x43b4, 0x85a6, + 0xe6c4, 0xe50e, 0x3109, 0x7ca5, 0xd73d, 0x13ff, 0x0c3d, 0xcd62}, + {0x48ca, 0xdb34, 0xe347, 0x2cef, 0x4466, 0x10fb, 0x7ee1, 0x6344, + 0x4308, 0x966d, 0xd4d1, 0xb099, 0x994f, 0xd025, 0x2187, 0x5866}}, + /* example doing 267 (g,(g-f)/2) steps; 678 divsteps */ + {{0x0775, 0x1754, 0x01f6, 0xdf37, 0xc0be, 0x8197, 0x072f, 0x6cf5, + 0x8b36, 0x8069, 0x5590, 0xb92d, 0x6084, 0x47a4, 0x23fe, 0xddd5}, + {0x8e1b, 0xda37, 0x27d9, 0x312e, 0x3a2f, 0xef6d, 0xd9eb, 0x8153, + 0xdcba, 0x9fa3, 0x9f80, 0xead5, 0x134d, 0x2ebb, 0x5ec0, 0xe032}, + {0x1cb6, 0x5a61, 0x1bed, 0x77d6, 0xd5d1, 0x7498, 0xef33, 0x2dd2, + 0x1089, 0xedbd, 0x6958, 0x16ae, 0x336c, 0x45e6, 0x4361, 0xbadc}}, + /* example doing 267 (g,(g-f)/2) steps; 676 divsteps */ + {{0x0207, 0xf948, 0xc430, 0xf36b, 0xf0a7, 0x5d36, 0x751f, 0x132c, + 0x6f25, 0xa630, 0xca1f, 0xc967, 0xaf9c, 0x34e7, 0xa38f, 0xbe9f}, + {0x5fb9, 0x7321, 0x6561, 0x5fed, 0x54ec, 0x9c3a, 0xee0e, 
0x6717, + 0x49af, 0xb896, 0xf4f5, 0x451c, 0x722a, 0xf116, 0x64a9, 0xcf0b}, + {0xf4d7, 0xdb47, 0xfef2, 0x4806, 0x4cb8, 0x18c7, 0xd9a7, 0x4951, + 0x14d8, 0x5c3a, 0xd22d, 0xd7b2, 0x750c, 0x3de7, 0x8b4a, 0x19aa}}, + + /* Test cases triggering edge cases in divsteps variant starting with delta=1/2 */ + + /* example needing 590 divsteps; delta=-5/2..7/2 */ + {{0x9118, 0xb640, 0x53d7, 0x30ab, 0x2a23, 0xd907, 0x9323, 0x5b3a, + 0xb6d4, 0x538a, 0x7637, 0xfe97, 0xfd05, 0x3cc0, 0x453a, 0xfb7e}, + {0x6983, 0x4f75, 0x4ad1, 0x48ad, 0xb2d9, 0x521d, 0x3dbc, 0x9cc0, + 0x4b60, 0x0ac6, 0xd3be, 0x0fb6, 0xd305, 0x3895, 0x2da5, 0xfdf8}, + {0xcec1, 0x33ac, 0xa801, 0x8194, 0xe36c, 0x65ef, 0x103b, 0xca54, + 0xfa9b, 0xb41d, 0x9b52, 0xb6f7, 0xa611, 0x84aa, 0x3493, 0xbf54}}, + /* example needing 590 divsteps; delta=-3/2..5/2 */ + {{0xb5f2, 0x42d0, 0x35e8, 0x8ca0, 0x4b62, 0x6e1d, 0xbdf3, 0x890e, + 0x8c82, 0x23d8, 0xc79a, 0xc8e8, 0x789e, 0x353d, 0x9766, 0xea9d}, + {0x6fa1, 0xacba, 0x4b7a, 0x5de1, 0x95d0, 0xc845, 0xebbf, 0x6f5a, + 0x30cf, 0x52db, 0x69b7, 0xe278, 0x4b15, 0x8411, 0x2ab2, 0xf3e7}, + {0xf12c, 0x9d6d, 0x95fa, 0x1878, 0x9f13, 0x4fb5, 0x3c8b, 0xa451, + 0x7182, 0xc4b6, 0x7e2a, 0x7bb7, 0x6e0e, 0x5b68, 0xde55, 0x9927}}, + /* example needing 590 divsteps; delta=-3/2..5/2 */ + {{0x229c, 0x4ef8, 0x1e93, 0xe5dc, 0xcde5, 0x6d62, 0x263b, 0xad11, + 0xced0, 0x88ff, 0xae8e, 0x3183, 0x11d2, 0xa50b, 0x350d, 0xeb40}, + {0x3157, 0xe2ea, 0x8a02, 0x0aa3, 0x5ae1, 0xb26c, 0xea27, 0x6805, + 0x87e2, 0x9461, 0x37c1, 0x2f8d, 0x85d2, 0x77a8, 0xf805, 0xeec9}, + {0x6f4e, 0x2748, 0xf7e5, 0xd8d3, 0xabe2, 0x7270, 0xc4e0, 0xedc7, + 0xf196, 0x78ca, 0x9139, 0xd8af, 0x72c6, 0xaf2f, 0x85d2, 0x6cd3}}, + /* example needing 590 divsteps; delta=-5/2..7/2 */ + {{0xdce8, 0xf1fe, 0x6708, 0x021e, 0xf1ca, 0xd609, 0x5443, 0x85ce, + 0x7a05, 0x8f9c, 0x90c3, 0x52e7, 0x8e1d, 0x97b8, 0xc0bf, 0xf2a1}, + {0xbd3d, 0xed11, 0x1625, 0xb4c5, 0x844c, 0xa413, 0x2569, 0xb9ba, + 0xcd35, 0xff84, 0xcd6e, 0x7f0b, 0x7d5d, 0x10df, 0x3efe, 0xfbe5}, + {0xa9dd, 0xafef, 0xb1b7, 0x4c8d, 0x50e4, 0xafbf, 0x2d5a, 0xb27c, + 0x0653, 0x66b6, 0x5d36, 0x4694, 0x7e35, 0xc47c, 0x857f, 0x32c5}}, + /* example needing 590 divsteps; delta=-3/2..5/2 */ + {{0x7902, 0xc9f8, 0x926b, 0xaaeb, 0x90f8, 0x1c89, 0xcce3, 0x96b7, + 0x28b2, 0x87a2, 0x136d, 0x695a, 0xa8df, 0x9061, 0x9e31, 0xee82}, + {0xd3a9, 0x3c02, 0x818c, 0x6b81, 0x34b3, 0xebbb, 0xe2c8, 0x7712, + 0xbfd6, 0x8248, 0xa6f4, 0xba6f, 0x03bb, 0xfb54, 0x7575, 0xfe89}, + {0x8246, 0x0d63, 0x478e, 0xf946, 0xf393, 0x0451, 0x08c2, 0x5919, + 0x5fd6, 0x4c61, 0xbeb7, 0x9a15, 0x30e1, 0x55fc, 0x6a01, 0x3724}}, + /* example reaching delta=-127/2..129/2; 571 divsteps */ + {{0x3eff, 0x926a, 0x77f5, 0x1fff, 0x1a5b, 0xf3ef, 0xf64b, 0x8681, + 0xf800, 0xf9bc, 0x761d, 0xe268, 0x62b0, 0xa032, 0xba9c, 0xbe56}, + {0xb8f9, 0x00e7, 0x47b7, 0xdffc, 0xfd9d, 0x5abb, 0xa19b, 0x1868, + 0x31fd, 0x3b29, 0x3674, 0x5449, 0xf54d, 0x1d19, 0x6ac7, 0xff6f}, + {0xf1d7, 0x3551, 0x5682, 0x9adf, 0xe8aa, 0x19a5, 0x8340, 0x71db, + 0xb7ab, 0x4cfd, 0xf661, 0x632c, 0xc27e, 0xd3c6, 0xdf42, 0xd306}}, + /* example reaching delta=-127/2..129/2; 571 divsteps */ + {{0x0000, 0x0000, 0x0000, 0x0000, 0x3aff, 0x2ed7, 0xf2e0, 0xabc7, + 0x8aee, 0x166e, 0x7ed0, 0x9ac7, 0x714a, 0xb9c5, 0x4d58, 0xad6c}, + {0x9cf9, 0x47e2, 0xa421, 0xb277, 0xffc2, 0x2747, 0x6486, 0x94c1, + 0x1d99, 0xd49b, 0x1096, 0x991a, 0xe986, 0xae02, 0xe89b, 0xea36}, + {0x1fb4, 0x98d8, 0x19b7, 0x80e9, 0xcdac, 0xaa5a, 0xf1e6, 0x0074, + 0xe393, 0xed8b, 0x8d5c, 0xe17d, 0x81b3, 0xc16d, 0x54d3, 0x9be3}}, + /* example reaching 
delta=-127/2..129/2; 571 divsteps */ + {{0xd047, 0x7e36, 0x3157, 0x7ab6, 0xb4d9, 0x8dae, 0x7534, 0x4f5d, + 0x489e, 0xa8ab, 0x8a3d, 0xd52c, 0x62af, 0xa032, 0xba9c, 0xbe56}, + {0xb1f1, 0x737f, 0x5964, 0x5afb, 0x3712, 0x8ef9, 0x19f7, 0x9669, + 0x664d, 0x03ad, 0xc352, 0xf7a5, 0xf545, 0x1d19, 0x6ac7, 0xff6f}, + {0xa834, 0x5256, 0x27bc, 0x33bd, 0xba11, 0x5a7b, 0x791e, 0xe6c0, + 0x9ac4, 0x9370, 0x1130, 0x28b4, 0x2b2e, 0x231b, 0x082a, 0x796e}}, + /* example doing 123 consecutive (f,g/2) steps; 554 divsteps */ + {{0x6ab1, 0x6ea0, 0x1a99, 0xe0c2, 0xdd45, 0x645d, 0x8dbc, 0x466a, + 0xfa64, 0x4289, 0xd3f7, 0xfc8f, 0x2894, 0xe3c5, 0xa008, 0xcc14}, + {0xc75f, 0xc083, 0x4cc2, 0x64f2, 0x2aff, 0x4c12, 0x8461, 0xc4ae, + 0xbbfa, 0xb336, 0xe4b2, 0x3ac5, 0x2c22, 0xf56c, 0x5381, 0xe943}, + {0xcd80, 0x760d, 0x4395, 0xb3a6, 0xd497, 0xf583, 0x82bd, 0x1daa, + 0xbe92, 0x2613, 0xfdfb, 0x869b, 0x0425, 0xa333, 0x7056, 0xc9c5}}, + /* example doing 123 consecutive (f,g/2) steps; 554 divsteps */ + {{0x71d4, 0x64df, 0xec4f, 0x74d8, 0x7e0c, 0x40d3, 0x7073, 0x4cc8, + 0x2a2a, 0xb1ff, 0x8518, 0x6513, 0xb0ea, 0x640a, 0x62d9, 0xd5f4}, + {0xdc75, 0xd937, 0x3b13, 0x1d36, 0xdf83, 0xd034, 0x1c1c, 0x4332, + 0x4cc3, 0xeeec, 0x7d94, 0x6771, 0x3384, 0x74b0, 0x947d, 0xf2c4}, + {0x0a82, 0x37a4, 0x12d5, 0xec97, 0x972c, 0xe6bf, 0xc348, 0xa0a9, + 0xc50c, 0xdc7c, 0xae30, 0x19d1, 0x0fca, 0x35e1, 0xd6f6, 0x81ee}}, + /* example doing 123 consecutive (f,g/2) steps; 554 divsteps */ + {{0xa6b1, 0xabc5, 0x5bbc, 0x7f65, 0xdd32, 0xaa73, 0xf5a3, 0x1982, + 0xced4, 0xe949, 0x0fd6, 0x2bc4, 0x2bd7, 0xe3c5, 0xa008, 0xcc14}, + {0x4b5f, 0x8f96, 0xa375, 0xfbcf, 0x1c7d, 0xf1ec, 0x03f5, 0xb35d, + 0xb999, 0xdb1f, 0xc9a1, 0xb4c7, 0x1dd5, 0xf56c, 0x5381, 0xe943}, + {0xaa3d, 0x38b9, 0xf17d, 0xeed9, 0x9988, 0x69ee, 0xeb88, 0x1495, + 0x203f, 0x18c8, 0x82b7, 0xdcb2, 0x34a7, 0x6b00, 0x6998, 0x589a}}, + /* example doing 453 (f,g/2) steps; 514 divsteps */ + {{0xa478, 0xe60d, 0x3244, 0x60e6, 0xada3, 0xfe50, 0xb6b1, 0x2eae, + 0xd0ef, 0xa7b1, 0xef63, 0x05c0, 0xe213, 0x443e, 0x4427, 0x2448}, + {0x258f, 0xf9ef, 0xe02b, 0x92dd, 0xd7f3, 0x252b, 0xa503, 0x9089, + 0xedff, 0x96c1, 0xfe3a, 0x3a39, 0x198a, 0x981d, 0x0627, 0xedb7}, + {0x595a, 0x45be, 0x8fb0, 0x2265, 0xc210, 0x02b8, 0xdce9, 0xe241, + 0xcab6, 0xbf0d, 0x0049, 0x8d9a, 0x2f51, 0xae54, 0x5785, 0xb411}}, + /* example doing 453 (f,g/2) steps; 514 divsteps */ + {{0x48f0, 0x7db3, 0xdafe, 0x1c92, 0x5912, 0xe11a, 0xab52, 0xede1, + 0x3182, 0x8980, 0x5d2b, 0x9b5b, 0x8718, 0xda27, 0x1683, 0x1de2}, + {0x168f, 0x6f36, 0xce7a, 0xf435, 0x19d4, 0xda5e, 0x2351, 0x9af5, + 0xb003, 0x0ef5, 0x3b4c, 0xecec, 0xa9f0, 0x78e1, 0xdfef, 0xe823}, + {0x5f55, 0xfdcc, 0xb233, 0x2914, 0x84f0, 0x97d1, 0x9cf4, 0x2159, + 0xbf56, 0xb79c, 0x17a3, 0x7cef, 0xd5de, 0x34f0, 0x5311, 0x4c54}}, + /* example doing 510 (f,(f+g)/2) steps; 512 divsteps */ + {{0x2789, 0x2e04, 0x6e0e, 0xb6cd, 0xe4de, 0x4dbf, 0x228d, 0x7877, + 0xc335, 0x806b, 0x38cd, 0x8049, 0xa73b, 0xcfa2, 0x82f7, 0x9e19}, + {0xc08d, 0xb99d, 0xb8f3, 0x663d, 0xbbb3, 0x1284, 0x1485, 0x1d49, + 0xc98f, 0x9e78, 0x1588, 0x11e3, 0xd91a, 0xa2c7, 0xfff1, 0xc7b9}, + {0x1e1f, 0x411d, 0x7c49, 0x0d03, 0xe789, 0x2f8e, 0x5d55, 0xa95e, + 0x826e, 0x8de5, 0x52a0, 0x1abc, 0x4cd7, 0xd13a, 0x4395, 0x63e1}}, + /* example doing 510 (f,(f+g)/2) steps; 512 divsteps */ + {{0xd5a1, 0xf786, 0x555c, 0xb14b, 0x44ae, 0x535f, 0x4a49, 0xffc3, + 0xf497, 0x70d1, 0x57c8, 0xa933, 0xc85a, 0x1910, 0x75bf, 0x960b}, + {0xfe53, 0x5058, 0x496d, 0xfdff, 0x6fb8, 0x4100, 0x92bd, 0xe0c4, + 0xda89, 0xe0a4, 0x841b, 0x43d4, 0xa388, 0x957f, 0x99ca, 
0x9abf}, + {0xe530, 0x05bc, 0xfeec, 0xfc7e, 0xbcd3, 0x1239, 0x54cb, 0x7042, + 0xbccb, 0x139e, 0x9076, 0x0203, 0x6068, 0x90c7, 0x1ddf, 0x488d}}, + /* example doing 228 (g,(g-f)/2) steps; 538 divsteps */ + {{0x9488, 0xe54b, 0x0e43, 0x81d2, 0x06e7, 0x4b66, 0x36d0, 0x53d6, + 0x2b68, 0x22ec, 0x3fa9, 0xc1a7, 0x9ad2, 0xa596, 0xb3ac, 0xdf42}, + {0xe31f, 0x0b28, 0x5f3b, 0xc1ff, 0x344c, 0xbf5f, 0xd2ec, 0x2936, + 0x9995, 0xdeb2, 0xae6c, 0x2852, 0xa2c6, 0xb306, 0x8120, 0xe305}, + {0xa56e, 0xfb98, 0x1537, 0x4d85, 0x619e, 0x866c, 0x3cd4, 0x779a, + 0xdd66, 0xa80d, 0xdc2f, 0xcae4, 0xc74c, 0x5175, 0xa65d, 0x605e}}, + /* example doing 228 (g,(g-f)/2) steps; 537 divsteps */ + {{0x8cd5, 0x376d, 0xd01b, 0x7176, 0x19ef, 0xcf09, 0x8403, 0x5e52, + 0x83c1, 0x44de, 0xb91e, 0xb33d, 0xe15c, 0x51e7, 0xbad8, 0x6359}, + {0x3b75, 0xf812, 0x5f9e, 0xa04e, 0x92d3, 0x226e, 0x540e, 0x7c9a, + 0x31c6, 0x46d2, 0x0b7b, 0xdb4a, 0xe662, 0x4950, 0x0265, 0xf76f}, + {0x09ed, 0x692f, 0xe8f1, 0x3482, 0xab54, 0x36b4, 0x8442, 0x6ae9, + 0x4329, 0x6505, 0x183b, 0x1c1d, 0x482d, 0x7d63, 0xb44f, 0xcc09}}, + + /* Test cases with the group order as modulus. */ + + /* Test case with the group order as modulus, needing 635 divsteps. */ + {{0x95ed, 0x6c01, 0xd113, 0x5ff1, 0xd7d0, 0x29cc, 0x5817, 0x6120, + 0xca8e, 0xaad1, 0x25ae, 0x8e84, 0x9af6, 0x30bf, 0xf0ed, 0x1686}, + {0x4141, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x1631, 0xbf4a, 0x286a, 0x2716, 0x469f, 0x2ac8, 0x1312, 0xe9bc, + 0x04f4, 0x304b, 0x9931, 0x113b, 0xd932, 0xc8f4, 0x0d0d, 0x01a1}}, + /* example with group size as modulus needing 631 divsteps */ + {{0x85ed, 0xc284, 0x9608, 0x3c56, 0x19b6, 0xbb5b, 0x2850, 0xdab7, + 0xa7f5, 0xe9ab, 0x06a4, 0x5bbb, 0x1135, 0xa186, 0xc424, 0xc68b}, + {0x4141, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x8479, 0x450a, 0x8fa3, 0xde05, 0xb2f5, 0x7793, 0x7269, 0xbabb, + 0xc3b3, 0xd49b, 0x3377, 0x03c6, 0xe694, 0xc760, 0xd3cb, 0x2811}}, + /* example with group size as modulus needing 565 divsteps starting at delta=1/2 */ + {{0x8432, 0x5ceb, 0xa847, 0x6f1e, 0x51dd, 0x535a, 0x6ddc, 0x70ce, + 0x6e70, 0xc1f6, 0x18f2, 0x2a7e, 0xc8e7, 0x39f8, 0x7e96, 0xebbf}, + {0x4141, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x257e, 0x449f, 0x689f, 0x89aa, 0x3989, 0xb661, 0x376c, 0x1e32, + 0x654c, 0xee2e, 0xf4e2, 0x33c8, 0x3f2f, 0x9716, 0x6046, 0xcaa3}}, + /* Test case with the group size as modulus, needing 981 divsteps with + broken eta handling. */ + {{0xfeb9, 0xb877, 0xee41, 0x7fa3, 0x87da, 0x94c4, 0x9d04, 0xc5ae, + 0x5708, 0x0994, 0xfc79, 0x0916, 0xbf32, 0x3ad8, 0xe11c, 0x5ca2}, + {0x4141, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x0f12, 0x075e, 0xce1c, 0x6f92, 0xc80f, 0xca92, 0x9a04, 0x6126, + 0x4b6c, 0x57d6, 0xca31, 0x97f3, 0x1f99, 0xf4fd, 0xda4d, 0x42ce}}, + /* Test case with the group size as modulus, input = 0. 
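(As the harness above notes, the inverse check is skipped for input 0: the convention is that 0 maps to 0, hence the all-zero expected row below.)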
*/ + {{0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}, + {0x4141, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}}, + /* Test case with the group size as modulus, input = 1. */ + {{0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}, + {0x4141, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}}, + /* Test case with the group size as modulus, input = 2. */ + {{0x0002, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}, + {0x4141, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x20a1, 0x681b, 0x2f46, 0xdfe9, 0x501d, 0x57a4, 0x6e73, 0x5d57, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x7fff}}, + /* Test case with the group size as modulus, input = group - 1. */ + {{0x4140, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x4141, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x4140, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}}, + + /* Test cases with the field size as modulus. */ + + /* Test case with the field size as modulus, needing 637 divsteps. */ + {{0x9ec3, 0x1919, 0xca84, 0x7c11, 0xf996, 0x06f3, 0x5408, 0x6688, + 0x1320, 0xdb8a, 0x632a, 0x0dcb, 0x8a84, 0x6bee, 0x9c95, 0xe34e}, + {0xfc2f, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x18e5, 0x19b6, 0xdf92, 0x1aaa, 0x09fb, 0x8a3f, 0x52b0, 0x8701, + 0xac0c, 0x2582, 0xda44, 0x9bcc, 0x6828, 0x1c53, 0xbd8f, 0xbd2c}}, + /* example with field size as modulus needing 637 divsteps */ + {{0xaec3, 0xa7cf, 0x2f2d, 0x0693, 0x5ad5, 0xa8ff, 0x7ec7, 0x30ff, + 0x0c8b, 0xc242, 0xcab2, 0x063a, 0xf86e, 0x6057, 0x9cbd, 0xf6d8}, + {0xfc2f, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x0310, 0x579d, 0xcb38, 0x9030, 0x3ded, 0x9bb9, 0x1234, 0x63ce, + 0x0c63, 0x8e3d, 0xacfe, 0x3c20, 0xdc85, 0xf859, 0x919e, 0x1d45}}, + /* example with field size as modulus needing 564 divsteps starting at delta=1/2 */ + {{0x63ae, 0x8d10, 0x0071, 0xdb5c, 0xb454, 0x78d1, 0x744a, 0x5f8e, + 0xe4d8, 0x87b1, 0x8e62, 0x9590, 0xcede, 0xa070, 0x36b4, 0x7f6f}, + {0xfc2f, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0xfdc8, 0xe8d5, 0xbe15, 0x9f86, 0xa5fe, 0xf18e, 0xa7ff, 0xd291, + 0xf4c2, 0x9c87, 0xf150, 0x073e, 0x69b8, 0xf7c4, 0xee4b, 0xc7e6}}, + /* Test case with the field size as modulus, needing 935 divsteps with + broken eta handling. 
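(eta is the negated delta variable used by the variable-time divsteps; this vector exercises a historical bug in its handling and is kept as a regression test.)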
*/ + {{0x1b37, 0xbdc3, 0x8bcd, 0x25e3, 0x1eae, 0x567d, 0x30b6, 0xf0d8, + 0x9277, 0x0cf8, 0x9c2e, 0xecd7, 0x631d, 0xe38f, 0xd4f8, 0x5c93}, + {0xfc2f, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x1622, 0xe05b, 0xe880, 0x7de9, 0x3e45, 0xb682, 0xee6c, 0x67ed, + 0xa179, 0x15db, 0x6b0d, 0xa656, 0x7ccb, 0x8ef7, 0xa2ff, 0xe279}}, + /* Test case with the field size as modulus, input = 0. */ + {{0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}, + {0xfc2f, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}}, + /* Test case with the field size as modulus, input = 1. */ + {{0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}, + {0xfc2f, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}}, + /* Test case with the field size as modulus, input = 2. */ + {{0x0002, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}, + {0xfc2f, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0xfe18, 0x7fff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x7fff}}, + /* Test case with the field size as modulus, input = field - 1. */ + {{0xfc2e, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0xfc2f, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0xfc2e, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}}, + + /* Selected from a large number of random inputs to reach small/large + * d/e values in various configurations. 
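(d and e are the algorithm's internal cofactors, maintained so that d*x == f and e*x == g modulo the modulus; the inverse is read from d once f reaches +/-1.)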
*/ + {{0x3a08, 0x23e1, 0x4d8c, 0xe606, 0x3263, 0x67af, 0x9bf1, 0x9d70, + 0xf5fd, 0x12e4, 0x03c8, 0xb9ca, 0xe847, 0x8c5d, 0x6322, 0xbd30}, + {0x8359, 0x59dd, 0x1831, 0x7c1a, 0x1e83, 0xaee1, 0x770d, 0xcea8, + 0xfbb1, 0xeed6, 0x10b5, 0xe2c6, 0x36ea, 0xee17, 0xe32c, 0xffff}, + {0x1727, 0x0f36, 0x6f85, 0x5d0c, 0xca6c, 0x3072, 0x9628, 0x5842, + 0xcb44, 0x7c2b, 0xca4f, 0x62e5, 0x29b1, 0x6ffd, 0x9055, 0xc196}}, + {{0x905d, 0x41c8, 0xa2ff, 0x295b, 0x72bb, 0x4679, 0x6d01, 0x2c98, + 0xb3e0, 0xc537, 0xa310, 0xe07e, 0xe72f, 0x4999, 0x1148, 0xf65e}, + {0x5b41, 0x4239, 0x3c37, 0x5130, 0x30e3, 0xff35, 0xc51f, 0x1a43, + 0xdb23, 0x13cf, 0x9f49, 0xf70c, 0x5e70, 0xd411, 0x3005, 0xf8c6}, + {0xc30e, 0x68f0, 0x201a, 0xe10c, 0x864a, 0x6243, 0xe946, 0x43ae, + 0xf3f1, 0x52dc, 0x1f7f, 0x50d4, 0x2797, 0x064c, 0x5ca4, 0x90e3}}, + {{0xf1b5, 0xc6e5, 0xd2c4, 0xff95, 0x27c5, 0x0c92, 0x5d19, 0x7ae5, + 0x4fbe, 0x5438, 0x99e1, 0x880d, 0xd892, 0xa05c, 0x6ffd, 0x7eac}, + {0x2153, 0xcc9d, 0xfc6c, 0x8358, 0x49a1, 0x01e2, 0xcef0, 0x4969, + 0xd69a, 0x8cef, 0xf5b2, 0xfd95, 0xdcc2, 0x71f4, 0x6ae2, 0xceeb}, + {0x9b2e, 0xcdc6, 0x0a5c, 0x7317, 0x9084, 0xe228, 0x56cf, 0xd512, + 0x628a, 0xce21, 0x3473, 0x4e13, 0x8823, 0x1ed0, 0x34d0, 0xbfa3}}, + {{0x5bae, 0x53e5, 0x5f4d, 0x21ca, 0xb875, 0x8ecf, 0x9aa6, 0xbe3c, + 0x9f96, 0x7b82, 0x375d, 0x4d3e, 0x491c, 0xb1eb, 0x04c9, 0xb6c8}, + {0xfcfd, 0x10b7, 0x73b2, 0xd23b, 0xa357, 0x67da, 0x0d9f, 0x8702, + 0xa037, 0xff8e, 0x0e8b, 0x1801, 0x2c5c, 0x4e6e, 0x4558, 0xfff2}, + {0xc50f, 0x5654, 0x6713, 0x5ef5, 0xa7ce, 0xa647, 0xc832, 0x69ce, + 0x1d5c, 0x4310, 0x0746, 0x5a01, 0x96ea, 0xde4b, 0xa88b, 0x5543}}, + {{0xdc7f, 0x5e8c, 0x89d1, 0xb077, 0xd521, 0xcf90, 0x32fa, 0x5737, + 0x839e, 0x1464, 0x007c, 0x09c6, 0x9371, 0xe8ea, 0xc1cb, 0x75c4}, + {0xe3a3, 0x107f, 0xa82a, 0xa375, 0x4578, 0x60f4, 0x75c9, 0x5ee4, + 0x3fd7, 0x2736, 0x2871, 0xd3d2, 0x5f1d, 0x1abb, 0xa764, 0xffff}, + {0x45c6, 0x1f2e, 0xb14c, 0x84d7, 0x7bb7, 0x5a04, 0x0504, 0x3f33, + 0x5cc1, 0xb07a, 0x6a6c, 0x786f, 0x647f, 0xe1d7, 0x78a2, 0x4cf4}}, + {{0xc006, 0x356f, 0x8cd2, 0x967b, 0xb49e, 0x2d4e, 0x14bf, 0x4bcb, + 0xddab, 0xd3f9, 0xa068, 0x2c1c, 0xd242, 0xa56d, 0xf2c7, 0x5f97}, + {0x465b, 0xb745, 0x0e0d, 0x69a9, 0x987d, 0xcb37, 0xf637, 0xb311, + 0xc4d6, 0x2ddb, 0xf68f, 0x2af9, 0x959d, 0x3f53, 0x98f2, 0xf640}, + {0xc0f2, 0x6bfb, 0xf5c3, 0x91c1, 0x6b05, 0x0825, 0x5ca0, 0x7df7, + 0x9d55, 0x6d9e, 0xfe94, 0x2ad9, 0xd9f0, 0xe68b, 0xa72b, 0xd1b2}}, + {{0x2279, 0x61ba, 0x5bc6, 0x136b, 0xf544, 0x717c, 0xafda, 0x02bd, + 0x79af, 0x1fad, 0xea09, 0x81bb, 0x932b, 0x32c9, 0xdf1d, 0xe576}, + {0x8215, 0x7817, 0xca82, 0x43b0, 0x9b06, 0xea65, 0x1291, 0x0621, + 0x0089, 0x46fe, 0xc5a6, 0xddd7, 0x8065, 0xc6a0, 0x214b, 0xfc64}, + {0x04bf, 0x6f2a, 0x86b2, 0x841a, 0x4a95, 0xc632, 0x97b7, 0x5821, + 0x2b18, 0x1bb0, 0x3e97, 0x935e, 0xcc7d, 0x066b, 0xd513, 0xc251}}, + {{0x76e8, 0x5bc2, 0x3eaa, 0x04fc, 0x9974, 0x92c1, 0x7c15, 0xfa89, + 0x1151, 0x36ee, 0x48b2, 0x049c, 0x5f16, 0xcee4, 0x925b, 0xe98e}, + {0x913f, 0x0a2d, 0xa185, 0x9fea, 0xda5a, 0x4025, 0x40d7, 0x7cfa, + 0x88ca, 0xbbe8, 0xb265, 0xb7e4, 0x6cb1, 0xed64, 0xc6f9, 0xffb5}, + {0x6ab1, 0x1a86, 0x5009, 0x152b, 0x1cc4, 0xe2c8, 0x960b, 0x19d0, + 0x3554, 0xc562, 0xd013, 0xcf91, 0x10e1, 0x7933, 0xe195, 0xcf49}}, + {{0x9cb5, 0xd2d7, 0xc6ed, 0xa818, 0xb495, 0x06ee, 0x0f4a, 0x06e3, + 0x4c5a, 0x80ce, 0xd49a, 0x4cd7, 0x7487, 0x92af, 0xe516, 0x676c}, + {0xd6e9, 0x6b85, 0x619a, 0xb52c, 0x20a0, 0x2f79, 0x3545, 0x1edd, + 0x5a6f, 0x8082, 0x9b80, 0xf8f8, 0xc78a, 0xd0a3, 0xadf4, 0xffff}, + {0x01c2, 0x2118, 0xef5e, 0xa877, 0x046a, 0xd2c2, 
0x2ad5, 0x951c, + 0x8900, 0xa5c9, 0x8d0f, 0x6b61, 0x55d3, 0xd572, 0x48de, 0x9219}}, + {{0x5114, 0x0644, 0x23dd, 0x01d3, 0xc101, 0xa659, 0xea17, 0x640f, + 0xf767, 0x2644, 0x9cec, 0xd8ba, 0xd6da, 0x9156, 0x8aeb, 0x875a}, + {0xc1bf, 0xdae9, 0xe96b, 0xce77, 0xf7a1, 0x3e99, 0x5c2e, 0x973b, + 0xd048, 0x5bd0, 0x4e8a, 0xcb85, 0xce39, 0x37f5, 0x815d, 0xffff}, + {0x48cc, 0x35b6, 0x26d4, 0x2ea6, 0x50d6, 0xa2f9, 0x64b6, 0x03bf, + 0xd00c, 0xe057, 0x3343, 0xfb79, 0x3ce5, 0xf717, 0xc5af, 0xe185}}, + {{0x13ff, 0x6c76, 0x2077, 0x16e0, 0xd5ca, 0xf2ad, 0x8dba, 0x8f49, + 0x7887, 0x16f9, 0xb646, 0xfc87, 0xfa31, 0x5096, 0xf08c, 0x3fbe}, + {0x8139, 0x6fd7, 0xf6df, 0xa7bf, 0x6699, 0x5361, 0x6f65, 0x13c8, + 0xf4d1, 0xe28f, 0xc545, 0x0a8c, 0x5274, 0xb0a6, 0xffff, 0xffff}, + {0x22ca, 0x0cd6, 0xc1b5, 0xb064, 0x44a7, 0x297b, 0x495f, 0x34ac, + 0xfa95, 0xec62, 0xf08d, 0x621c, 0x66a6, 0xba94, 0x84c6, 0x8ee0}}, + {{0xaa30, 0x312e, 0x439c, 0x4e88, 0x2e2f, 0x32dc, 0xb880, 0xa28e, + 0xf795, 0xc910, 0xb406, 0x8dd7, 0xb187, 0xa5a5, 0x38f1, 0xe49e}, + {0xfb19, 0xf64a, 0xba6a, 0x8ec2, 0x7255, 0xce89, 0x2cf9, 0x9cba, + 0xe1fe, 0x50da, 0x1705, 0xac52, 0xe3d4, 0x4269, 0x0648, 0xfd77}, + {0xb4c8, 0x6e8a, 0x2b5f, 0x4c2d, 0x5a67, 0xa7bb, 0x7d6d, 0x5569, + 0xa0ea, 0x244a, 0xc0f2, 0xf73d, 0x58cf, 0xac7f, 0xd32b, 0x3018}}, + {{0xc953, 0x1ae1, 0xae46, 0x8709, 0x19c2, 0xa986, 0x9abe, 0x1611, + 0x0395, 0xd5ab, 0xf0f6, 0xb5b0, 0x5b2b, 0x0317, 0x80ba, 0x376d}, + {0xfe77, 0xbc03, 0xac2f, 0x9d00, 0xa175, 0x293d, 0x3b56, 0x0e3a, + 0x0a9c, 0xf40c, 0x690e, 0x1508, 0x95d4, 0xddc4, 0xe805, 0xffff}, + {0xb1ce, 0x0929, 0xa5fe, 0x4b50, 0x9d5d, 0x8187, 0x2557, 0x4376, + 0x11ba, 0xdcef, 0xc1f3, 0xd531, 0x1824, 0x93f6, 0xd81f, 0x8f83}}, + {{0xb8d2, 0xb900, 0x4a0c, 0x7188, 0xa5bf, 0x1b0b, 0x2ae5, 0xa35b, + 0x98e0, 0x610c, 0x86db, 0x2487, 0xa267, 0x002c, 0xebb6, 0xc5f4}, + {0x9cdd, 0x1c1b, 0x2f06, 0x43d1, 0xce47, 0xc334, 0x6e60, 0xc016, + 0x989e, 0x0ab2, 0x0cac, 0x1196, 0xe2d9, 0x2e04, 0xc62b, 0xffff}, + {0xdc36, 0x1f05, 0x6aa9, 0x7a20, 0x944f, 0x2fd3, 0xa553, 0xdb4f, + 0xbd5c, 0x3a75, 0x25d4, 0xe20e, 0xa387, 0x1410, 0xdbb1, 0x1b60}}, + {{0x76b3, 0x2207, 0x4930, 0x5dd7, 0x65a0, 0xd55c, 0xb443, 0x53b7, + 0x5c22, 0x818a, 0xb2e7, 0x9de8, 0x9985, 0xed45, 0x33b1, 0x53e8}, + {0x7913, 0x44e1, 0xf15b, 0x5edd, 0x34f3, 0x4eba, 0x0758, 0x7104, + 0x32d9, 0x28f3, 0x4401, 0x85c5, 0xb695, 0xb899, 0xc0f2, 0xffff}, + {0x7f43, 0xd202, 0x24c9, 0x69f3, 0x74dc, 0x1a69, 0xeaee, 0x5405, + 0x1755, 0x4bb8, 0x04e3, 0x2fd2, 0xada8, 0x39eb, 0x5b4d, 0x96ca}}, + {{0x807b, 0x7112, 0xc088, 0xdafd, 0x02fa, 0x9d95, 0x5e42, 0xc033, + 0xde0a, 0xeecf, 0x8e90, 0x8da1, 0xb17e, 0x9a5b, 0x4c6d, 0x1914}, + {0x4871, 0xd1cb, 0x47d7, 0x327f, 0x09ec, 0x97bb, 0x2fae, 0xd346, + 0x6b78, 0x3707, 0xfeb2, 0xa6ab, 0x13df, 0x76b0, 0x8fb9, 0xffb3}, + {0x179e, 0xb63b, 0x4784, 0x231e, 0x9f42, 0x7f1a, 0xa3fb, 0xdd8c, + 0xd1eb, 0xb4c9, 0x8ca7, 0x018c, 0xf691, 0x576c, 0xa7d6, 0xce27}}, + {{0x5f45, 0x7c64, 0x083d, 0xedd5, 0x08a0, 0x0c64, 0x6c6f, 0xec3c, + 0xe2fb, 0x352c, 0x9303, 0x75e4, 0xb4e0, 0x8b09, 0xaca4, 0x7025}, + {0x1025, 0xb482, 0xfed5, 0xa678, 0x8966, 0x9359, 0x5329, 0x98bb, + 0x85b2, 0x73ba, 0x9982, 0x6fdc, 0xf190, 0xbe8c, 0xdc5c, 0xfd93}, + {0x83a2, 0x87a4, 0xa680, 0x52a1, 0x1ba1, 0x8848, 0x5db7, 0x9744, + 0x409c, 0x0745, 0x0e1e, 0x1cfc, 0x00cd, 0xf573, 0x2071, 0xccaa}}, + {{0xf61f, 0x63d4, 0x536c, 0x9eb9, 0x5ddd, 0xbb11, 0x9014, 0xe904, + 0xfe01, 0x6b45, 0x1858, 0xcb5b, 0x4c38, 0x43e1, 0x381d, 0x7f94}, + {0xf61f, 0x63d4, 0xd810, 0x7ca3, 0x8a04, 0x4b83, 0x11fc, 0xdf94, + 0x4169, 0xbd05, 0x608e, 0x7151, 
0x4fbf, 0xb31a, 0x38a7, 0xa29b}, + {0xe621, 0xdfa5, 0x3d06, 0x1d03, 0x81e6, 0x00da, 0x53a6, 0x965e, + 0x93e5, 0x2164, 0x5b61, 0x59b8, 0xa629, 0x8d73, 0x699a, 0x6111}}, + {{0x4cc3, 0xd29e, 0xf4a3, 0x3428, 0x2048, 0xeec9, 0x5f50, 0x99a4, + 0x6de9, 0x05f2, 0x5aa9, 0x5fd2, 0x98b4, 0x1adc, 0x225f, 0x777f}, + {0xe649, 0x37da, 0x5ba6, 0x5765, 0x3f4a, 0x8a1c, 0x2e79, 0xf550, + 0x1a54, 0xcd1e, 0x7218, 0x3c3c, 0x6311, 0xfe28, 0x95fb, 0xed97}, + {0xe9b6, 0x0c47, 0x3f0e, 0x849b, 0x11f8, 0xe599, 0x5e4d, 0xd618, + 0xa06d, 0x33a0, 0x9a3e, 0x44db, 0xded8, 0x10f0, 0x94d2, 0x81fb}}, + {{0x2e59, 0x7025, 0xd413, 0x455a, 0x1ce3, 0xbd45, 0x7263, 0x27f7, + 0x23e3, 0x518e, 0xbe06, 0xc8c4, 0xe332, 0x4276, 0x68b4, 0xb166}, + {0x596f, 0x0cf6, 0xc8ec, 0x787b, 0x04c1, 0x473c, 0xd2b8, 0x8d54, + 0x9cdf, 0x77f2, 0xd3f3, 0x6735, 0x0638, 0xf80e, 0x9467, 0xc6aa}, + {0xc7e7, 0x1822, 0xb62a, 0xec0d, 0x89cd, 0x7846, 0xbfa2, 0x35d5, + 0xfa38, 0x870f, 0x494b, 0x1697, 0x8b17, 0xf904, 0x10b6, 0x9822}}, + {{0x6d5b, 0x1d4f, 0x0aaf, 0x807b, 0x35fb, 0x7ee8, 0x00c6, 0x059a, + 0xddf0, 0x1fb1, 0xc38a, 0xd78e, 0x2aa4, 0x79e7, 0xad28, 0xc3f1}, + {0xe3bb, 0x174e, 0xe0a8, 0x74b6, 0xbd5b, 0x35f6, 0x6d23, 0x6328, + 0xc11f, 0x83e1, 0xf928, 0xa918, 0x838e, 0xbf43, 0xe243, 0xfffb}, + {0x9cf2, 0x6b8b, 0x3476, 0x9d06, 0xdcf2, 0xdb8a, 0x89cd, 0x4857, + 0x75c2, 0xabb8, 0x490b, 0xc9bd, 0x890e, 0xe36e, 0xd552, 0xfffa}}, + {{0x2f09, 0x9d62, 0xa9fc, 0xf090, 0xd6d1, 0x9d1d, 0x1828, 0xe413, + 0xc92b, 0x3d5a, 0x1373, 0x368c, 0xbaf2, 0x2158, 0x71eb, 0x08a3}, + {0x2f09, 0x1d62, 0x4630, 0x0de1, 0x06dc, 0xf7f1, 0xc161, 0x1e92, + 0x7495, 0x97e4, 0x94b6, 0xa39e, 0x4f1b, 0x18f8, 0x7bd4, 0x0c4c}, + {0xeb3d, 0x723d, 0x0907, 0x525b, 0x463a, 0x49a8, 0xc6b8, 0xce7f, + 0x740c, 0x0d7d, 0xa83b, 0x457f, 0xae8e, 0xc6af, 0xd331, 0x0475}}, + {{0x6abd, 0xc7af, 0x3e4e, 0x95fd, 0x8fc4, 0xee25, 0x1f9c, 0x0afe, + 0x291d, 0xcde0, 0x48f4, 0xb2e8, 0xf7af, 0x8f8d, 0x0bd6, 0x078d}, + {0x4037, 0xbf0e, 0x2081, 0xf363, 0x13b2, 0x381e, 0xfb6e, 0x818e, + 0x27e4, 0x5662, 0x18b0, 0x0cd2, 0x81f5, 0x9415, 0x0d6c, 0xf9fb}, + {0xd205, 0x0981, 0x0498, 0x1f08, 0xdb93, 0x1732, 0x0579, 0x1424, + 0xad95, 0x642f, 0x050c, 0x1d6d, 0xfc95, 0xfc4a, 0xd41b, 0x3521}}, + {{0xf23a, 0x4633, 0xaef4, 0x1a92, 0x3c8b, 0x1f09, 0x30f3, 0x4c56, + 0x2a2f, 0x4f62, 0xf5e4, 0x8329, 0x63cc, 0xb593, 0xec6a, 0xc428}, + {0x93a7, 0xfcf6, 0x606d, 0xd4b2, 0x2aad, 0x28b4, 0xc65b, 0x8998, + 0x4e08, 0xd178, 0x0900, 0xc82b, 0x7470, 0xa342, 0x7c0f, 0xffff}, + {0x315f, 0xf304, 0xeb7b, 0xe5c3, 0x1451, 0x6311, 0x8f37, 0x93a8, + 0x4a38, 0xa6c6, 0xe393, 0x1087, 0x6301, 0xd673, 0x4ec4, 0xffff}}, + {{0x892e, 0xeed0, 0x1165, 0xcbc1, 0x5545, 0xa280, 0x7243, 0x10c9, + 0x9536, 0x36af, 0xb3fc, 0x2d7c, 0xe8a5, 0x09d6, 0xe1d4, 0xe85d}, + {0xae09, 0xc28a, 0xd777, 0xbd80, 0x23d6, 0xf980, 0xeb7c, 0x4e0e, + 0xf7dc, 0x6475, 0xf10a, 0x2d33, 0x5dfd, 0x797a, 0x7f1c, 0xf71a}, + {0x4064, 0x8717, 0xd091, 0x80b0, 0x4527, 0x8442, 0xac8b, 0x9614, + 0xc633, 0x35f5, 0x7714, 0x2e83, 0x4aaa, 0xd2e4, 0x1acd, 0x0562}}, + {{0xdb64, 0x0937, 0x308b, 0x53b0, 0x00e8, 0xc77f, 0x2f30, 0x37f7, + 0x79ce, 0xeb7f, 0xde81, 0x9286, 0xafda, 0x0e62, 0xae00, 0x0067}, + {0x2cc7, 0xd362, 0xb161, 0x0557, 0x4ff2, 0xb9c8, 0x06fe, 0x5f2b, + 0xde33, 0x0190, 0x28c6, 0xb886, 0xee2b, 0x5a4e, 0x3289, 0x0185}, + {0x4215, 0x923e, 0xf34f, 0xb362, 0x88f8, 0xceec, 0xafdd, 0x7f42, + 0x0c57, 0x56b2, 0xa366, 0x6a08, 0x0826, 0xfb8f, 0x1b03, 0x0163}}, + {{0xa4ba, 0x8408, 0x810a, 0xdeba, 0x47a3, 0x853a, 0xeb64, 0x2f74, + 0x3039, 0x038c, 0x7fbb, 0x498e, 0xd1e9, 0x46fb, 0x5691, 0x32a4}, + {0xd749, 0xb49d, 
0x20b7, 0x2af6, 0xd34a, 0xd2da, 0x0a10, 0xf781, + 0x58c9, 0x171f, 0x3cb6, 0x6337, 0x88cd, 0xcf1e, 0xb246, 0x7351}, + {0xf729, 0xcf0a, 0x96ea, 0x032c, 0x4a8f, 0x42fe, 0xbac8, 0xec65, + 0x1510, 0x0d75, 0x4c17, 0x8d29, 0xa03f, 0x8b7e, 0x2c49, 0x0000}}, + {{0x0fa4, 0x8e1c, 0x3788, 0xba3c, 0x8d52, 0xd89d, 0x12c8, 0xeced, + 0x9fe6, 0x9b88, 0xecf3, 0xe3c8, 0xac48, 0x76ed, 0xf23e, 0xda79}, + {0x1103, 0x227c, 0x5b00, 0x3fcf, 0xc5d0, 0x2d28, 0x8020, 0x4d1c, + 0xc6b9, 0x67f9, 0x6f39, 0x989a, 0xda53, 0x3847, 0xd416, 0xe0d0}, + {0xdd8e, 0xcf31, 0x3710, 0x7e44, 0xa511, 0x933c, 0x0cc3, 0x5145, + 0xf632, 0x5e1d, 0x038f, 0x5ce7, 0x7265, 0xda9d, 0xded6, 0x08f8}}, + {{0xe2c8, 0x91d5, 0xa5f5, 0x735f, 0x6b58, 0x56dc, 0xb39d, 0x5c4a, + 0x57d0, 0xa1c2, 0xd92f, 0x9ad4, 0xf7c4, 0x51dd, 0xaf5c, 0x0096}, + {0x1739, 0x7207, 0x7505, 0xbf35, 0x42de, 0x0a29, 0xa962, 0xdedf, + 0x53e8, 0x12bf, 0xcde7, 0xd8e2, 0x8d4d, 0x2c4b, 0xb1b1, 0x0628}, + {0x992d, 0xe3a7, 0xb422, 0xc198, 0x23ab, 0xa6ef, 0xb45d, 0x50da, + 0xa738, 0x014a, 0x2310, 0x85fb, 0x5fe8, 0x1b18, 0x1774, 0x03a7}}, + {{0x1f16, 0x2b09, 0x0236, 0xee90, 0xccf9, 0x9775, 0x8130, 0x4c91, + 0x9091, 0x310b, 0x6dc4, 0x86f6, 0xc2e8, 0xef60, 0xfc0e, 0xf3a4}, + {0x9f49, 0xac15, 0x02af, 0x110f, 0xc59d, 0x5677, 0xa1a9, 0x38d5, + 0x914f, 0xa909, 0x3a3a, 0x4a39, 0x3703, 0xea30, 0x73da, 0xffad}, + {0x15ed, 0xdd16, 0x83c7, 0x270a, 0x862f, 0xd8ad, 0xcaa1, 0x5f41, + 0x99a9, 0x3fc8, 0x7bb2, 0x360a, 0xb06d, 0xfadc, 0x1b36, 0xffa8}}, + {{0xc4e0, 0xb8fd, 0x5106, 0xe169, 0x754c, 0xa58c, 0xc413, 0x8224, + 0x5483, 0x63ec, 0xd477, 0x8473, 0x4778, 0x9281, 0x0000, 0x0000}, + {0x85e1, 0xff54, 0xb200, 0xe413, 0xf4f4, 0x4c0f, 0xfcec, 0xc183, + 0x60d3, 0x1b0c, 0x3834, 0x601c, 0x943c, 0xbe6e, 0x0002, 0x0000}, + {0xf4f8, 0xfd5e, 0x61ef, 0xece8, 0x9199, 0xe5c4, 0x05a6, 0xe6c3, + 0xc4ae, 0x8b28, 0x66b1, 0x8a95, 0x9ece, 0x8f4a, 0x0001, 0x0000}}, + {{0xeae9, 0xa1b4, 0xc6d8, 0x2411, 0x2b5a, 0x1dd0, 0x2dc9, 0xb57b, + 0x5ccd, 0x4957, 0xaf59, 0xa04b, 0x5f42, 0xab7c, 0x2826, 0x526f}, + {0xf407, 0x165a, 0xb724, 0x2f12, 0x2ea1, 0x470b, 0x4464, 0xbd35, + 0x606f, 0xd73e, 0x50d3, 0x8a7f, 0x8029, 0x7ffc, 0xbe31, 0x6cfb}, + {0x8171, 0x1f4c, 0xced2, 0x9c99, 0x6d7e, 0x5a0f, 0xfefb, 0x59e3, + 0xa0c8, 0xabd9, 0xc4c5, 0x57d3, 0xbfa3, 0x4f11, 0x96a2, 0x5a7d}}, + {{0xe068, 0x4cc0, 0x8bcd, 0xc903, 0x9e52, 0xb3e1, 0xd745, 0x0995, + 0xdd8f, 0xf14b, 0xd2ac, 0xd65a, 0xda1d, 0xa742, 0xbac5, 0x474c}, + {0x7481, 0xf2ad, 0x9757, 0x2d82, 0xb683, 0xb16b, 0x0002, 0x7b60, + 0x8f0c, 0x2594, 0x8f64, 0x3b7a, 0x3552, 0x8d9d, 0xb9d7, 0x67eb}, + {0xcaab, 0xb9a1, 0xf966, 0xe311, 0x5b34, 0x0fa0, 0x6abc, 0x8134, + 0xab3d, 0x90f6, 0x1984, 0x9232, 0xec17, 0x74e5, 0x2ceb, 0x434e}}, + {{0x0fb1, 0x7a55, 0x1a5c, 0x53eb, 0xd7b3, 0x7a01, 0xca32, 0x31f6, + 0x3b74, 0x679e, 0x1501, 0x6c57, 0xdb20, 0x8b7c, 0xd7d0, 0x8097}, + {0xb127, 0xb20c, 0xe3a2, 0x96f3, 0xe0d8, 0xd50c, 0x14b4, 0x0b40, + 0x6eeb, 0xa258, 0x99db, 0x3c8c, 0x0f51, 0x4198, 0x3887, 0xffd0}, + {0x0273, 0x9f8c, 0x9669, 0xbbba, 0x1c49, 0x767c, 0xc2af, 0x59f0, + 0x1366, 0xd397, 0x63ac, 0x6fe8, 0x1a9a, 0x1259, 0x01d0, 0x0016}}, + {{0x7876, 0x2a35, 0xa24a, 0x433e, 0x5501, 0x573c, 0xd76d, 0xcb82, + 0x1334, 0xb4a6, 0xf290, 0xc797, 0xeae9, 0x2b83, 0x1e2b, 0x8b14}, + {0x3885, 0x8aef, 0x9dea, 0x2b8c, 0xdd7c, 0xd7cd, 0xb0cc, 0x05ee, + 0x361b, 0x3800, 0xb0d4, 0x4c23, 0xbd3f, 0x5180, 0x9783, 0xff80}, + {0xab36, 0x3104, 0xdae8, 0x0704, 0x4a28, 0x6714, 0x824b, 0x0051, + 0x8134, 0x1f6a, 0x712d, 0x1f03, 0x03b2, 0xecac, 0x377d, 0xfef9}} + }; + + int i, j, ok; + + /* Test known inputs/outputs */ + for (i = 0; 
(size_t)i < sizeof(CASES) / sizeof(CASES[0]); ++i) { + uint16_t out[16]; + test_modinv32_uint16(out, CASES[i][0], CASES[i][1]); + for (j = 0; j < 16; ++j) CHECK(out[j] == CASES[i][2][j]); +#ifdef SECP256K1_WIDEMUL_INT128 + test_modinv64_uint16(out, CASES[i][0], CASES[i][1]); + for (j = 0; j < 16; ++j) CHECK(out[j] == CASES[i][2][j]); +#endif + } + + for (i = 0; i < 100 * count; ++i) { + /* 256-bit numbers in 16-uint16_t's notation */ + static const uint16_t ZERO[16] = {0}; + uint16_t xd[16]; /* the number (in range [0,2^256)) to be inverted */ + uint16_t md[16]; /* the modulus (odd, in range [3,2^256)) */ + uint16_t id[16]; /* the inverse of xd mod md */ + + /* generate random xd and md, so that md is odd, md>1, xd 256) { now = 256 - i; } - rustsecp256k1_v0_4_0_scalar_set_int(&t, rustsecp256k1_v0_4_0_scalar_get_bits_var(&s, 256 - now - i, now)); + rustsecp256k1_v0_4_1_scalar_set_int(&t, rustsecp256k1_v0_4_1_scalar_get_bits_var(&s, 256 - now - i, now)); for (j = 0; j < now; j++) { - rustsecp256k1_v0_4_0_scalar_add(&n, &n, &n); + rustsecp256k1_v0_4_1_scalar_add(&n, &n, &n); } - rustsecp256k1_v0_4_0_scalar_add(&n, &n, &t); + rustsecp256k1_v0_4_1_scalar_add(&n, &n, &t); i += now; } - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&n, &s)); - } - -#ifndef USE_NUM_NONE - { - /* Test that adding the scalars together is equal to adding their numbers together modulo the order. */ - rustsecp256k1_v0_4_0_num rnum; - rustsecp256k1_v0_4_0_num r2num; - rustsecp256k1_v0_4_0_scalar r; - rustsecp256k1_v0_4_0_num_add(&rnum, &snum, &s2num); - rustsecp256k1_v0_4_0_num_mod(&rnum, &order); - rustsecp256k1_v0_4_0_scalar_add(&r, &s, &s2); - rustsecp256k1_v0_4_0_scalar_get_num(&r2num, &r); - CHECK(rustsecp256k1_v0_4_0_num_eq(&rnum, &r2num)); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&n, &s)); } { - /* Test that multiplying the scalars is equal to multiplying their numbers modulo the order. */ - rustsecp256k1_v0_4_0_scalar r; - rustsecp256k1_v0_4_0_num r2num; - rustsecp256k1_v0_4_0_num rnum; - rustsecp256k1_v0_4_0_num_mul(&rnum, &snum, &s2num); - rustsecp256k1_v0_4_0_num_mod(&rnum, &order); - rustsecp256k1_v0_4_0_scalar_mul(&r, &s, &s2); - rustsecp256k1_v0_4_0_scalar_get_num(&r2num, &r); - CHECK(rustsecp256k1_v0_4_0_num_eq(&rnum, &r2num)); - /* The result can only be zero if at least one of the factors was zero. */ - CHECK(rustsecp256k1_v0_4_0_scalar_is_zero(&r) == (rustsecp256k1_v0_4_0_scalar_is_zero(&s) || rustsecp256k1_v0_4_0_scalar_is_zero(&s2))); - /* The results can only be equal to one of the factors if that factor was zero, or the other factor was one. */ - CHECK(rustsecp256k1_v0_4_0_num_eq(&rnum, &snum) == (rustsecp256k1_v0_4_0_scalar_is_zero(&s) || rustsecp256k1_v0_4_0_scalar_is_one(&s2))); - CHECK(rustsecp256k1_v0_4_0_num_eq(&rnum, &s2num) == (rustsecp256k1_v0_4_0_scalar_is_zero(&s2) || rustsecp256k1_v0_4_0_scalar_is_one(&s))); - } - - { - rustsecp256k1_v0_4_0_scalar neg; - rustsecp256k1_v0_4_0_num negnum; - rustsecp256k1_v0_4_0_num negnum2; - /* Check that comparison with zero matches comparison with zero on the number. */ - CHECK(rustsecp256k1_v0_4_0_num_is_zero(&snum) == rustsecp256k1_v0_4_0_scalar_is_zero(&s)); - /* Check that comparison with the half order is equal to testing for high scalar. 
*/ - CHECK(rustsecp256k1_v0_4_0_scalar_is_high(&s) == (rustsecp256k1_v0_4_0_num_cmp(&snum, &half_order) > 0)); - rustsecp256k1_v0_4_0_scalar_negate(&neg, &s); - rustsecp256k1_v0_4_0_num_sub(&negnum, &order, &snum); - rustsecp256k1_v0_4_0_num_mod(&negnum, &order); - /* Check that comparison with the half order is equal to testing for high scalar after negation. */ - CHECK(rustsecp256k1_v0_4_0_scalar_is_high(&neg) == (rustsecp256k1_v0_4_0_num_cmp(&negnum, &half_order) > 0)); - /* Negating should change the high property, unless the value was already zero. */ - CHECK((rustsecp256k1_v0_4_0_scalar_is_high(&s) == rustsecp256k1_v0_4_0_scalar_is_high(&neg)) == rustsecp256k1_v0_4_0_scalar_is_zero(&s)); - rustsecp256k1_v0_4_0_scalar_get_num(&negnum2, &neg); - /* Negating a scalar should be equal to (order - n) mod order on the number. */ - CHECK(rustsecp256k1_v0_4_0_num_eq(&negnum, &negnum2)); - rustsecp256k1_v0_4_0_scalar_add(&neg, &neg, &s); - /* Adding a number to its negation should result in zero. */ - CHECK(rustsecp256k1_v0_4_0_scalar_is_zero(&neg)); - rustsecp256k1_v0_4_0_scalar_negate(&neg, &neg); - /* Negating zero should still result in zero. */ - CHECK(rustsecp256k1_v0_4_0_scalar_is_zero(&neg)); - } - - { - /* Test rustsecp256k1_v0_4_0_scalar_mul_shift_var. */ - rustsecp256k1_v0_4_0_scalar r; - rustsecp256k1_v0_4_0_num one; - rustsecp256k1_v0_4_0_num rnum; - rustsecp256k1_v0_4_0_num rnum2; - unsigned char cone[1] = {0x01}; - unsigned int shift = 256 + rustsecp256k1_v0_4_0_testrand_int(257); - rustsecp256k1_v0_4_0_scalar_mul_shift_var(&r, &s1, &s2, shift); - rustsecp256k1_v0_4_0_num_mul(&rnum, &s1num, &s2num); - rustsecp256k1_v0_4_0_num_shift(&rnum, shift - 1); - rustsecp256k1_v0_4_0_num_set_bin(&one, cone, 1); - rustsecp256k1_v0_4_0_num_add(&rnum, &rnum, &one); - rustsecp256k1_v0_4_0_num_shift(&rnum, 1); - rustsecp256k1_v0_4_0_scalar_get_num(&rnum2, &r); - CHECK(rustsecp256k1_v0_4_0_num_eq(&rnum, &rnum2)); - } - - { - /* test rustsecp256k1_v0_4_0_scalar_shr_int */ - rustsecp256k1_v0_4_0_scalar r; + /* test rustsecp256k1_v0_4_1_scalar_shr_int */ + rustsecp256k1_v0_4_1_scalar r; int i; random_scalar_order_test(&r); for (i = 0; i < 100; ++i) { int low; - int shift = 1 + rustsecp256k1_v0_4_0_testrand_int(15); + int shift = 1 + rustsecp256k1_v0_4_1_testrand_int(15); int expected = r.d[0] % (1 << shift); - low = rustsecp256k1_v0_4_0_scalar_shr_int(&r, shift); + low = rustsecp256k1_v0_4_1_scalar_shr_int(&r, shift); CHECK(expected == low); } } -#endif - - { - /* Test that scalar inverses are equal to the inverse of their number modulo the order. */ - if (!rustsecp256k1_v0_4_0_scalar_is_zero(&s)) { - rustsecp256k1_v0_4_0_scalar inv; -#ifndef USE_NUM_NONE - rustsecp256k1_v0_4_0_num invnum; - rustsecp256k1_v0_4_0_num invnum2; -#endif - rustsecp256k1_v0_4_0_scalar_inverse(&inv, &s); -#ifndef USE_NUM_NONE - rustsecp256k1_v0_4_0_num_mod_inverse(&invnum, &snum, &order); - rustsecp256k1_v0_4_0_scalar_get_num(&invnum2, &inv); - CHECK(rustsecp256k1_v0_4_0_num_eq(&invnum, &invnum2)); -#endif - rustsecp256k1_v0_4_0_scalar_mul(&inv, &inv, &s); - /* Multiplying a scalar with its inverse must result in one. */ - CHECK(rustsecp256k1_v0_4_0_scalar_is_one(&inv)); - rustsecp256k1_v0_4_0_scalar_inverse(&inv, &inv); - /* Inverting one must result in one. */ - CHECK(rustsecp256k1_v0_4_0_scalar_is_one(&inv)); -#ifndef USE_NUM_NONE - rustsecp256k1_v0_4_0_scalar_get_num(&invnum, &inv); - CHECK(rustsecp256k1_v0_4_0_num_is_one(&invnum)); -#endif - } - } { /* Test commutativity of add. 
*/ - rustsecp256k1_v0_4_0_scalar r1, r2; - rustsecp256k1_v0_4_0_scalar_add(&r1, &s1, &s2); - rustsecp256k1_v0_4_0_scalar_add(&r2, &s2, &s1); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_4_1_scalar r1, r2; + rustsecp256k1_v0_4_1_scalar_add(&r1, &s1, &s2); + rustsecp256k1_v0_4_1_scalar_add(&r2, &s2, &s1); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&r1, &r2)); } { - rustsecp256k1_v0_4_0_scalar r1, r2; - rustsecp256k1_v0_4_0_scalar b; + rustsecp256k1_v0_4_1_scalar r1, r2; + rustsecp256k1_v0_4_1_scalar b; int i; /* Test add_bit. */ - int bit = rustsecp256k1_v0_4_0_testrand_bits(8); - rustsecp256k1_v0_4_0_scalar_set_int(&b, 1); - CHECK(rustsecp256k1_v0_4_0_scalar_is_one(&b)); + int bit = rustsecp256k1_v0_4_1_testrand_bits(8); + rustsecp256k1_v0_4_1_scalar_set_int(&b, 1); + CHECK(rustsecp256k1_v0_4_1_scalar_is_one(&b)); for (i = 0; i < bit; i++) { - rustsecp256k1_v0_4_0_scalar_add(&b, &b, &b); + rustsecp256k1_v0_4_1_scalar_add(&b, &b, &b); } r1 = s1; r2 = s1; - if (!rustsecp256k1_v0_4_0_scalar_add(&r1, &r1, &b)) { + if (!rustsecp256k1_v0_4_1_scalar_add(&r1, &r1, &b)) { /* No overflow happened. */ - rustsecp256k1_v0_4_0_scalar_cadd_bit(&r2, bit, 1); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_4_1_scalar_cadd_bit(&r2, bit, 1); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&r1, &r2)); /* cadd is a noop when flag is zero */ - rustsecp256k1_v0_4_0_scalar_cadd_bit(&r2, bit, 0); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_4_1_scalar_cadd_bit(&r2, bit, 0); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&r1, &r2)); } } { /* Test commutativity of mul. */ - rustsecp256k1_v0_4_0_scalar r1, r2; - rustsecp256k1_v0_4_0_scalar_mul(&r1, &s1, &s2); - rustsecp256k1_v0_4_0_scalar_mul(&r2, &s2, &s1); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_4_1_scalar r1, r2; + rustsecp256k1_v0_4_1_scalar_mul(&r1, &s1, &s2); + rustsecp256k1_v0_4_1_scalar_mul(&r2, &s2, &s1); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&r1, &r2)); } { /* Test associativity of add. */ - rustsecp256k1_v0_4_0_scalar r1, r2; - rustsecp256k1_v0_4_0_scalar_add(&r1, &s1, &s2); - rustsecp256k1_v0_4_0_scalar_add(&r1, &r1, &s); - rustsecp256k1_v0_4_0_scalar_add(&r2, &s2, &s); - rustsecp256k1_v0_4_0_scalar_add(&r2, &s1, &r2); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_4_1_scalar r1, r2; + rustsecp256k1_v0_4_1_scalar_add(&r1, &s1, &s2); + rustsecp256k1_v0_4_1_scalar_add(&r1, &r1, &s); + rustsecp256k1_v0_4_1_scalar_add(&r2, &s2, &s); + rustsecp256k1_v0_4_1_scalar_add(&r2, &s1, &r2); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&r1, &r2)); } { /* Test associativity of mul. */ - rustsecp256k1_v0_4_0_scalar r1, r2; - rustsecp256k1_v0_4_0_scalar_mul(&r1, &s1, &s2); - rustsecp256k1_v0_4_0_scalar_mul(&r1, &r1, &s); - rustsecp256k1_v0_4_0_scalar_mul(&r2, &s2, &s); - rustsecp256k1_v0_4_0_scalar_mul(&r2, &s1, &r2); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_4_1_scalar r1, r2; + rustsecp256k1_v0_4_1_scalar_mul(&r1, &s1, &s2); + rustsecp256k1_v0_4_1_scalar_mul(&r1, &r1, &s); + rustsecp256k1_v0_4_1_scalar_mul(&r2, &s2, &s); + rustsecp256k1_v0_4_1_scalar_mul(&r2, &s1, &r2); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&r1, &r2)); } { /* Test distributivity of mul over add.
*/ - rustsecp256k1_v0_4_0_scalar r1, r2, t; - rustsecp256k1_v0_4_0_scalar_add(&r1, &s1, &s2); - rustsecp256k1_v0_4_0_scalar_mul(&r1, &r1, &s); - rustsecp256k1_v0_4_0_scalar_mul(&r2, &s1, &s); - rustsecp256k1_v0_4_0_scalar_mul(&t, &s2, &s); - rustsecp256k1_v0_4_0_scalar_add(&r2, &r2, &t); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&r1, &r2)); - } - - { - /* Test square. */ - rustsecp256k1_v0_4_0_scalar r1, r2; - rustsecp256k1_v0_4_0_scalar_sqr(&r1, &s1); - rustsecp256k1_v0_4_0_scalar_mul(&r2, &s1, &s1); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_4_1_scalar r1, r2, t; + rustsecp256k1_v0_4_1_scalar_add(&r1, &s1, &s2); + rustsecp256k1_v0_4_1_scalar_mul(&r1, &r1, &s); + rustsecp256k1_v0_4_1_scalar_mul(&r2, &s1, &s); + rustsecp256k1_v0_4_1_scalar_mul(&t, &s2, &s); + rustsecp256k1_v0_4_1_scalar_add(&r2, &r2, &t); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&r1, &r2)); } { /* Test multiplicative identity. */ - rustsecp256k1_v0_4_0_scalar r1, v1; - rustsecp256k1_v0_4_0_scalar_set_int(&v1,1); - rustsecp256k1_v0_4_0_scalar_mul(&r1, &s1, &v1); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&r1, &s1)); + rustsecp256k1_v0_4_1_scalar r1, v1; + rustsecp256k1_v0_4_1_scalar_set_int(&v1,1); + rustsecp256k1_v0_4_1_scalar_mul(&r1, &s1, &v1); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&r1, &s1)); } { /* Test additive identity. */ - rustsecp256k1_v0_4_0_scalar r1, v0; - rustsecp256k1_v0_4_0_scalar_set_int(&v0,0); - rustsecp256k1_v0_4_0_scalar_add(&r1, &s1, &v0); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&r1, &s1)); + rustsecp256k1_v0_4_1_scalar r1, v0; + rustsecp256k1_v0_4_1_scalar_set_int(&v0,0); + rustsecp256k1_v0_4_1_scalar_add(&r1, &s1, &v0); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&r1, &s1)); } { /* Test zero product property. */ - rustsecp256k1_v0_4_0_scalar r1, v0; - rustsecp256k1_v0_4_0_scalar_set_int(&v0,0); - rustsecp256k1_v0_4_0_scalar_mul(&r1, &s1, &v0); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&r1, &v0)); + rustsecp256k1_v0_4_1_scalar r1, v0; + rustsecp256k1_v0_4_1_scalar_set_int(&v0,0); + rustsecp256k1_v0_4_1_scalar_mul(&r1, &s1, &v0); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&r1, &v0)); } } void run_scalar_set_b32_seckey_tests(void) { unsigned char b32[32]; - rustsecp256k1_v0_4_0_scalar s1; - rustsecp256k1_v0_4_0_scalar s2; + rustsecp256k1_v0_4_1_scalar s1; + rustsecp256k1_v0_4_1_scalar s2; /* Usually set_b32 and set_b32_seckey give the same result */ random_scalar_order_b32(b32); - rustsecp256k1_v0_4_0_scalar_set_b32(&s1, b32, NULL); - CHECK(rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&s2, b32) == 1); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&s1, &s2) == 1); + rustsecp256k1_v0_4_1_scalar_set_b32(&s1, b32, NULL); + CHECK(rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&s2, b32) == 1); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&s1, &s2) == 1); memset(b32, 0, sizeof(b32)); - CHECK(rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&s2, b32) == 0); + CHECK(rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&s2, b32) == 0); memset(b32, 0xFF, sizeof(b32)); - CHECK(rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&s2, b32) == 0); + CHECK(rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&s2, b32) == 0); } void run_scalar_tests(void) { @@ -1116,65 +1743,23 @@ void run_scalar_tests(void) { { /* (-1)+1 should be zero. 
*/ - rustsecp256k1_v0_4_0_scalar s, o; - rustsecp256k1_v0_4_0_scalar_set_int(&s, 1); - CHECK(rustsecp256k1_v0_4_0_scalar_is_one(&s)); - rustsecp256k1_v0_4_0_scalar_negate(&o, &s); - rustsecp256k1_v0_4_0_scalar_add(&o, &o, &s); - CHECK(rustsecp256k1_v0_4_0_scalar_is_zero(&o)); - rustsecp256k1_v0_4_0_scalar_negate(&o, &o); - CHECK(rustsecp256k1_v0_4_0_scalar_is_zero(&o)); + rustsecp256k1_v0_4_1_scalar s, o; + rustsecp256k1_v0_4_1_scalar_set_int(&s, 1); + CHECK(rustsecp256k1_v0_4_1_scalar_is_one(&s)); + rustsecp256k1_v0_4_1_scalar_negate(&o, &s); + rustsecp256k1_v0_4_1_scalar_add(&o, &o, &s); + CHECK(rustsecp256k1_v0_4_1_scalar_is_zero(&o)); + rustsecp256k1_v0_4_1_scalar_negate(&o, &o); + CHECK(rustsecp256k1_v0_4_1_scalar_is_zero(&o)); } -#ifndef USE_NUM_NONE - { - /* Test rustsecp256k1_v0_4_0_scalar_set_b32 boundary conditions */ - rustsecp256k1_v0_4_0_num order; - rustsecp256k1_v0_4_0_scalar scalar; - unsigned char bin[32]; - unsigned char bin_tmp[32]; - int overflow = 0; - /* 2^256-1 - order */ - static const rustsecp256k1_v0_4_0_scalar all_ones_minus_order = SECP256K1_SCALAR_CONST( - 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000001UL, - 0x45512319UL, 0x50B75FC4UL, 0x402DA173UL, 0x2FC9BEBEUL - ); - - /* A scalar set to 0s should be 0. */ - memset(bin, 0, 32); - rustsecp256k1_v0_4_0_scalar_set_b32(&scalar, bin, &overflow); - CHECK(overflow == 0); - CHECK(rustsecp256k1_v0_4_0_scalar_is_zero(&scalar)); - - /* A scalar with value of the curve order should be 0. */ - rustsecp256k1_v0_4_0_scalar_order_get_num(&order); - rustsecp256k1_v0_4_0_num_get_bin(bin, 32, &order); - rustsecp256k1_v0_4_0_scalar_set_b32(&scalar, bin, &overflow); - CHECK(overflow == 1); - CHECK(rustsecp256k1_v0_4_0_scalar_is_zero(&scalar)); - - /* A scalar with value of the curve order minus one should not overflow. */ - bin[31] -= 1; - rustsecp256k1_v0_4_0_scalar_set_b32(&scalar, bin, &overflow); - CHECK(overflow == 0); - rustsecp256k1_v0_4_0_scalar_get_b32(bin_tmp, &scalar); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(bin, bin_tmp, 32) == 0); - - /* A scalar set to all 1s should overflow. */ - memset(bin, 0xFF, 32); - rustsecp256k1_v0_4_0_scalar_set_b32(&scalar, bin, &overflow); - CHECK(overflow == 1); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&scalar, &all_ones_minus_order)); - } -#endif - { /* Does check_overflow check catch all ones? */ - static const rustsecp256k1_v0_4_0_scalar overflowed = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_4_1_scalar overflowed = SECP256K1_SCALAR_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); - CHECK(rustsecp256k1_v0_4_0_scalar_check_overflow(&overflowed)); + CHECK(rustsecp256k1_v0_4_1_scalar_check_overflow(&overflowed)); } { @@ -1183,16 +1768,14 @@ void run_scalar_tests(void) { * and edge-case coverage on 32-bit and 64-bit implementations. * The responses were generated with Sage 5.9. 
*/ - rustsecp256k1_v0_4_0_scalar x; - rustsecp256k1_v0_4_0_scalar y; - rustsecp256k1_v0_4_0_scalar z; - rustsecp256k1_v0_4_0_scalar zz; - rustsecp256k1_v0_4_0_scalar one; - rustsecp256k1_v0_4_0_scalar r1; - rustsecp256k1_v0_4_0_scalar r2; -#if defined(USE_SCALAR_INV_NUM) - rustsecp256k1_v0_4_0_scalar zzv; -#endif + rustsecp256k1_v0_4_1_scalar x; + rustsecp256k1_v0_4_1_scalar y; + rustsecp256k1_v0_4_1_scalar z; + rustsecp256k1_v0_4_1_scalar zz; + rustsecp256k1_v0_4_1_scalar one; + rustsecp256k1_v0_4_1_scalar r1; + rustsecp256k1_v0_4_1_scalar r2; + rustsecp256k1_v0_4_1_scalar zzv; int overflow; unsigned char chal[33][2][32] = { {{0xff, 0xff, 0x03, 0x07, 0x00, 0x00, 0x00, 0x00, @@ -1726,71 +2309,63 @@ void run_scalar_tests(void) { 0x1e, 0x86, 0x5d, 0x89, 0x63, 0xe6, 0x0a, 0x46, 0x5c, 0x02, 0x97, 0x1b, 0x62, 0x43, 0x86, 0xf5}} }; - rustsecp256k1_v0_4_0_scalar_set_int(&one, 1); + rustsecp256k1_v0_4_1_scalar_set_int(&one, 1); for (i = 0; i < 33; i++) { - rustsecp256k1_v0_4_0_scalar_set_b32(&x, chal[i][0], &overflow); + rustsecp256k1_v0_4_1_scalar_set_b32(&x, chal[i][0], &overflow); CHECK(!overflow); - rustsecp256k1_v0_4_0_scalar_set_b32(&y, chal[i][1], &overflow); + rustsecp256k1_v0_4_1_scalar_set_b32(&y, chal[i][1], &overflow); CHECK(!overflow); - rustsecp256k1_v0_4_0_scalar_set_b32(&r1, res[i][0], &overflow); + rustsecp256k1_v0_4_1_scalar_set_b32(&r1, res[i][0], &overflow); CHECK(!overflow); - rustsecp256k1_v0_4_0_scalar_set_b32(&r2, res[i][1], &overflow); + rustsecp256k1_v0_4_1_scalar_set_b32(&r2, res[i][1], &overflow); CHECK(!overflow); - rustsecp256k1_v0_4_0_scalar_mul(&z, &x, &y); - CHECK(!rustsecp256k1_v0_4_0_scalar_check_overflow(&z)); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&r1, &z)); - if (!rustsecp256k1_v0_4_0_scalar_is_zero(&y)) { - rustsecp256k1_v0_4_0_scalar_inverse(&zz, &y); - CHECK(!rustsecp256k1_v0_4_0_scalar_check_overflow(&zz)); -#if defined(USE_SCALAR_INV_NUM) - rustsecp256k1_v0_4_0_scalar_inverse_var(&zzv, &y); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&zzv, &zz)); -#endif - rustsecp256k1_v0_4_0_scalar_mul(&z, &z, &zz); - CHECK(!rustsecp256k1_v0_4_0_scalar_check_overflow(&z)); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&x, &z)); - rustsecp256k1_v0_4_0_scalar_mul(&zz, &zz, &y); - CHECK(!rustsecp256k1_v0_4_0_scalar_check_overflow(&zz)); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&one, &zz)); + rustsecp256k1_v0_4_1_scalar_mul(&z, &x, &y); + CHECK(!rustsecp256k1_v0_4_1_scalar_check_overflow(&z)); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&r1, &z)); + if (!rustsecp256k1_v0_4_1_scalar_is_zero(&y)) { + rustsecp256k1_v0_4_1_scalar_inverse(&zz, &y); + CHECK(!rustsecp256k1_v0_4_1_scalar_check_overflow(&zz)); + rustsecp256k1_v0_4_1_scalar_inverse_var(&zzv, &y); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&zzv, &zz)); + rustsecp256k1_v0_4_1_scalar_mul(&z, &z, &zz); + CHECK(!rustsecp256k1_v0_4_1_scalar_check_overflow(&z)); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&x, &z)); + rustsecp256k1_v0_4_1_scalar_mul(&zz, &zz, &y); + CHECK(!rustsecp256k1_v0_4_1_scalar_check_overflow(&zz)); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&one, &zz)); } - rustsecp256k1_v0_4_0_scalar_mul(&z, &x, &x); - CHECK(!rustsecp256k1_v0_4_0_scalar_check_overflow(&z)); - rustsecp256k1_v0_4_0_scalar_sqr(&zz, &x); - CHECK(!rustsecp256k1_v0_4_0_scalar_check_overflow(&zz)); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&zz, &z)); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&r2, &zz)); } } } /***** FIELD TESTS *****/ -void random_fe(rustsecp256k1_v0_4_0_fe *x) { +void random_fe(rustsecp256k1_v0_4_1_fe *x) { unsigned char bin[32]; do { - 
rustsecp256k1_v0_4_0_testrand256(bin); - if (rustsecp256k1_v0_4_0_fe_set_b32(x, bin)) { + rustsecp256k1_v0_4_1_testrand256(bin); + if (rustsecp256k1_v0_4_1_fe_set_b32(x, bin)) { return; } } while(1); } -void random_fe_test(rustsecp256k1_v0_4_0_fe *x) { +void random_fe_test(rustsecp256k1_v0_4_1_fe *x) { unsigned char bin[32]; do { - rustsecp256k1_v0_4_0_testrand256_test(bin); - if (rustsecp256k1_v0_4_0_fe_set_b32(x, bin)) { + rustsecp256k1_v0_4_1_testrand256_test(bin); + if (rustsecp256k1_v0_4_1_fe_set_b32(x, bin)) { return; } } while(1); } -void random_fe_non_zero(rustsecp256k1_v0_4_0_fe *nz) { +void random_fe_non_zero(rustsecp256k1_v0_4_1_fe *nz) { int tries = 10; while (--tries >= 0) { random_fe(nz); - rustsecp256k1_v0_4_0_fe_normalize(nz); - if (!rustsecp256k1_v0_4_0_fe_is_zero(nz)) { + rustsecp256k1_v0_4_1_fe_normalize(nz); + if (!rustsecp256k1_v0_4_1_fe_is_zero(nz)) { break; } } @@ -1798,27 +2373,20 @@ void random_fe_non_zero(rustsecp256k1_v0_4_0_fe *nz) { CHECK(tries >= 0); } -void random_fe_non_square(rustsecp256k1_v0_4_0_fe *ns) { - rustsecp256k1_v0_4_0_fe r; +void random_fe_non_square(rustsecp256k1_v0_4_1_fe *ns) { + rustsecp256k1_v0_4_1_fe r; random_fe_non_zero(ns); - if (rustsecp256k1_v0_4_0_fe_sqrt(&r, ns)) { - rustsecp256k1_v0_4_0_fe_negate(ns, ns, 1); + if (rustsecp256k1_v0_4_1_fe_sqrt(&r, ns)) { + rustsecp256k1_v0_4_1_fe_negate(ns, ns, 1); } } -int check_fe_equal(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b) { - rustsecp256k1_v0_4_0_fe an = *a; - rustsecp256k1_v0_4_0_fe bn = *b; - rustsecp256k1_v0_4_0_fe_normalize_weak(&an); - rustsecp256k1_v0_4_0_fe_normalize_var(&bn); - return rustsecp256k1_v0_4_0_fe_equal_var(&an, &bn); -} - -int check_fe_inverse(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *ai) { - rustsecp256k1_v0_4_0_fe x; - rustsecp256k1_v0_4_0_fe one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); - rustsecp256k1_v0_4_0_fe_mul(&x, a, ai); - return check_fe_equal(&x, &one); +int check_fe_equal(const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe *b) { + rustsecp256k1_v0_4_1_fe an = *a; + rustsecp256k1_v0_4_1_fe bn = *b; + rustsecp256k1_v0_4_1_fe_normalize_weak(&an); + rustsecp256k1_v0_4_1_fe_normalize_var(&bn); + return rustsecp256k1_v0_4_1_fe_equal_var(&an, &bn); } void run_field_convert(void) { @@ -1828,209 +2396,163 @@ void run_field_convert(void) { 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x40 }; - static const rustsecp256k1_v0_4_0_fe_storage fes = SECP256K1_FE_STORAGE_CONST( + static const rustsecp256k1_v0_4_1_fe_storage fes = SECP256K1_FE_STORAGE_CONST( 0x00010203UL, 0x04050607UL, 0x11121314UL, 0x15161718UL, 0x22232425UL, 0x26272829UL, 0x33343536UL, 0x37383940UL ); - static const rustsecp256k1_v0_4_0_fe fe = SECP256K1_FE_CONST( + static const rustsecp256k1_v0_4_1_fe fe = SECP256K1_FE_CONST( 0x00010203UL, 0x04050607UL, 0x11121314UL, 0x15161718UL, 0x22232425UL, 0x26272829UL, 0x33343536UL, 0x37383940UL ); - rustsecp256k1_v0_4_0_fe fe2; + rustsecp256k1_v0_4_1_fe fe2; unsigned char b322[32]; - rustsecp256k1_v0_4_0_fe_storage fes2; + rustsecp256k1_v0_4_1_fe_storage fes2; /* Check conversions to fe. 
*/ - CHECK(rustsecp256k1_v0_4_0_fe_set_b32(&fe2, b32)); - CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&fe, &fe2)); - rustsecp256k1_v0_4_0_fe_from_storage(&fe2, &fes); - CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&fe, &fe2)); + CHECK(rustsecp256k1_v0_4_1_fe_set_b32(&fe2, b32)); + CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&fe, &fe2)); + rustsecp256k1_v0_4_1_fe_from_storage(&fe2, &fes); + CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&fe, &fe2)); /* Check conversion from fe. */ - rustsecp256k1_v0_4_0_fe_get_b32(b322, &fe); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(b322, b32, 32) == 0); - rustsecp256k1_v0_4_0_fe_to_storage(&fes2, &fe); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&fes2, &fes, sizeof(fes)) == 0); + rustsecp256k1_v0_4_1_fe_get_b32(b322, &fe); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(b322, b32, 32) == 0); + rustsecp256k1_v0_4_1_fe_to_storage(&fes2, &fe); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&fes2, &fes, sizeof(fes)) == 0); } -int fe_rustsecp256k1_v0_4_0_memcmp_var(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b) { - rustsecp256k1_v0_4_0_fe t = *b; +int fe_rustsecp256k1_v0_4_1_memcmp_var(const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe *b) { + rustsecp256k1_v0_4_1_fe t = *b; #ifdef VERIFY t.magnitude = a->magnitude; t.normalized = a->normalized; #endif - return rustsecp256k1_v0_4_0_memcmp_var(a, &t, sizeof(rustsecp256k1_v0_4_0_fe)); + return rustsecp256k1_v0_4_1_memcmp_var(a, &t, sizeof(rustsecp256k1_v0_4_1_fe)); } void run_field_misc(void) { - rustsecp256k1_v0_4_0_fe x; - rustsecp256k1_v0_4_0_fe y; - rustsecp256k1_v0_4_0_fe z; - rustsecp256k1_v0_4_0_fe q; - rustsecp256k1_v0_4_0_fe fe5 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 5); + rustsecp256k1_v0_4_1_fe x; + rustsecp256k1_v0_4_1_fe y; + rustsecp256k1_v0_4_1_fe z; + rustsecp256k1_v0_4_1_fe q; + rustsecp256k1_v0_4_1_fe fe5 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 5); int i, j; for (i = 0; i < 5*count; i++) { - rustsecp256k1_v0_4_0_fe_storage xs, ys, zs; + rustsecp256k1_v0_4_1_fe_storage xs, ys, zs; random_fe(&x); random_fe_non_zero(&y); /* Test the fe equality and comparison operations. */ - CHECK(rustsecp256k1_v0_4_0_fe_cmp_var(&x, &x) == 0); - CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&x, &x)); + CHECK(rustsecp256k1_v0_4_1_fe_cmp_var(&x, &x) == 0); + CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&x, &x)); z = x; - rustsecp256k1_v0_4_0_fe_add(&z,&y); + rustsecp256k1_v0_4_1_fe_add(&z,&y); /* Test fe conditional move; z is not normalized here. 
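(A cmov with flag 0 must leave the destination completely untouched, including the magnitude/normalized bookkeeping asserted under VERIFY below.)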
*/ q = x; - rustsecp256k1_v0_4_0_fe_cmov(&x, &z, 0); + rustsecp256k1_v0_4_1_fe_cmov(&x, &z, 0); #ifdef VERIFY CHECK(x.normalized && x.magnitude == 1); #endif - rustsecp256k1_v0_4_0_fe_cmov(&x, &x, 1); - CHECK(fe_rustsecp256k1_v0_4_0_memcmp_var(&x, &z) != 0); - CHECK(fe_rustsecp256k1_v0_4_0_memcmp_var(&x, &q) == 0); - rustsecp256k1_v0_4_0_fe_cmov(&q, &z, 1); + rustsecp256k1_v0_4_1_fe_cmov(&x, &x, 1); + CHECK(fe_rustsecp256k1_v0_4_1_memcmp_var(&x, &z) != 0); + CHECK(fe_rustsecp256k1_v0_4_1_memcmp_var(&x, &q) == 0); + rustsecp256k1_v0_4_1_fe_cmov(&q, &z, 1); #ifdef VERIFY CHECK(!q.normalized && q.magnitude == z.magnitude); #endif - CHECK(fe_rustsecp256k1_v0_4_0_memcmp_var(&q, &z) == 0); - rustsecp256k1_v0_4_0_fe_normalize_var(&x); - rustsecp256k1_v0_4_0_fe_normalize_var(&z); - CHECK(!rustsecp256k1_v0_4_0_fe_equal_var(&x, &z)); - rustsecp256k1_v0_4_0_fe_normalize_var(&q); - rustsecp256k1_v0_4_0_fe_cmov(&q, &z, (i&1)); + CHECK(fe_rustsecp256k1_v0_4_1_memcmp_var(&q, &z) == 0); + rustsecp256k1_v0_4_1_fe_normalize_var(&x); + rustsecp256k1_v0_4_1_fe_normalize_var(&z); + CHECK(!rustsecp256k1_v0_4_1_fe_equal_var(&x, &z)); + rustsecp256k1_v0_4_1_fe_normalize_var(&q); + rustsecp256k1_v0_4_1_fe_cmov(&q, &z, (i&1)); #ifdef VERIFY CHECK(q.normalized && q.magnitude == 1); #endif for (j = 0; j < 6; j++) { - rustsecp256k1_v0_4_0_fe_negate(&z, &z, j+1); - rustsecp256k1_v0_4_0_fe_normalize_var(&q); - rustsecp256k1_v0_4_0_fe_cmov(&q, &z, (j&1)); + rustsecp256k1_v0_4_1_fe_negate(&z, &z, j+1); + rustsecp256k1_v0_4_1_fe_normalize_var(&q); + rustsecp256k1_v0_4_1_fe_cmov(&q, &z, (j&1)); #ifdef VERIFY CHECK((q.normalized != (j&1)) && q.magnitude == ((j&1) ? z.magnitude : 1)); #endif } - rustsecp256k1_v0_4_0_fe_normalize_var(&z); + rustsecp256k1_v0_4_1_fe_normalize_var(&z); /* Test storage conversion and conditional moves. */ - rustsecp256k1_v0_4_0_fe_to_storage(&xs, &x); - rustsecp256k1_v0_4_0_fe_to_storage(&ys, &y); - rustsecp256k1_v0_4_0_fe_to_storage(&zs, &z); - rustsecp256k1_v0_4_0_fe_storage_cmov(&zs, &xs, 0); - rustsecp256k1_v0_4_0_fe_storage_cmov(&zs, &zs, 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&xs, &zs, sizeof(xs)) != 0); - rustsecp256k1_v0_4_0_fe_storage_cmov(&ys, &xs, 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&xs, &ys, sizeof(xs)) == 0); - rustsecp256k1_v0_4_0_fe_from_storage(&x, &xs); - rustsecp256k1_v0_4_0_fe_from_storage(&y, &ys); - rustsecp256k1_v0_4_0_fe_from_storage(&z, &zs); + rustsecp256k1_v0_4_1_fe_to_storage(&xs, &x); + rustsecp256k1_v0_4_1_fe_to_storage(&ys, &y); + rustsecp256k1_v0_4_1_fe_to_storage(&zs, &z); + rustsecp256k1_v0_4_1_fe_storage_cmov(&zs, &xs, 0); + rustsecp256k1_v0_4_1_fe_storage_cmov(&zs, &zs, 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&xs, &zs, sizeof(xs)) != 0); + rustsecp256k1_v0_4_1_fe_storage_cmov(&ys, &xs, 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&xs, &ys, sizeof(xs)) == 0); + rustsecp256k1_v0_4_1_fe_from_storage(&x, &xs); + rustsecp256k1_v0_4_1_fe_from_storage(&y, &ys); + rustsecp256k1_v0_4_1_fe_from_storage(&z, &zs); /* Test that mul_int, mul, and add agree. 
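Note that y == x at this point (ys was overwritten with xs by the storage cmov above), so repeated additions of x can be cross-checked against mul_int by 3 and against multiplication by the constant 5.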
*/ - rustsecp256k1_v0_4_0_fe_add(&y, &x); - rustsecp256k1_v0_4_0_fe_add(&y, &x); + rustsecp256k1_v0_4_1_fe_add(&y, &x); + rustsecp256k1_v0_4_1_fe_add(&y, &x); z = x; - rustsecp256k1_v0_4_0_fe_mul_int(&z, 3); + rustsecp256k1_v0_4_1_fe_mul_int(&z, 3); CHECK(check_fe_equal(&y, &z)); - rustsecp256k1_v0_4_0_fe_add(&y, &x); - rustsecp256k1_v0_4_0_fe_add(&z, &x); + rustsecp256k1_v0_4_1_fe_add(&y, &x); + rustsecp256k1_v0_4_1_fe_add(&z, &x); CHECK(check_fe_equal(&z, &y)); z = x; - rustsecp256k1_v0_4_0_fe_mul_int(&z, 5); - rustsecp256k1_v0_4_0_fe_mul(&q, &x, &fe5); + rustsecp256k1_v0_4_1_fe_mul_int(&z, 5); + rustsecp256k1_v0_4_1_fe_mul(&q, &x, &fe5); CHECK(check_fe_equal(&z, &q)); - rustsecp256k1_v0_4_0_fe_negate(&x, &x, 1); - rustsecp256k1_v0_4_0_fe_add(&z, &x); - rustsecp256k1_v0_4_0_fe_add(&q, &x); + rustsecp256k1_v0_4_1_fe_negate(&x, &x, 1); + rustsecp256k1_v0_4_1_fe_add(&z, &x); + rustsecp256k1_v0_4_1_fe_add(&q, &x); CHECK(check_fe_equal(&y, &z)); CHECK(check_fe_equal(&q, &y)); } } -void run_field_inv(void) { - rustsecp256k1_v0_4_0_fe x, xi, xii; - int i; - for (i = 0; i < 10*count; i++) { - random_fe_non_zero(&x); - rustsecp256k1_v0_4_0_fe_inv(&xi, &x); - CHECK(check_fe_inverse(&x, &xi)); - rustsecp256k1_v0_4_0_fe_inv(&xii, &xi); - CHECK(check_fe_equal(&x, &xii)); - } -} - -void run_field_inv_var(void) { - rustsecp256k1_v0_4_0_fe x, xi, xii; - int i; - for (i = 0; i < 10*count; i++) { - random_fe_non_zero(&x); - rustsecp256k1_v0_4_0_fe_inv_var(&xi, &x); - CHECK(check_fe_inverse(&x, &xi)); - rustsecp256k1_v0_4_0_fe_inv_var(&xii, &xi); - CHECK(check_fe_equal(&x, &xii)); - } -} - -void run_field_inv_all_var(void) { - rustsecp256k1_v0_4_0_fe x[16], xi[16], xii[16]; - int i; - /* Check it's safe to call for 0 elements */ - rustsecp256k1_v0_4_0_fe_inv_all_var(xi, x, 0); - for (i = 0; i < count; i++) { - size_t j; - size_t len = rustsecp256k1_v0_4_0_testrand_int(15) + 1; - for (j = 0; j < len; j++) { - random_fe_non_zero(&x[j]); - } - rustsecp256k1_v0_4_0_fe_inv_all_var(xi, x, len); - for (j = 0; j < len; j++) { - CHECK(check_fe_inverse(&x[j], &xi[j])); - } - rustsecp256k1_v0_4_0_fe_inv_all_var(xii, xi, len); - for (j = 0; j < len; j++) { - CHECK(check_fe_equal(&x[j], &xii[j])); - } - } -} - void run_sqr(void) { - rustsecp256k1_v0_4_0_fe x, s; + rustsecp256k1_v0_4_1_fe x, s; { int i; - rustsecp256k1_v0_4_0_fe_set_int(&x, 1); - rustsecp256k1_v0_4_0_fe_negate(&x, &x, 1); + rustsecp256k1_v0_4_1_fe_set_int(&x, 1); + rustsecp256k1_v0_4_1_fe_negate(&x, &x, 1); for (i = 1; i <= 512; ++i) { - rustsecp256k1_v0_4_0_fe_mul_int(&x, 2); - rustsecp256k1_v0_4_0_fe_normalize(&x); - rustsecp256k1_v0_4_0_fe_sqr(&s, &x); + rustsecp256k1_v0_4_1_fe_mul_int(&x, 2); + rustsecp256k1_v0_4_1_fe_normalize(&x); + rustsecp256k1_v0_4_1_fe_sqr(&s, &x); } } } -void test_sqrt(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *k) { - rustsecp256k1_v0_4_0_fe r1, r2; - int v = rustsecp256k1_v0_4_0_fe_sqrt(&r1, a); +void test_sqrt(const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe *k) { + rustsecp256k1_v0_4_1_fe r1, r2; + int v = rustsecp256k1_v0_4_1_fe_sqrt(&r1, a); CHECK((v == 0) == (k == NULL)); if (k != NULL) { /* Check that the returned root is +/- the given known answer */ - rustsecp256k1_v0_4_0_fe_negate(&r2, &r1, 1); - rustsecp256k1_v0_4_0_fe_add(&r1, k); rustsecp256k1_v0_4_0_fe_add(&r2, k); - rustsecp256k1_v0_4_0_fe_normalize(&r1); rustsecp256k1_v0_4_0_fe_normalize(&r2); - CHECK(rustsecp256k1_v0_4_0_fe_is_zero(&r1) || rustsecp256k1_v0_4_0_fe_is_zero(&r2)); + rustsecp256k1_v0_4_1_fe_negate(&r2, &r1, 
1); + rustsecp256k1_v0_4_1_fe_add(&r1, k); rustsecp256k1_v0_4_1_fe_add(&r2, k); + rustsecp256k1_v0_4_1_fe_normalize(&r1); rustsecp256k1_v0_4_1_fe_normalize(&r2); + CHECK(rustsecp256k1_v0_4_1_fe_is_zero(&r1) || rustsecp256k1_v0_4_1_fe_is_zero(&r2)); } } void run_sqrt(void) { - rustsecp256k1_v0_4_0_fe ns, x, s, t; + rustsecp256k1_v0_4_1_fe ns, x, s, t; int i; /* Check sqrt(0) is 0 */ - rustsecp256k1_v0_4_0_fe_set_int(&x, 0); - rustsecp256k1_v0_4_0_fe_sqr(&s, &x); + rustsecp256k1_v0_4_1_fe_set_int(&x, 0); + rustsecp256k1_v0_4_1_fe_sqr(&s, &x); test_sqrt(&s, &x); /* Check sqrt of small squares (and their negatives) */ for (i = 1; i <= 100; i++) { - rustsecp256k1_v0_4_0_fe_set_int(&x, i); - rustsecp256k1_v0_4_0_fe_sqr(&s, &x); + rustsecp256k1_v0_4_1_fe_set_int(&x, i); + rustsecp256k1_v0_4_1_fe_sqr(&s, &x); test_sqrt(&s, &x); - rustsecp256k1_v0_4_0_fe_negate(&t, &s, 1); + rustsecp256k1_v0_4_1_fe_negate(&t, &s, 1); test_sqrt(&t, NULL); } @@ -2040,64 +2562,376 @@ void run_sqrt(void) { random_fe_non_square(&ns); for (j = 0; j < count; j++) { random_fe(&x); - rustsecp256k1_v0_4_0_fe_sqr(&s, &x); + rustsecp256k1_v0_4_1_fe_sqr(&s, &x); test_sqrt(&s, &x); - rustsecp256k1_v0_4_0_fe_negate(&t, &s, 1); + rustsecp256k1_v0_4_1_fe_negate(&t, &s, 1); test_sqrt(&t, NULL); - rustsecp256k1_v0_4_0_fe_mul(&t, &s, &ns); + rustsecp256k1_v0_4_1_fe_mul(&t, &s, &ns); test_sqrt(&t, NULL); } } } +/***** FIELD/SCALAR INVERSE TESTS *****/ + +static const rustsecp256k1_v0_4_1_scalar scalar_minus_one = SECP256K1_SCALAR_CONST( + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, + 0xBAAEDCE6, 0xAF48A03B, 0xBFD25E8C, 0xD0364140 +); + +static const rustsecp256k1_v0_4_1_fe fe_minus_one = SECP256K1_FE_CONST( + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFC2E +); + +/* These tests test the following identities: + * + * for x==0: 1/x == 0 + * for x!=0: x*(1/x) == 1 + * for x!=0 and x!=1: 1/(1/x - 1) + 1 == -1/(x-1) + */ + +void test_inverse_scalar(rustsecp256k1_v0_4_1_scalar* out, const rustsecp256k1_v0_4_1_scalar* x, int var) +{ + rustsecp256k1_v0_4_1_scalar l, r, t; + + (var ? rustsecp256k1_v0_4_1_scalar_inverse_var : rustsecp256k1_v0_4_1_scalar_inverse)(&l, x); /* l = 1/x */ + if (out) *out = l; + if (rustsecp256k1_v0_4_1_scalar_is_zero(x)) { + CHECK(rustsecp256k1_v0_4_1_scalar_is_zero(&l)); + return; + } + rustsecp256k1_v0_4_1_scalar_mul(&t, x, &l); /* t = x*(1/x) */ + CHECK(rustsecp256k1_v0_4_1_scalar_is_one(&t)); /* x*(1/x) == 1 */ + rustsecp256k1_v0_4_1_scalar_add(&r, x, &scalar_minus_one); /* r = x-1 */ + if (rustsecp256k1_v0_4_1_scalar_is_zero(&r)) return; + (var ? rustsecp256k1_v0_4_1_scalar_inverse_var : rustsecp256k1_v0_4_1_scalar_inverse)(&r, &r); /* r = 1/(x-1) */ + rustsecp256k1_v0_4_1_scalar_add(&l, &scalar_minus_one, &l); /* l = 1/x-1 */ + (var ? rustsecp256k1_v0_4_1_scalar_inverse_var : rustsecp256k1_v0_4_1_scalar_inverse)(&l, &l); /* l = 1/(1/x-1) */ + rustsecp256k1_v0_4_1_scalar_add(&l, &l, &rustsecp256k1_v0_4_1_scalar_one); /* l = 1/(1/x-1)+1 */ + rustsecp256k1_v0_4_1_scalar_add(&l, &r, &l); /* l = 1/(1/x-1)+1 + 1/(x-1) */ + CHECK(rustsecp256k1_v0_4_1_scalar_is_zero(&l)); /* l == 0 */ +} + +void test_inverse_field(rustsecp256k1_v0_4_1_fe* out, const rustsecp256k1_v0_4_1_fe* x, int var) +{ + rustsecp256k1_v0_4_1_fe l, r, t; + + (var ?
rustsecp256k1_v0_4_1_fe_inv_var : rustsecp256k1_v0_4_1_fe_inv)(&l, x) ; /* l = 1/x */ + if (out) *out = l; + t = *x; /* t = x */ + if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&t)) { + CHECK(rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&l)); + return; + } + rustsecp256k1_v0_4_1_fe_mul(&t, x, &l); /* t = x*(1/x) */ + rustsecp256k1_v0_4_1_fe_add(&t, &fe_minus_one); /* t = x*(1/x)-1 */ + CHECK(rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&t)); /* x*(1/x)-1 == 0 */ + r = *x; /* r = x */ + rustsecp256k1_v0_4_1_fe_add(&r, &fe_minus_one); /* r = x-1 */ + if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&r)) return; + (var ? rustsecp256k1_v0_4_1_fe_inv_var : rustsecp256k1_v0_4_1_fe_inv)(&r, &r); /* r = 1/(x-1) */ + rustsecp256k1_v0_4_1_fe_add(&l, &fe_minus_one); /* l = 1/x-1 */ + (var ? rustsecp256k1_v0_4_1_fe_inv_var : rustsecp256k1_v0_4_1_fe_inv)(&l, &l); /* l = 1/(1/x-1) */ + rustsecp256k1_v0_4_1_fe_add(&l, &rustsecp256k1_v0_4_1_fe_one); /* l = 1/(1/x-1)+1 */ + rustsecp256k1_v0_4_1_fe_add(&l, &r); /* l = 1/(1/x-1)+1 + 1/(x-1) */ + CHECK(rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&l)); /* l == 0 */ +} + +void run_inverse_tests(void) +{ + /* Fixed test cases for field inverses: pairs of (x, 1/x) mod p. */ + static const rustsecp256k1_v0_4_1_fe fe_cases[][2] = { + /* 0 */ + {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), + SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0)}, + /* 1 */ + {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1), + SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1)}, + /* -1 */ + {SECP256K1_FE_CONST(0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xfffffffe, 0xfffffc2e), + SECP256K1_FE_CONST(0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xfffffffe, 0xfffffc2e)}, + /* 2 */ + {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 2), + SECP256K1_FE_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x7ffffe18)}, + /* 2**128 */ + {SECP256K1_FE_CONST(0, 0, 0, 1, 0, 0, 0, 0), + SECP256K1_FE_CONST(0xbcb223fe, 0xdc24a059, 0xd838091d, 0xd2253530, 0xffffffff, 0xffffffff, 0xffffffff, 0x434dd931)}, + /* Input known to need 637 divsteps */ + {SECP256K1_FE_CONST(0xe34e9c95, 0x6bee8a84, 0x0dcb632a, 0xdb8a1320, 0x66885408, 0x06f3f996, 0x7c11ca84, 0x19199ec3), + SECP256K1_FE_CONST(0xbd2cbd8f, 0x1c536828, 0x9bccda44, 0x2582ac0c, 0x870152b0, 0x8a3f09fb, 0x1aaadf92, 0x19b618e5)}, + /* Input known to need 567 divsteps starting with delta=1/2. */ + {SECP256K1_FE_CONST(0xf6bc3ba3, 0x636451c4, 0x3e46357d, 0x2c21d619, 0x0988e234, 0x15985661, 0x6672982b, 0xa7549bfc), + SECP256K1_FE_CONST(0xb024fdc7, 0x5547451e, 0x426c585f, 0xbd481425, 0x73df6b75, 0xeef6d9d0, 0x389d87d4, 0xfbb440ba)}, + /* Input known to need 566 divsteps starting with delta=1/2. 
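(A divstep is one iteration of the safegcd-based modular inversion loop introduced with this update; inputs like these sit close to the algorithm's worst-case iteration count.)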
*/ + {SECP256K1_FE_CONST(0xb595d81b, 0x2e3c1e2f, 0x482dbc65, 0xe4865af7, 0x9a0a50aa, 0x29f9e618, 0x6f87d7a5, 0x8d1063ae), + SECP256K1_FE_CONST(0xc983337c, 0x5d5c74e1, 0x49918330, 0x0b53afb5, 0xa0428a0b, 0xce6eef86, 0x059bd8ef, 0xe5b908de)}, + /* Set of 10 inputs accessing all 128 entries in the modinv32 divsteps_var table */ + {SECP256K1_FE_CONST(0x00000000, 0x00000000, 0xe0ff1f80, 0x1f000000, 0x00000000, 0x00000000, 0xfeff0100, 0x00000000), + SECP256K1_FE_CONST(0x9faf9316, 0x77e5049d, 0x0b5e7a1b, 0xef70b893, 0x18c9e30c, 0x045e7fd7, 0x29eddf8c, 0xd62e9e3d)}, + {SECP256K1_FE_CONST(0x621a538d, 0x511b2780, 0x35688252, 0x53f889a4, 0x6317c3ac, 0x32ba0a46, 0x6277c0d1, 0xccd31192), + SECP256K1_FE_CONST(0x38513b0c, 0x5eba856f, 0xe29e882e, 0x9b394d8c, 0x34bda011, 0xeaa66943, 0x6a841a4c, 0x6ae8bcff)}, + {SECP256K1_FE_CONST(0x00000200, 0xf0ffff1f, 0x00000000, 0x0000e0ff, 0xffffffff, 0xfffcffff, 0xffffffff, 0xffff0100), + SECP256K1_FE_CONST(0x5da42a52, 0x3640de9e, 0x13e64343, 0x0c7591b7, 0x6c1e3519, 0xf048c5b6, 0x0484217c, 0xedbf8b2f)}, + {SECP256K1_FE_CONST(0xd1343ef9, 0x4b952621, 0x7c52a2ee, 0x4ea1281b, 0x4ab46410, 0x9f26998d, 0xa686a8ff, 0x9f2103e8), + SECP256K1_FE_CONST(0x84044385, 0x9a4619bf, 0x74e35b6d, 0xa47e0c46, 0x6b7fb47d, 0x9ffab128, 0xb0775aa3, 0xcb318bd1)}, + {SECP256K1_FE_CONST(0xb27235d2, 0xc56a52be, 0x210db37a, 0xd50d23a4, 0xbe621bdd, 0x5df22c6a, 0xe926ba62, 0xd2e4e440), + SECP256K1_FE_CONST(0x67a26e54, 0x483a9d3c, 0xa568469e, 0xd258ab3d, 0xb9ec9981, 0xdca9b1bd, 0x8d2775fe, 0x53ae429b)}, + {SECP256K1_FE_CONST(0x00000000, 0x00000000, 0x00e0ffff, 0xffffff83, 0xffffffff, 0x3f00f00f, 0x000000e0, 0xffffffff), + SECP256K1_FE_CONST(0x310e10f8, 0x23bbfab0, 0xac94907d, 0x076c9a45, 0x8d357d7f, 0xc763bcee, 0x00d0e615, 0x5a6acef6)}, + {SECP256K1_FE_CONST(0xfeff0300, 0x001c0000, 0xf80700c0, 0x0ff0ffff, 0xffffffff, 0x0fffffff, 0xffff0100, 0x7f0000fe), + SECP256K1_FE_CONST(0x28e2fdb4, 0x0709168b, 0x86f598b0, 0x3453a370, 0x530cf21f, 0x32f978d5, 0x1d527a71, 0x59269b0c)}, + {SECP256K1_FE_CONST(0xc2591afa, 0x7bb98ef7, 0x090bb273, 0x85c14f87, 0xbb0b28e0, 0x54d3c453, 0x85c66753, 0xd5574d2f), + SECP256K1_FE_CONST(0xfdca70a2, 0x70ce627c, 0x95e66fae, 0x848a6dbb, 0x07ffb15c, 0x5f63a058, 0xba4140ed, 0x6113b503)}, + {SECP256K1_FE_CONST(0xf5475db3, 0xedc7b5a3, 0x411c047e, 0xeaeb452f, 0xc625828e, 0x1cf5ad27, 0x8eec1060, 0xc7d3e690), + SECP256K1_FE_CONST(0x5eb756c0, 0xf963f4b9, 0xdc6a215e, 0xec8cc2d8, 0x2e9dec01, 0xde5eb88d, 0x6aba7164, 0xaecb2c5a)}, + {SECP256K1_FE_CONST(0x00000000, 0x00f8ffff, 0xffffffff, 0x01000000, 0xe0ff1f00, 0x00000000, 0xffffff7f, 0x00000000), + SECP256K1_FE_CONST(0xe0d2e3d8, 0x49b6157d, 0xe54e88c2, 0x1a7f02ca, 0x7dd28167, 0xf1125d81, 0x7bfa444e, 0xbe110037)}, + /* Selection of randomly generated inputs that reach high/low d/e values in various configurations. 
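(d and e are internal accumulator variables of the modinv32/modinv64 code that converge to the candidate inverse; inputs pushing them toward their extreme ranges stress the bounds assumed there.)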
*/ + {SECP256K1_FE_CONST(0x13cc08a4, 0xd8c41f0f, 0x179c3e67, 0x54c46c67, 0xc4109221, 0x09ab3b13, 0xe24d9be1, 0xffffe950), + SECP256K1_FE_CONST(0xb80c8006, 0xd16abaa7, 0xcabd71e5, 0xcf6714f4, 0x966dd3d0, 0x64767a2d, 0xe92c4441, 0x51008cd1)}, + {SECP256K1_FE_CONST(0xaa6db990, 0x95efbca1, 0x3cc6ff71, 0x0602e24a, 0xf49ff938, 0x99fffc16, 0x46f40993, 0xc6e72057), + SECP256K1_FE_CONST(0xd5d3dd69, 0xb0c195e5, 0x285f1d49, 0xe639e48c, 0x9223f8a9, 0xca1d731d, 0x9ca482f9, 0xa5b93e06)}, + {SECP256K1_FE_CONST(0x1c680eac, 0xaeabffd8, 0x9bdc4aee, 0x1781e3de, 0xa3b08108, 0x0015f2e0, 0x94449e1b, 0x2f67a058), + SECP256K1_FE_CONST(0x7f083f8d, 0x31254f29, 0x6510f475, 0x245c373d, 0xc5622590, 0x4b323393, 0x32ed1719, 0xc127444b)}, + {SECP256K1_FE_CONST(0x147d44b3, 0x012d83f8, 0xc160d386, 0x1a44a870, 0x9ba6be96, 0x8b962707, 0x267cbc1a, 0xb65b2f0a), + SECP256K1_FE_CONST(0x555554ff, 0x170aef1e, 0x50a43002, 0xe51fbd36, 0xafadb458, 0x7a8aded1, 0x0ca6cd33, 0x6ed9087c)}, + {SECP256K1_FE_CONST(0x12423796, 0x22f0fe61, 0xf9ca017c, 0x5384d107, 0xa1fbf3b2, 0x3b018013, 0x916a3c37, 0x4000b98c), + SECP256K1_FE_CONST(0x20257700, 0x08668f94, 0x1177e306, 0x136c01f5, 0x8ed1fbd2, 0x95ec4589, 0xae38edb9, 0xfd19b6d7)}, + {SECP256K1_FE_CONST(0xdcf2d030, 0x9ab42cb4, 0x93ffa181, 0xdcd23619, 0x39699b52, 0x08909a20, 0xb5a17695, 0x3a9dcf21), + SECP256K1_FE_CONST(0x1f701dea, 0xe211fb1f, 0x4f37180d, 0x63a0f51c, 0x29fe1e40, 0xa40b6142, 0x2e7b12eb, 0x982b06b6)}, + {SECP256K1_FE_CONST(0x79a851f6, 0xa6314ed3, 0xb35a55e6, 0xca1c7d7f, 0xe32369ea, 0xf902432e, 0x375308c5, 0xdfd5b600), + SECP256K1_FE_CONST(0xcaae00c5, 0xe6b43851, 0x9dabb737, 0x38cba42c, 0xa02c8549, 0x7895dcbf, 0xbd183d71, 0xafe4476a)}, + {SECP256K1_FE_CONST(0xede78fdd, 0xcfc92bf1, 0x4fec6c6c, 0xdb8d37e2, 0xfb66bc7b, 0x28701870, 0x7fa27c9a, 0x307196ec), + SECP256K1_FE_CONST(0x68193a6c, 0x9a8b87a7, 0x2a760c64, 0x13e473f6, 0x23ae7bed, 0x1de05422, 0x88865427, 0xa3418265)}, + {SECP256K1_FE_CONST(0xa40b2079, 0xb8f88e89, 0xa7617997, 0x89baf5ae, 0x174df343, 0x75138eae, 0x2711595d, 0x3fc3e66c), + SECP256K1_FE_CONST(0x9f99c6a5, 0x6d685267, 0xd4b87c37, 0x9d9c4576, 0x358c692b, 0x6bbae0ed, 0x3389c93d, 0x7fdd2655)}, + {SECP256K1_FE_CONST(0x7c74c6b6, 0xe98d9151, 0x72645cf1, 0x7f06e321, 0xcefee074, 0x15b2113a, 0x10a9be07, 0x08a45696), + SECP256K1_FE_CONST(0x8c919a88, 0x898bc1e0, 0x77f26f97, 0x12e655b7, 0x9ba0ac40, 0xe15bb19e, 0x8364cc3b, 0xe227a8ee)}, + {SECP256K1_FE_CONST(0x109ba1ce, 0xdafa6d4a, 0xa1cec2b2, 0xeb1069f4, 0xb7a79e5b, 0xec6eb99b, 0xaec5f643, 0xee0e723e), + SECP256K1_FE_CONST(0x93d13eb8, 0x4bb0bcf9, 0xe64f5a71, 0xdbe9f359, 0x7191401c, 0x6f057a4a, 0xa407fe1b, 0x7ecb65cc)}, + {SECP256K1_FE_CONST(0x3db076cd, 0xec74a5c9, 0xf61dd138, 0x90e23e06, 0xeeedd2d0, 0x74cbc4e0, 0x3dbe1e91, 0xded36a78), + SECP256K1_FE_CONST(0x3f07f966, 0x8e2a1e09, 0x706c71df, 0x02b5e9d5, 0xcb92ddbf, 0xcdd53010, 0x16545564, 0xe660b107)}, + {SECP256K1_FE_CONST(0xe31c73ed, 0xb4c4b82c, 0x02ae35f7, 0x4cdec153, 0x98b522fd, 0xf7d2460c, 0x6bf7c0f8, 0x4cf67b0d), + SECP256K1_FE_CONST(0x4b8f1faf, 0x94e8b070, 0x19af0ff6, 0xa319cd31, 0xdf0a7ffb, 0xefaba629, 0x59c50666, 0x1fe5b843)}, + {SECP256K1_FE_CONST(0x4c8b0e6e, 0x83392ab6, 0xc0e3e9f1, 0xbbd85497, 0x16698897, 0xf552d50d, 0x79652ddb, 0x12f99870), + SECP256K1_FE_CONST(0x56d5101f, 0xd23b7949, 0x17dc38d6, 0xf24022ef, 0xcf18e70a, 0x5cc34424, 0x438544c3, 0x62da4bca)}, + {SECP256K1_FE_CONST(0xb0e040e2, 0x40cc35da, 0x7dd5c611, 0x7fccb178, 0x28888137, 0xbc930358, 0xea2cbc90, 0x775417dc), + SECP256K1_FE_CONST(0xca37f0d4, 0x016dd7c8, 0xab3ae576, 0x96e08d69, 0x68ed9155, 0xa9b44270, 
0x900ae35d, 0x7c7800cd)}, + {SECP256K1_FE_CONST(0x8a32ea49, 0x7fbb0bae, 0x69724a9d, 0x8e2105b2, 0xbdf69178, 0x862577ef, 0x35055590, 0x667ddaef), + SECP256K1_FE_CONST(0xd02d7ead, 0xc5e190f0, 0x559c9d72, 0xdaef1ffc, 0x64f9f425, 0xf43645ea, 0x7341e08d, 0x11768e96)}, + {SECP256K1_FE_CONST(0xa3592d98, 0x9abe289d, 0x579ebea6, 0xbb0857a8, 0xe242ab73, 0x85f9a2ce, 0xb6998f0f, 0xbfffbfc6), + SECP256K1_FE_CONST(0x093c1533, 0x32032efa, 0x6aa46070, 0x0039599e, 0x589c35f4, 0xff525430, 0x7fe3777a, 0x44b43ddc)}, + {SECP256K1_FE_CONST(0x647178a3, 0x229e607b, 0xcc98521a, 0xcce3fdd9, 0x1e1bc9c9, 0x97fb7c6a, 0x61b961e0, 0x99b10709), + SECP256K1_FE_CONST(0x98217c13, 0xd51ddf78, 0x96310e77, 0xdaebd908, 0x602ca683, 0xcb46d07a, 0xa1fcf17e, 0xc8e2feb3)}, + {SECP256K1_FE_CONST(0x7334627c, 0x73f98968, 0x99464b4b, 0xf5964958, 0x1b95870d, 0xc658227e, 0x5e3235d8, 0xdcab5787), + SECP256K1_FE_CONST(0x000006fd, 0xc7e9dd94, 0x40ae367a, 0xe51d495c, 0x07603b9b, 0x2d088418, 0x6cc5c74c, 0x98514307)}, + {SECP256K1_FE_CONST(0x82e83876, 0x96c28938, 0xa50dd1c5, 0x605c3ad1, 0xc048637d, 0x7a50825f, 0x335ed01a, 0x00005760), + SECP256K1_FE_CONST(0xb0393f9f, 0x9f2aa55e, 0xf5607e2e, 0x5287d961, 0x60b3e704, 0xf3e16e80, 0xb4f9a3ea, 0xfec7f02d)}, + {SECP256K1_FE_CONST(0xc97b6cec, 0x3ee6b8dc, 0x98d24b58, 0x3c1970a1, 0xfe06297a, 0xae813529, 0xe76bb6bd, 0x771ae51d), + SECP256K1_FE_CONST(0x0507c702, 0xd407d097, 0x47ddeb06, 0xf6625419, 0x79f48f79, 0x7bf80d0b, 0xfc34b364, 0x253a5db1)}, + {SECP256K1_FE_CONST(0xd559af63, 0x77ea9bc4, 0x3cf1ad14, 0x5c7a4bbb, 0x10e7d18b, 0x7ce0dfac, 0x380bb19d, 0x0bb99bd3), + SECP256K1_FE_CONST(0x00196119, 0xb9b00d92, 0x34edfdb5, 0xbbdc42fc, 0xd2daa33a, 0x163356ca, 0xaa8754c8, 0xb0ec8b0b)}, + {SECP256K1_FE_CONST(0x8ddfa3dc, 0x52918da0, 0x640519dc, 0x0af8512a, 0xca2d33b2, 0xbde52514, 0xda9c0afc, 0xcb29fce4), + SECP256K1_FE_CONST(0xb3e4878d, 0x5cb69148, 0xcd54388b, 0xc23acce0, 0x62518ba8, 0xf09def92, 0x7b31e6aa, 0x6ba35b02)}, + {SECP256K1_FE_CONST(0xf8207492, 0xe3049f0a, 0x65285f2b, 0x0bfff996, 0x00ca112e, 0xc05da837, 0x546d41f9, 0x5194fb91), + SECP256K1_FE_CONST(0x7b7ee50b, 0xa8ed4bbd, 0xf6469930, 0x81419a5c, 0x071441c7, 0x290d046e, 0x3b82ea41, 0x611c5f95)}, + {SECP256K1_FE_CONST(0x050f7c80, 0x5bcd3c6b, 0x823cb724, 0x5ce74db7, 0xa4e39f5c, 0xbd8828d7, 0xfd4d3e07, 0x3ec2926a), + SECP256K1_FE_CONST(0x000d6730, 0xb0171314, 0x4764053d, 0xee157117, 0x48fd61da, 0xdea0b9db, 0x1d5e91c6, 0xbdc3f59e)}, + {SECP256K1_FE_CONST(0x3e3ea8eb, 0x05d760cf, 0x23009263, 0xb3cb3ac9, 0x088f6f0d, 0x3fc182a3, 0xbd57087c, 0xe67c62f9), + SECP256K1_FE_CONST(0xbe988716, 0xa29c1bf6, 0x4456aed6, 0xab1e4720, 0x49929305, 0x51043bf4, 0xebd833dd, 0xdd511e8b)}, + {SECP256K1_FE_CONST(0x6964d2a9, 0xa7fa6501, 0xa5959249, 0x142f4029, 0xea0c1b5f, 0x2f487ef6, 0x301ac80a, 0x768be5cd), + SECP256K1_FE_CONST(0x3918ffe4, 0x07492543, 0xed24d0b7, 0x3df95f8f, 0xaffd7cb4, 0x0de2191c, 0x9ec2f2ad, 0x2c0cb3c6)}, + {SECP256K1_FE_CONST(0x37c93520, 0xf6ddca57, 0x2b42fd5e, 0xb5c7e4de, 0x11b5b81c, 0xb95e91f3, 0x95c4d156, 0x39877ccb), + SECP256K1_FE_CONST(0x9a94b9b5, 0x57eb71ee, 0x4c975b8b, 0xac5262a8, 0x077b0595, 0xe12a6b1f, 0xd728edef, 0x1a6bf956)} + }; + /* Fixed test cases for scalar inverses: pairs of (x, 1/x) mod n. 
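For example, the entry for 2 below is (n+1)/2, since 2*((n+1)/2) == n+1 == 1 (mod n).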
*/ + static const rustsecp256k1_v0_4_1_scalar scalar_cases[][2] = { + /* 0 */ + {SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0), + SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0)}, + /* 1 */ + {SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1), + SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1)}, + /* -1 */ + {SECP256K1_SCALAR_CONST(0xffffffff, 0xffffffff, 0xffffffff, 0xfffffffe, 0xbaaedce6, 0xaf48a03b, 0xbfd25e8c, 0xd0364140), + SECP256K1_SCALAR_CONST(0xffffffff, 0xffffffff, 0xffffffff, 0xfffffffe, 0xbaaedce6, 0xaf48a03b, 0xbfd25e8c, 0xd0364140)}, + /* 2 */ + {SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 2), + SECP256K1_SCALAR_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x5d576e73, 0x57a4501d, 0xdfe92f46, 0x681b20a1)}, + /* 2**128 */ + {SECP256K1_SCALAR_CONST(0, 0, 0, 1, 0, 0, 0, 0), + SECP256K1_SCALAR_CONST(0x50a51ac8, 0x34b9ec24, 0x4b0dff66, 0x5588b13e, 0x9984d5b3, 0xcf80ef0f, 0xd6a23766, 0xa3ee9f22)}, + /* Input known to need 635 divsteps */ + {SECP256K1_SCALAR_CONST(0xcb9f1d35, 0xdd4416c2, 0xcd71bf3f, 0x6365da66, 0x3c9b3376, 0x8feb7ae9, 0x32a5ef60, 0x19199ec3), + SECP256K1_SCALAR_CONST(0x1d7c7bba, 0xf1893d53, 0xb834bd09, 0x36b411dc, 0x42c2e42f, 0xec72c428, 0x5e189791, 0x8e9bc708)}, + /* Input known to need 566 divsteps starting with delta=1/2. */ + {SECP256K1_SCALAR_CONST(0x7e3c993d, 0xa4272488, 0xbc015b49, 0x2db54174, 0xd382083a, 0xebe6db35, 0x80f82eff, 0xcd132c72), + SECP256K1_SCALAR_CONST(0x086f34a0, 0x3e631f76, 0x77418f28, 0xcc84ac95, 0x6304439d, 0x365db268, 0x312c6ded, 0xd0b934f8)}, + /* Input known to need 565 divsteps starting with delta=1/2. */ + {SECP256K1_SCALAR_CONST(0xbad7e587, 0x3f307859, 0x60d93147, 0x8a18491e, 0xb38a9fd5, 0x254350d3, 0x4b1f0e4b, 0x7dd6edc4), + SECP256K1_SCALAR_CONST(0x89f2df26, 0x39e2b041, 0xf19bd876, 0xd039c8ac, 0xc2223add, 0x29c4943e, 0x6632d908, 0x515f467b)}, + /* Selection of randomly generated inputs that reach low/high d/e values in various configurations. 
*/ + {SECP256K1_SCALAR_CONST(0x1950d757, 0xb37a5809, 0x435059bb, 0x0bb8997e, 0x07e1e3c8, 0x5e5d7d2c, 0x6a0ed8e3, 0xdbde180e), + SECP256K1_SCALAR_CONST(0xbf72af9b, 0x750309e2, 0x8dda230b, 0xfe432b93, 0x7e25e475, 0x4388251e, 0x633d894b, 0x3bcb6f8c)}, + {SECP256K1_SCALAR_CONST(0x9bccf4e7, 0xc5a515e3, 0x50637aa9, 0xbb65a13f, 0x391749a1, 0x62de7d4e, 0xf6d7eabb, 0x3cd10ce0), + SECP256K1_SCALAR_CONST(0xaf2d5623, 0xb6385a33, 0xcd0365be, 0x5e92a70d, 0x7f09179c, 0x3baaf30f, 0x8f9cc83b, 0x20092f67)}, + {SECP256K1_SCALAR_CONST(0x73a57111, 0xb242952a, 0x5c5dee59, 0xf3be2ace, 0xa30a7659, 0xa46e5f47, 0xd21267b1, 0x39e642c9), + SECP256K1_SCALAR_CONST(0xa711df07, 0xcbcf13ef, 0xd61cc6be, 0xbcd058ce, 0xb02cf157, 0x272d4a18, 0x86d0feb3, 0xcd5fa004)}, + {SECP256K1_SCALAR_CONST(0x04884963, 0xce0580b1, 0xba547030, 0x3c691db3, 0x9cd2c84f, 0x24c7cebd, 0x97ebfdba, 0x3e785ec2), + SECP256K1_SCALAR_CONST(0xaaaaaf14, 0xd7c99ba7, 0x517ce2c1, 0x78a28b4c, 0x3769a851, 0xe5c5a03d, 0x4cc28f33, 0x0ec4dc5d)}, + {SECP256K1_SCALAR_CONST(0x1679ed49, 0x21f537b1, 0x815cb8ae, 0x9efc511c, 0x5b9fa037, 0x0b0f275e, 0x6c985281, 0x6c4a9905), + SECP256K1_SCALAR_CONST(0xb14ac3d5, 0x62b52999, 0xef34ead1, 0xffca4998, 0x0294341a, 0x1f8172aa, 0xea1624f9, 0x302eea62)}, + {SECP256K1_SCALAR_CONST(0x626b37c0, 0xf0057c35, 0xee982f83, 0x452a1fd3, 0xea826506, 0x48b08a9d, 0x1d2c4799, 0x4ad5f6ec), + SECP256K1_SCALAR_CONST(0xe38643b7, 0x567bfc2f, 0x5d2f1c15, 0xe327239c, 0x07112443, 0x69509283, 0xfd98e77a, 0xdb71c1e8)}, + {SECP256K1_SCALAR_CONST(0x1850a3a7, 0x759efc56, 0x54f287b2, 0x14d1234b, 0xe263bbc9, 0xcf4d8927, 0xd5f85f27, 0x965bd816), + SECP256K1_SCALAR_CONST(0x3b071831, 0xcac9619a, 0xcceb0596, 0xf614d63b, 0x95d0db2f, 0xc6a00901, 0x8eaa2621, 0xabfa0009)}, + {SECP256K1_SCALAR_CONST(0x94ae5d06, 0xa27dc400, 0x487d72be, 0xaa51ebed, 0xe475b5c0, 0xea675ffc, 0xf4df627a, 0xdca4222f), + SECP256K1_SCALAR_CONST(0x01b412ed, 0xd7830956, 0x1532537e, 0xe5e3dc99, 0x8fd3930a, 0x54f8d067, 0x32ef5760, 0x594438a5)}, + {SECP256K1_SCALAR_CONST(0x1f24278a, 0xb5bfe374, 0xa328dbbc, 0xebe35f48, 0x6620e009, 0xd58bb1b4, 0xb5a6bf84, 0x8815f63a), + SECP256K1_SCALAR_CONST(0xfe928416, 0xca5ba2d3, 0xfde513da, 0x903a60c7, 0x9e58ad8a, 0x8783bee4, 0x083a3843, 0xa608c914)}, + {SECP256K1_SCALAR_CONST(0xdc107d58, 0x274f6330, 0x67dba8bc, 0x26093111, 0x5201dfb8, 0x968ce3f5, 0xf34d1bd4, 0xf2146504), + SECP256K1_SCALAR_CONST(0x660cfa90, 0x13c3d93e, 0x7023b1e5, 0xedd09e71, 0x6d9c9d10, 0x7a3d2cdb, 0xdd08edc3, 0xaa78fcfb)}, + {SECP256K1_SCALAR_CONST(0x7cd1e905, 0xc6f02776, 0x2f551cc7, 0x5da61cff, 0x7da05389, 0x1119d5a4, 0x631c7442, 0x894fd4f7), + SECP256K1_SCALAR_CONST(0xff20862a, 0x9d3b1a37, 0x1628803b, 0x3004ccae, 0xaa23282a, 0xa89a1109, 0xd94ece5e, 0x181bdc46)}, + {SECP256K1_SCALAR_CONST(0x5b9dade8, 0x23d26c58, 0xcd12d818, 0x25b8ae97, 0x3dea04af, 0xf482c96b, 0xa062f254, 0x9e453640), + SECP256K1_SCALAR_CONST(0x50c38800, 0x15fa53f4, 0xbe1e5392, 0x5c9b120a, 0x262c22c7, 0x18fa0816, 0x5f2baab4, 0x8cb5db46)}, + {SECP256K1_SCALAR_CONST(0x11cdaeda, 0x969c464b, 0xef1f4ab0, 0x5b01d22e, 0x656fd098, 0x882bea84, 0x65cdbe7a, 0x0c19ff03), + SECP256K1_SCALAR_CONST(0x1968d0fa, 0xac46f103, 0xb55f1f72, 0xb3820bed, 0xec6b359a, 0x4b1ae0ad, 0x7e38e1fb, 0x295ccdfb)}, + {SECP256K1_SCALAR_CONST(0x2c351aa1, 0x26e91589, 0x194f8a1e, 0x06561f66, 0x0cb97b7f, 0x10914454, 0x134d1c03, 0x157266b4), + SECP256K1_SCALAR_CONST(0xbe49ada6, 0x92bd8711, 0x41b176c4, 0xa478ba95, 0x14883434, 0x9d1cd6f3, 0xcc4b847d, 0x22af80f5)}, + {SECP256K1_SCALAR_CONST(0x6ba07c6e, 0x13a60edb, 0x6247f5c3, 0x84b5fa56, 0x76fe3ec5, 0x80426395, 
0xf65ec2ae, 0x623ba730), + SECP256K1_SCALAR_CONST(0x25ac23f7, 0x418cd747, 0x98376f9d, 0x4a11c7bf, 0x24c8ebfe, 0x4c8a8655, 0x345f4f52, 0x1c515595)}, + {SECP256K1_SCALAR_CONST(0x9397a712, 0x8abb6951, 0x2d4a3d54, 0x703b1c2a, 0x0661dca8, 0xd75c9b31, 0xaed4d24b, 0xd2ab2948), + SECP256K1_SCALAR_CONST(0xc52e8bef, 0xd55ce3eb, 0x1c897739, 0xeb9fb606, 0x36b9cd57, 0x18c51cc2, 0x6a87489e, 0xffd0dcf3)}, + {SECP256K1_SCALAR_CONST(0xe6a808cc, 0xeb437888, 0xe97798df, 0x4e224e44, 0x7e3b380a, 0x207c1653, 0x889f3212, 0xc6738b6f), + SECP256K1_SCALAR_CONST(0x31f9ae13, 0xd1e08b20, 0x757a2e5e, 0x5243a0eb, 0x8ae35f73, 0x19bb6122, 0xb910f26b, 0xda70aa55)}, + {SECP256K1_SCALAR_CONST(0xd0320548, 0xab0effe7, 0xa70779e0, 0x61a347a6, 0xb8c1e010, 0x9d5281f8, 0x2ee588a6, 0x80000000), + SECP256K1_SCALAR_CONST(0x1541897e, 0x78195c90, 0x7583dd9e, 0x728b6100, 0xbce8bc6d, 0x7a53b471, 0x5dcd9e45, 0x4425fcaf)}, + {SECP256K1_SCALAR_CONST(0x93d623f1, 0xd45b50b0, 0x796e9186, 0x9eac9407, 0xd30edc20, 0xef6304cf, 0x250494e7, 0xba503de9), + SECP256K1_SCALAR_CONST(0x7026d638, 0x1178b548, 0x92043952, 0x3c7fb47c, 0xcd3ea236, 0x31d82b01, 0x612fc387, 0x80b9b957)}, + {SECP256K1_SCALAR_CONST(0xf860ab39, 0x55f5d412, 0xa4d73bcc, 0x3b48bd90, 0xc248ffd3, 0x13ca10be, 0x8fba84cc, 0xdd28d6a3), + SECP256K1_SCALAR_CONST(0x5c32fc70, 0xe0b15d67, 0x76694700, 0xfe62be4d, 0xeacdb229, 0x7a4433d9, 0x52155cd0, 0x7649ab59)}, + {SECP256K1_SCALAR_CONST(0x4e41311c, 0x0800af58, 0x7a690a8e, 0xe175c9ba, 0x6981ab73, 0xac532ea8, 0x5c1f5e63, 0x6ac1f189), + SECP256K1_SCALAR_CONST(0xfffffff9, 0xd075982c, 0x7fbd3825, 0xc05038a2, 0x4533b91f, 0x94ec5f45, 0xb280b28f, 0x842324dc)}, + {SECP256K1_SCALAR_CONST(0x48e473bf, 0x3555eade, 0xad5d7089, 0x2424c4e4, 0x0a99397c, 0x2dc796d8, 0xb7a43a69, 0xd0364141), + SECP256K1_SCALAR_CONST(0x634976b2, 0xa0e47895, 0x1ec38593, 0x266d6fd0, 0x6f602644, 0x9bb762f1, 0x7180c704, 0xe23a4daa)}, + {SECP256K1_SCALAR_CONST(0xbe83878d, 0x3292fc54, 0x26e71c62, 0x556ccedc, 0x7cbb8810, 0x4032a720, 0x34ead589, 0xe4d6bd13), + SECP256K1_SCALAR_CONST(0x6cd150ad, 0x25e59d0f, 0x74cbae3d, 0x6377534a, 0x1e6562e8, 0xb71b9d18, 0xe1e5d712, 0x8480abb3)}, + {SECP256K1_SCALAR_CONST(0xcdddf2e5, 0xefc15f88, 0xc9ee06de, 0x8a846ca9, 0x28561581, 0x68daa5fb, 0xd1cf3451, 0xeb1782d0), + SECP256K1_SCALAR_CONST(0xffffffd9, 0xed8d2af4, 0x993c865a, 0x23e9681a, 0x3ca3a3dc, 0xe6d5a46e, 0xbd86bd87, 0x61b55c70)}, + {SECP256K1_SCALAR_CONST(0xb6a18f1f, 0x04872df9, 0x08165ec4, 0x319ca19c, 0x6c0359ab, 0x1f7118fb, 0xc2ef8082, 0xca8b7785), + SECP256K1_SCALAR_CONST(0xff55b19b, 0x0f1ac78c, 0x0f0c88c2, 0x2358d5ad, 0x5f455e4e, 0x3330b72f, 0x274dc153, 0xffbf272b)}, + {SECP256K1_SCALAR_CONST(0xea4898e5, 0x30eba3e8, 0xcf0e5c3d, 0x06ec6844, 0x01e26fb6, 0x75636225, 0xc5d08f4c, 0x1decafa0), + SECP256K1_SCALAR_CONST(0xe5a014a8, 0xe3c4ec1e, 0xea4f9b32, 0xcfc7b386, 0x00630806, 0x12c08d02, 0x6407ccc2, 0xb067d90e)}, + {SECP256K1_SCALAR_CONST(0x70e9aea9, 0x7e933af0, 0x8a23bfab, 0x23e4b772, 0xff951863, 0x5ffcf47d, 0x6bebc918, 0x2ca58265), + SECP256K1_SCALAR_CONST(0xf4e00006, 0x81bc6441, 0x4eb6ec02, 0xc194a859, 0x80ad7c48, 0xba4e9afb, 0x8b6bdbe0, 0x989d8f77)}, + {SECP256K1_SCALAR_CONST(0x3c56c774, 0x46efe6f0, 0xe93618b8, 0xf9b5a846, 0xd247df61, 0x83b1e215, 0x06dc8bcc, 0xeefc1bf5), + SECP256K1_SCALAR_CONST(0xfff8937a, 0x2cd9586b, 0x43c25e57, 0xd1cefa7a, 0x9fb91ed3, 0x95b6533d, 0x8ad0de5b, 0xafb93f00)}, + {SECP256K1_SCALAR_CONST(0xfb5c2772, 0x5cb30e83, 0xe38264df, 0xe4e3ebf3, 0x392aa92e, 0xa68756a1, 0x51279ac5, 0xb50711a8), + SECP256K1_SCALAR_CONST(0x000013af, 0x1105bfe7, 0xa6bbd7fb, 0x3d638f99, 0x3b266b02, 
0x072fb8bc, 0x39251130, 0x2e0fd0ea)} + }; + int i, var, testrand; + unsigned char b32[32]; + rustsecp256k1_v0_4_1_fe x_fe; + rustsecp256k1_v0_4_1_scalar x_scalar; + memset(b32, 0, sizeof(b32)); + /* Test fixed test cases through test_inverse_{scalar,field}, both ways. */ + for (i = 0; (size_t)i < sizeof(fe_cases)/sizeof(fe_cases[0]); ++i) { + for (var = 0; var <= 1; ++var) { + test_inverse_field(&x_fe, &fe_cases[i][0], var); + CHECK(check_fe_equal(&x_fe, &fe_cases[i][1])); + test_inverse_field(&x_fe, &fe_cases[i][1], var); + CHECK(check_fe_equal(&x_fe, &fe_cases[i][0])); + } + } + for (i = 0; (size_t)i < sizeof(scalar_cases)/sizeof(scalar_cases[0]); ++i) { + for (var = 0; var <= 1; ++var) { + test_inverse_scalar(&x_scalar, &scalar_cases[i][0], var); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&x_scalar, &scalar_cases[i][1])); + test_inverse_scalar(&x_scalar, &scalar_cases[i][1], var); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&x_scalar, &scalar_cases[i][0])); + } + } + /* Test inputs 0..999 and their respective negations. */ + for (i = 0; i < 1000; ++i) { + b32[31] = i & 0xff; + b32[30] = (i >> 8) & 0xff; + rustsecp256k1_v0_4_1_scalar_set_b32(&x_scalar, b32, NULL); + rustsecp256k1_v0_4_1_fe_set_b32(&x_fe, b32); + for (var = 0; var <= 1; ++var) { + test_inverse_scalar(NULL, &x_scalar, var); + test_inverse_field(NULL, &x_fe, var); + } + rustsecp256k1_v0_4_1_scalar_negate(&x_scalar, &x_scalar); + rustsecp256k1_v0_4_1_fe_negate(&x_fe, &x_fe, 1); + for (var = 0; var <= 1; ++var) { + test_inverse_scalar(NULL, &x_scalar, var); + test_inverse_field(NULL, &x_fe, var); + } + } + /* test 128*count random inputs; half with testrand256_test, half with testrand256 */ + for (testrand = 0; testrand <= 1; ++testrand) { + for (i = 0; i < 64 * count; ++i) { + (testrand ? rustsecp256k1_v0_4_1_testrand256_test : rustsecp256k1_v0_4_1_testrand256)(b32); + rustsecp256k1_v0_4_1_scalar_set_b32(&x_scalar, b32, NULL); + rustsecp256k1_v0_4_1_fe_set_b32(&x_fe, b32); + for (var = 0; var <= 1; ++var) { + test_inverse_scalar(NULL, &x_scalar, var); + test_inverse_field(NULL, &x_fe, var); + } + } + } +} + /***** GROUP TESTS *****/ -void ge_equals_ge(const rustsecp256k1_v0_4_0_ge *a, const rustsecp256k1_v0_4_0_ge *b) { +void ge_equals_ge(const rustsecp256k1_v0_4_1_ge *a, const rustsecp256k1_v0_4_1_ge *b) { CHECK(a->infinity == b->infinity); if (a->infinity) { return; } - CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&a->x, &b->x)); - CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&a->y, &b->y)); + CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&a->x, &b->x)); + CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&a->y, &b->y)); } /* This compares jacobian points including their Z, not just their geometric meaning.
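Jacobian triples (X, Y, Z) and (c^2*X, c^3*Y, c*Z) denote the same affine point for any nonzero c, so two gejs can be geometrically equal while still failing this comparison.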
*/ -int gej_xyz_equals_gej(const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_gej *b) { - rustsecp256k1_v0_4_0_gej a2; - rustsecp256k1_v0_4_0_gej b2; +int gej_xyz_equals_gej(const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_gej *b) { + rustsecp256k1_v0_4_1_gej a2; + rustsecp256k1_v0_4_1_gej b2; int ret = 1; ret &= a->infinity == b->infinity; if (ret && !a->infinity) { a2 = *a; b2 = *b; - rustsecp256k1_v0_4_0_fe_normalize(&a2.x); - rustsecp256k1_v0_4_0_fe_normalize(&a2.y); - rustsecp256k1_v0_4_0_fe_normalize(&a2.z); - rustsecp256k1_v0_4_0_fe_normalize(&b2.x); - rustsecp256k1_v0_4_0_fe_normalize(&b2.y); - rustsecp256k1_v0_4_0_fe_normalize(&b2.z); - ret &= rustsecp256k1_v0_4_0_fe_cmp_var(&a2.x, &b2.x) == 0; - ret &= rustsecp256k1_v0_4_0_fe_cmp_var(&a2.y, &b2.y) == 0; - ret &= rustsecp256k1_v0_4_0_fe_cmp_var(&a2.z, &b2.z) == 0; + rustsecp256k1_v0_4_1_fe_normalize(&a2.x); + rustsecp256k1_v0_4_1_fe_normalize(&a2.y); + rustsecp256k1_v0_4_1_fe_normalize(&a2.z); + rustsecp256k1_v0_4_1_fe_normalize(&b2.x); + rustsecp256k1_v0_4_1_fe_normalize(&b2.y); + rustsecp256k1_v0_4_1_fe_normalize(&b2.z); + ret &= rustsecp256k1_v0_4_1_fe_cmp_var(&a2.x, &b2.x) == 0; + ret &= rustsecp256k1_v0_4_1_fe_cmp_var(&a2.y, &b2.y) == 0; + ret &= rustsecp256k1_v0_4_1_fe_cmp_var(&a2.z, &b2.z) == 0; } return ret; } -void ge_equals_gej(const rustsecp256k1_v0_4_0_ge *a, const rustsecp256k1_v0_4_0_gej *b) { - rustsecp256k1_v0_4_0_fe z2s; - rustsecp256k1_v0_4_0_fe u1, u2, s1, s2; +void ge_equals_gej(const rustsecp256k1_v0_4_1_ge *a, const rustsecp256k1_v0_4_1_gej *b) { + rustsecp256k1_v0_4_1_fe z2s; + rustsecp256k1_v0_4_1_fe u1, u2, s1, s2; CHECK(a->infinity == b->infinity); if (a->infinity) { return; } /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. */ - rustsecp256k1_v0_4_0_fe_sqr(&z2s, &b->z); - rustsecp256k1_v0_4_0_fe_mul(&u1, &a->x, &z2s); - u2 = b->x; rustsecp256k1_v0_4_0_fe_normalize_weak(&u2); - rustsecp256k1_v0_4_0_fe_mul(&s1, &a->y, &z2s); rustsecp256k1_v0_4_0_fe_mul(&s1, &s1, &b->z); - s2 = b->y; rustsecp256k1_v0_4_0_fe_normalize_weak(&s2); - CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&u1, &u2)); - CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&s1, &s2)); + rustsecp256k1_v0_4_1_fe_sqr(&z2s, &b->z); + rustsecp256k1_v0_4_1_fe_mul(&u1, &a->x, &z2s); + u2 = b->x; rustsecp256k1_v0_4_1_fe_normalize_weak(&u2); + rustsecp256k1_v0_4_1_fe_mul(&s1, &a->y, &z2s); rustsecp256k1_v0_4_1_fe_mul(&s1, &s1, &b->z); + s2 = b->y; rustsecp256k1_v0_4_1_fe_normalize_weak(&s2); + CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&u1, &u2)); + CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&s1, &s2)); } void test_ge(void) { @@ -2109,32 +2943,31 @@ void test_ge(void) { * negation, and then those two again but with randomized Z coordinate. * - The same is then done for lambda*p1 and lambda^2*p1. 
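(lambda here is the secp256k1 endomorphism scalar, with lambda^3 == 1 mod n; ge_mul_lambda applies it by multiplying the x coordinate by beta, giving cheap additional test points.)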
*/ - rustsecp256k1_v0_4_0_ge *ge = (rustsecp256k1_v0_4_0_ge *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_4_0_ge) * (1 + 4 * runs)); - rustsecp256k1_v0_4_0_gej *gej = (rustsecp256k1_v0_4_0_gej *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_4_0_gej) * (1 + 4 * runs)); - rustsecp256k1_v0_4_0_fe *zinv = (rustsecp256k1_v0_4_0_fe *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_4_0_fe) * (1 + 4 * runs)); - rustsecp256k1_v0_4_0_fe zf; - rustsecp256k1_v0_4_0_fe zfi2, zfi3; + rustsecp256k1_v0_4_1_ge *ge = (rustsecp256k1_v0_4_1_ge *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_4_1_ge) * (1 + 4 * runs)); + rustsecp256k1_v0_4_1_gej *gej = (rustsecp256k1_v0_4_1_gej *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_4_1_gej) * (1 + 4 * runs)); + rustsecp256k1_v0_4_1_fe zf; + rustsecp256k1_v0_4_1_fe zfi2, zfi3; - rustsecp256k1_v0_4_0_gej_set_infinity(&gej[0]); - rustsecp256k1_v0_4_0_ge_clear(&ge[0]); - rustsecp256k1_v0_4_0_ge_set_gej_var(&ge[0], &gej[0]); + rustsecp256k1_v0_4_1_gej_set_infinity(&gej[0]); + rustsecp256k1_v0_4_1_ge_clear(&ge[0]); + rustsecp256k1_v0_4_1_ge_set_gej_var(&ge[0], &gej[0]); for (i = 0; i < runs; i++) { int j; - rustsecp256k1_v0_4_0_ge g; + rustsecp256k1_v0_4_1_ge g; random_group_element_test(&g); if (i >= runs - 2) { - rustsecp256k1_v0_4_0_ge_mul_lambda(&g, &ge[1]); + rustsecp256k1_v0_4_1_ge_mul_lambda(&g, &ge[1]); } if (i >= runs - 1) { - rustsecp256k1_v0_4_0_ge_mul_lambda(&g, &g); + rustsecp256k1_v0_4_1_ge_mul_lambda(&g, &g); } ge[1 + 4 * i] = g; ge[2 + 4 * i] = g; - rustsecp256k1_v0_4_0_ge_neg(&ge[3 + 4 * i], &g); - rustsecp256k1_v0_4_0_ge_neg(&ge[4 + 4 * i], &g); - rustsecp256k1_v0_4_0_gej_set_ge(&gej[1 + 4 * i], &ge[1 + 4 * i]); + rustsecp256k1_v0_4_1_ge_neg(&ge[3 + 4 * i], &g); + rustsecp256k1_v0_4_1_ge_neg(&ge[4 + 4 * i], &g); + rustsecp256k1_v0_4_1_gej_set_ge(&gej[1 + 4 * i], &ge[1 + 4 * i]); random_group_element_jacobian_test(&gej[2 + 4 * i], &ge[2 + 4 * i]); - rustsecp256k1_v0_4_0_gej_set_ge(&gej[3 + 4 * i], &ge[3 + 4 * i]); + rustsecp256k1_v0_4_1_gej_set_ge(&gej[3 + 4 * i], &ge[3 + 4 * i]); random_group_element_jacobian_test(&gej[4 + 4 * i], &ge[4 + 4 * i]); for (j = 0; j < 4; j++) { random_field_element_magnitude(&ge[1 + j + 4 * i].x); @@ -2145,104 +2978,87 @@ void test_ge(void) { } } - /* Compute z inverses. */ - { - rustsecp256k1_v0_4_0_fe *zs = checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_4_0_fe) * (1 + 4 * runs)); - for (i = 0; i < 4 * runs + 1; i++) { - if (i == 0) { - /* The point at infinity does not have a meaningful z inverse. Any should do. */ - do { - random_field_element_test(&zs[i]); - } while(rustsecp256k1_v0_4_0_fe_is_zero(&zs[i])); - } else { - zs[i] = gej[i].z; - } - } - rustsecp256k1_v0_4_0_fe_inv_all_var(zinv, zs, 4 * runs + 1); - free(zs); - } - /* Generate random zf, and zfi2 = 1/zf^2, zfi3 = 1/zf^3 */ do { random_field_element_test(&zf); - } while(rustsecp256k1_v0_4_0_fe_is_zero(&zf)); + } while(rustsecp256k1_v0_4_1_fe_is_zero(&zf)); random_field_element_magnitude(&zf); - rustsecp256k1_v0_4_0_fe_inv_var(&zfi3, &zf); - rustsecp256k1_v0_4_0_fe_sqr(&zfi2, &zfi3); - rustsecp256k1_v0_4_0_fe_mul(&zfi3, &zfi3, &zfi2); + rustsecp256k1_v0_4_1_fe_inv_var(&zfi3, &zf); + rustsecp256k1_v0_4_1_fe_sqr(&zfi2, &zfi3); + rustsecp256k1_v0_4_1_fe_mul(&zfi3, &zfi3, &zfi2); for (i1 = 0; i1 < 1 + 4 * runs; i1++) { int i2; for (i2 = 0; i2 < 1 + 4 * runs; i2++) { /* Compute reference result using gej + gej (var). 
*/ - rustsecp256k1_v0_4_0_gej refj, resj; - rustsecp256k1_v0_4_0_ge ref; - rustsecp256k1_v0_4_0_fe zr; - rustsecp256k1_v0_4_0_gej_add_var(&refj, &gej[i1], &gej[i2], rustsecp256k1_v0_4_0_gej_is_infinity(&gej[i1]) ? NULL : &zr); + rustsecp256k1_v0_4_1_gej refj, resj; + rustsecp256k1_v0_4_1_ge ref; + rustsecp256k1_v0_4_1_fe zr; + rustsecp256k1_v0_4_1_gej_add_var(&refj, &gej[i1], &gej[i2], rustsecp256k1_v0_4_1_gej_is_infinity(&gej[i1]) ? NULL : &zr); /* Check Z ratio. */ - if (!rustsecp256k1_v0_4_0_gej_is_infinity(&gej[i1]) && !rustsecp256k1_v0_4_0_gej_is_infinity(&refj)) { - rustsecp256k1_v0_4_0_fe zrz; rustsecp256k1_v0_4_0_fe_mul(&zrz, &zr, &gej[i1].z); - CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&zrz, &refj.z)); + if (!rustsecp256k1_v0_4_1_gej_is_infinity(&gej[i1]) && !rustsecp256k1_v0_4_1_gej_is_infinity(&refj)) { + rustsecp256k1_v0_4_1_fe zrz; rustsecp256k1_v0_4_1_fe_mul(&zrz, &zr, &gej[i1].z); + CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&zrz, &refj.z)); } - rustsecp256k1_v0_4_0_ge_set_gej_var(&ref, &refj); + rustsecp256k1_v0_4_1_ge_set_gej_var(&ref, &refj); /* Test gej + ge with Z ratio result (var). */ - rustsecp256k1_v0_4_0_gej_add_ge_var(&resj, &gej[i1], &ge[i2], rustsecp256k1_v0_4_0_gej_is_infinity(&gej[i1]) ? NULL : &zr); + rustsecp256k1_v0_4_1_gej_add_ge_var(&resj, &gej[i1], &ge[i2], rustsecp256k1_v0_4_1_gej_is_infinity(&gej[i1]) ? NULL : &zr); ge_equals_gej(&ref, &resj); - if (!rustsecp256k1_v0_4_0_gej_is_infinity(&gej[i1]) && !rustsecp256k1_v0_4_0_gej_is_infinity(&resj)) { - rustsecp256k1_v0_4_0_fe zrz; rustsecp256k1_v0_4_0_fe_mul(&zrz, &zr, &gej[i1].z); - CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&zrz, &resj.z)); + if (!rustsecp256k1_v0_4_1_gej_is_infinity(&gej[i1]) && !rustsecp256k1_v0_4_1_gej_is_infinity(&resj)) { + rustsecp256k1_v0_4_1_fe zrz; rustsecp256k1_v0_4_1_fe_mul(&zrz, &zr, &gej[i1].z); + CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&zrz, &resj.z)); } /* Test gej + ge (var, with additional Z factor). */ { - rustsecp256k1_v0_4_0_ge ge2_zfi = ge[i2]; /* the second term with x and y rescaled for z = 1/zf */ - rustsecp256k1_v0_4_0_fe_mul(&ge2_zfi.x, &ge2_zfi.x, &zfi2); - rustsecp256k1_v0_4_0_fe_mul(&ge2_zfi.y, &ge2_zfi.y, &zfi3); + rustsecp256k1_v0_4_1_ge ge2_zfi = ge[i2]; /* the second term with x and y rescaled for z = 1/zf */ + rustsecp256k1_v0_4_1_fe_mul(&ge2_zfi.x, &ge2_zfi.x, &zfi2); + rustsecp256k1_v0_4_1_fe_mul(&ge2_zfi.y, &ge2_zfi.y, &zfi3); random_field_element_magnitude(&ge2_zfi.x); random_field_element_magnitude(&ge2_zfi.y); - rustsecp256k1_v0_4_0_gej_add_zinv_var(&resj, &gej[i1], &ge2_zfi, &zf); + rustsecp256k1_v0_4_1_gej_add_zinv_var(&resj, &gej[i1], &ge2_zfi, &zf); ge_equals_gej(&ref, &resj); } /* Test gej + ge (const). */ if (i2 != 0) { - /* rustsecp256k1_v0_4_0_gej_add_ge does not support its second argument being infinity. */ - rustsecp256k1_v0_4_0_gej_add_ge(&resj, &gej[i1], &ge[i2]); + /* rustsecp256k1_v0_4_1_gej_add_ge does not support its second argument being infinity. */ + rustsecp256k1_v0_4_1_gej_add_ge(&resj, &gej[i1], &ge[i2]); ge_equals_gej(&ref, &resj); } /* Test doubling (var). */ if ((i1 == 0 && i2 == 0) || ((i1 + 3)/4 == (i2 + 3)/4 && ((i1 + 3)%4)/2 == ((i2 + 3)%4)/2)) { - rustsecp256k1_v0_4_0_fe zr2; + rustsecp256k1_v0_4_1_fe zr2; /* Normal doubling with Z ratio result. */ - rustsecp256k1_v0_4_0_gej_double_var(&resj, &gej[i1], &zr2); + rustsecp256k1_v0_4_1_gej_double_var(&resj, &gej[i1], &zr2); ge_equals_gej(&ref, &resj); /* Check Z ratio. 
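gej_double_var reports zr2 as the ratio resj.z / gej[i1].z, so multiplying zr2 by the input's Z must reproduce the result's Z.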
*/ - rustsecp256k1_v0_4_0_fe_mul(&zr2, &zr2, &gej[i1].z); - CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&zr2, &resj.z)); + rustsecp256k1_v0_4_1_fe_mul(&zr2, &zr2, &gej[i1].z); + CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&zr2, &resj.z)); /* Normal doubling. */ - rustsecp256k1_v0_4_0_gej_double_var(&resj, &gej[i2], NULL); + rustsecp256k1_v0_4_1_gej_double_var(&resj, &gej[i2], NULL); ge_equals_gej(&ref, &resj); /* Constant-time doubling. */ - rustsecp256k1_v0_4_0_gej_double(&resj, &gej[i2]); + rustsecp256k1_v0_4_1_gej_double(&resj, &gej[i2]); ge_equals_gej(&ref, &resj); } /* Test adding opposites. */ if ((i1 == 0 && i2 == 0) || ((i1 + 3)/4 == (i2 + 3)/4 && ((i1 + 3)%4)/2 != ((i2 + 3)%4)/2)) { - CHECK(rustsecp256k1_v0_4_0_ge_is_infinity(&ref)); + CHECK(rustsecp256k1_v0_4_1_ge_is_infinity(&ref)); } /* Test adding infinity. */ if (i1 == 0) { - CHECK(rustsecp256k1_v0_4_0_ge_is_infinity(&ge[i1])); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&gej[i1])); + CHECK(rustsecp256k1_v0_4_1_ge_is_infinity(&ge[i1])); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&gej[i1])); ge_equals_gej(&ref, &gej[i2]); } if (i2 == 0) { - CHECK(rustsecp256k1_v0_4_0_ge_is_infinity(&ge[i2])); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&gej[i2])); + CHECK(rustsecp256k1_v0_4_1_ge_is_infinity(&ge[i2])); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&gej[i2])); ge_equals_gej(&ref, &gej[i1]); } } @@ -2250,97 +3066,102 @@ void test_ge(void) { /* Test adding all points together in random order equals infinity. */ { - rustsecp256k1_v0_4_0_gej sum = SECP256K1_GEJ_CONST_INFINITY; - rustsecp256k1_v0_4_0_gej *gej_shuffled = (rustsecp256k1_v0_4_0_gej *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(rustsecp256k1_v0_4_0_gej)); + rustsecp256k1_v0_4_1_gej sum = SECP256K1_GEJ_CONST_INFINITY; + rustsecp256k1_v0_4_1_gej *gej_shuffled = (rustsecp256k1_v0_4_1_gej *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(rustsecp256k1_v0_4_1_gej)); for (i = 0; i < 4 * runs + 1; i++) { gej_shuffled[i] = gej[i]; } for (i = 0; i < 4 * runs + 1; i++) { - int swap = i + rustsecp256k1_v0_4_0_testrand_int(4 * runs + 1 - i); + int swap = i + rustsecp256k1_v0_4_1_testrand_int(4 * runs + 1 - i); if (swap != i) { - rustsecp256k1_v0_4_0_gej t = gej_shuffled[i]; + rustsecp256k1_v0_4_1_gej t = gej_shuffled[i]; gej_shuffled[i] = gej_shuffled[swap]; gej_shuffled[swap] = t; } } for (i = 0; i < 4 * runs + 1; i++) { - rustsecp256k1_v0_4_0_gej_add_var(&sum, &sum, &gej_shuffled[i], NULL); + rustsecp256k1_v0_4_1_gej_add_var(&sum, &sum, &gej_shuffled[i], NULL); } - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&sum)); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&sum)); free(gej_shuffled); } - /* Test batch gej -> ge conversion with and without known z ratios. */ + /* Test batch gej -> ge conversion without known z ratios. */ { - rustsecp256k1_v0_4_0_fe *zr = (rustsecp256k1_v0_4_0_fe *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(rustsecp256k1_v0_4_0_fe)); - rustsecp256k1_v0_4_0_ge *ge_set_all = (rustsecp256k1_v0_4_0_ge *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(rustsecp256k1_v0_4_0_ge)); + rustsecp256k1_v0_4_1_ge *ge_set_all = (rustsecp256k1_v0_4_1_ge *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(rustsecp256k1_v0_4_1_ge)); + rustsecp256k1_v0_4_1_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1); for (i = 0; i < 4 * runs + 1; i++) { - /* Compute gej[i + 1].z / gez[i].z (with gej[n].z taken to be 1). 
*/ - if (i < 4 * runs) { - rustsecp256k1_v0_4_0_fe_mul(&zr[i + 1], &zinv[i], &gej[i + 1].z); - } - } - rustsecp256k1_v0_4_0_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1); - for (i = 0; i < 4 * runs + 1; i++) { - rustsecp256k1_v0_4_0_fe s; + rustsecp256k1_v0_4_1_fe s; random_fe_non_zero(&s); - rustsecp256k1_v0_4_0_gej_rescale(&gej[i], &s); + rustsecp256k1_v0_4_1_gej_rescale(&gej[i], &s); ge_equals_gej(&ge_set_all[i], &gej[i]); } free(ge_set_all); - free(zr); } /* Test batch gej -> ge conversion with many infinities. */ for (i = 0; i < 4 * runs + 1; i++) { + int odd; random_group_element_test(&ge[i]); + odd = rustsecp256k1_v0_4_1_fe_is_odd(&ge[i].x); + CHECK(odd == 0 || odd == 1); /* randomly set half the points to infinity */ - if(rustsecp256k1_v0_4_0_fe_is_odd(&ge[i].x)) { - rustsecp256k1_v0_4_0_ge_set_infinity(&ge[i]); + if (odd == i % 2) { + rustsecp256k1_v0_4_1_ge_set_infinity(&ge[i]); } - rustsecp256k1_v0_4_0_gej_set_ge(&gej[i], &ge[i]); + rustsecp256k1_v0_4_1_gej_set_ge(&gej[i], &ge[i]); } - /* batch invert */ - rustsecp256k1_v0_4_0_ge_set_all_gej_var(ge, gej, 4 * runs + 1); + /* batch convert */ + rustsecp256k1_v0_4_1_ge_set_all_gej_var(ge, gej, 4 * runs + 1); /* check result */ for (i = 0; i < 4 * runs + 1; i++) { ge_equals_gej(&ge[i], &gej[i]); } + /* Test batch gej -> ge conversion with all infinities. */ + for (i = 0; i < 4 * runs + 1; i++) { + rustsecp256k1_v0_4_1_gej_set_infinity(&gej[i]); + } + /* batch convert */ + rustsecp256k1_v0_4_1_ge_set_all_gej_var(ge, gej, 4 * runs + 1); + /* check result */ + for (i = 0; i < 4 * runs + 1; i++) { + CHECK(rustsecp256k1_v0_4_1_ge_is_infinity(&ge[i])); + } + free(ge); free(gej); - free(zinv); } void test_intialized_inf(void) { - rustsecp256k1_v0_4_0_ge p; - rustsecp256k1_v0_4_0_gej pj, npj, infj1, infj2, infj3; - rustsecp256k1_v0_4_0_fe zinv; + rustsecp256k1_v0_4_1_ge p; + rustsecp256k1_v0_4_1_gej pj, npj, infj1, infj2, infj3; + rustsecp256k1_v0_4_1_fe zinv; /* Test that adding P+(-P) results in a fully initalized infinity*/ random_group_element_test(&p); - rustsecp256k1_v0_4_0_gej_set_ge(&pj, &p); - rustsecp256k1_v0_4_0_gej_neg(&npj, &pj); + rustsecp256k1_v0_4_1_gej_set_ge(&pj, &p); + rustsecp256k1_v0_4_1_gej_neg(&npj, &pj); - rustsecp256k1_v0_4_0_gej_add_var(&infj1, &pj, &npj, NULL); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&infj1)); - CHECK(rustsecp256k1_v0_4_0_fe_is_zero(&infj1.x)); - CHECK(rustsecp256k1_v0_4_0_fe_is_zero(&infj1.y)); - CHECK(rustsecp256k1_v0_4_0_fe_is_zero(&infj1.z)); + rustsecp256k1_v0_4_1_gej_add_var(&infj1, &pj, &npj, NULL); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&infj1)); + CHECK(rustsecp256k1_v0_4_1_fe_is_zero(&infj1.x)); + CHECK(rustsecp256k1_v0_4_1_fe_is_zero(&infj1.y)); + CHECK(rustsecp256k1_v0_4_1_fe_is_zero(&infj1.z)); - rustsecp256k1_v0_4_0_gej_add_ge_var(&infj2, &npj, &p, NULL); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&infj2)); - CHECK(rustsecp256k1_v0_4_0_fe_is_zero(&infj2.x)); - CHECK(rustsecp256k1_v0_4_0_fe_is_zero(&infj2.y)); - CHECK(rustsecp256k1_v0_4_0_fe_is_zero(&infj2.z)); + rustsecp256k1_v0_4_1_gej_add_ge_var(&infj2, &npj, &p, NULL); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&infj2)); + CHECK(rustsecp256k1_v0_4_1_fe_is_zero(&infj2.x)); + CHECK(rustsecp256k1_v0_4_1_fe_is_zero(&infj2.y)); + CHECK(rustsecp256k1_v0_4_1_fe_is_zero(&infj2.z)); - rustsecp256k1_v0_4_0_fe_set_int(&zinv, 1); - rustsecp256k1_v0_4_0_gej_add_zinv_var(&infj3, &npj, &p, &zinv); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&infj3)); - CHECK(rustsecp256k1_v0_4_0_fe_is_zero(&infj3.x)); - 
CHECK(rustsecp256k1_v0_4_0_fe_is_zero(&infj3.y)); - CHECK(rustsecp256k1_v0_4_0_fe_is_zero(&infj3.z)); + rustsecp256k1_v0_4_1_fe_set_int(&zinv, 1); + rustsecp256k1_v0_4_1_gej_add_zinv_var(&infj3, &npj, &p, &zinv); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&infj3)); + CHECK(rustsecp256k1_v0_4_1_fe_is_zero(&infj3.x)); + CHECK(rustsecp256k1_v0_4_1_fe_is_zero(&infj3.y)); + CHECK(rustsecp256k1_v0_4_1_fe_is_zero(&infj3.z)); } @@ -2376,39 +3197,39 @@ void test_add_neg_y_diff_x(void) { * print " Q: %x %x" % Q.xy() * print "P + Q: %x %x" % (P + Q).xy() */ - rustsecp256k1_v0_4_0_gej aj = SECP256K1_GEJ_CONST( + rustsecp256k1_v0_4_1_gej aj = SECP256K1_GEJ_CONST( 0x8d24cd95, 0x0a355af1, 0x3c543505, 0x44238d30, 0x0643d79f, 0x05a59614, 0x2f8ec030, 0xd58977cb, 0x001e337a, 0x38093dcd, 0x6c0f386d, 0x0b1293a8, 0x4d72c879, 0xd7681924, 0x44e6d2f3, 0x9190117d ); - rustsecp256k1_v0_4_0_gej bj = SECP256K1_GEJ_CONST( + rustsecp256k1_v0_4_1_gej bj = SECP256K1_GEJ_CONST( 0xc7b74206, 0x1f788cd9, 0xabd0937d, 0x164a0d86, 0x95f6ff75, 0xf19a4ce9, 0xd013bd7b, 0xbf92d2a7, 0xffe1cc85, 0xc7f6c232, 0x93f0c792, 0xf4ed6c57, 0xb28d3786, 0x2897e6db, 0xbb192d0b, 0x6e6feab2 ); - rustsecp256k1_v0_4_0_gej sumj = SECP256K1_GEJ_CONST( + rustsecp256k1_v0_4_1_gej sumj = SECP256K1_GEJ_CONST( 0x671a63c0, 0x3efdad4c, 0x389a7798, 0x24356027, 0xb3d69010, 0x278625c3, 0x5c86d390, 0x184a8f7a, 0x5f6409c2, 0x2ce01f2b, 0x511fd375, 0x25071d08, 0xda651801, 0x70e95caf, 0x8f0d893c, 0xbed8fbbe ); - rustsecp256k1_v0_4_0_ge b; - rustsecp256k1_v0_4_0_gej resj; - rustsecp256k1_v0_4_0_ge res; - rustsecp256k1_v0_4_0_ge_set_gej(&b, &bj); + rustsecp256k1_v0_4_1_ge b; + rustsecp256k1_v0_4_1_gej resj; + rustsecp256k1_v0_4_1_ge res; + rustsecp256k1_v0_4_1_ge_set_gej(&b, &bj); - rustsecp256k1_v0_4_0_gej_add_var(&resj, &aj, &bj, NULL); - rustsecp256k1_v0_4_0_ge_set_gej(&res, &resj); + rustsecp256k1_v0_4_1_gej_add_var(&resj, &aj, &bj, NULL); + rustsecp256k1_v0_4_1_ge_set_gej(&res, &resj); ge_equals_gej(&res, &sumj); - rustsecp256k1_v0_4_0_gej_add_ge(&resj, &aj, &b); - rustsecp256k1_v0_4_0_ge_set_gej(&res, &resj); + rustsecp256k1_v0_4_1_gej_add_ge(&resj, &aj, &b); + rustsecp256k1_v0_4_1_ge_set_gej(&res, &resj); ge_equals_gej(&res, &sumj); - rustsecp256k1_v0_4_0_gej_add_ge_var(&resj, &aj, &b, NULL); - rustsecp256k1_v0_4_0_ge_set_gej(&res, &resj); + rustsecp256k1_v0_4_1_gej_add_ge_var(&resj, &aj, &b, NULL); + rustsecp256k1_v0_4_1_ge_set_gej(&res, &resj); ge_equals_gej(&res, &sumj); } @@ -2422,27 +3243,27 @@ void run_ge(void) { } void test_ec_combine(void) { - rustsecp256k1_v0_4_0_scalar sum = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); - rustsecp256k1_v0_4_0_pubkey data[6]; - const rustsecp256k1_v0_4_0_pubkey* d[6]; - rustsecp256k1_v0_4_0_pubkey sd; - rustsecp256k1_v0_4_0_pubkey sd2; - rustsecp256k1_v0_4_0_gej Qj; - rustsecp256k1_v0_4_0_ge Q; + rustsecp256k1_v0_4_1_scalar sum = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + rustsecp256k1_v0_4_1_pubkey data[6]; + const rustsecp256k1_v0_4_1_pubkey* d[6]; + rustsecp256k1_v0_4_1_pubkey sd; + rustsecp256k1_v0_4_1_pubkey sd2; + rustsecp256k1_v0_4_1_gej Qj; + rustsecp256k1_v0_4_1_ge Q; int i; for (i = 1; i <= 6; i++) { - rustsecp256k1_v0_4_0_scalar s; + rustsecp256k1_v0_4_1_scalar s; random_scalar_order_test(&s); - rustsecp256k1_v0_4_0_scalar_add(&sum, &sum, &s); - rustsecp256k1_v0_4_0_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &s); - rustsecp256k1_v0_4_0_ge_set_gej(&Q, &Qj); - rustsecp256k1_v0_4_0_pubkey_save(&data[i - 1], &Q); + rustsecp256k1_v0_4_1_scalar_add(&sum, &sum, &s); + 
rustsecp256k1_v0_4_1_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &s); + rustsecp256k1_v0_4_1_ge_set_gej(&Q, &Qj); + rustsecp256k1_v0_4_1_pubkey_save(&data[i - 1], &Q); d[i - 1] = &data[i - 1]; - rustsecp256k1_v0_4_0_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &sum); - rustsecp256k1_v0_4_0_ge_set_gej(&Q, &Qj); - rustsecp256k1_v0_4_0_pubkey_save(&sd, &Q); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_combine(ctx, &sd2, d, i) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&sd, &sd2, sizeof(sd)) == 0); + rustsecp256k1_v0_4_1_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &sum); + rustsecp256k1_v0_4_1_ge_set_gej(&Q, &Qj); + rustsecp256k1_v0_4_1_pubkey_save(&sd, &Q); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_combine(ctx, &sd2, d, i) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&sd, &sd2, sizeof(sd)) == 0); } } @@ -2453,74 +3274,45 @@ void run_ec_combine(void) { } } -void test_group_decompress(const rustsecp256k1_v0_4_0_fe* x) { +void test_group_decompress(const rustsecp256k1_v0_4_1_fe* x) { /* The input itself, normalized. */ - rustsecp256k1_v0_4_0_fe fex = *x; - rustsecp256k1_v0_4_0_fe fez; - /* Results of set_xquad_var, set_xo_var(..., 0), set_xo_var(..., 1). */ - rustsecp256k1_v0_4_0_ge ge_quad, ge_even, ge_odd; - rustsecp256k1_v0_4_0_gej gej_quad; + rustsecp256k1_v0_4_1_fe fex = *x; + /* Results of set_xo_var(..., 0), set_xo_var(..., 1). */ + rustsecp256k1_v0_4_1_ge ge_even, ge_odd; /* Return values of the above calls. */ - int res_quad, res_even, res_odd; + int res_even, res_odd; - rustsecp256k1_v0_4_0_fe_normalize_var(&fex); + rustsecp256k1_v0_4_1_fe_normalize_var(&fex); - res_quad = rustsecp256k1_v0_4_0_ge_set_xquad(&ge_quad, &fex); - res_even = rustsecp256k1_v0_4_0_ge_set_xo_var(&ge_even, &fex, 0); - res_odd = rustsecp256k1_v0_4_0_ge_set_xo_var(&ge_odd, &fex, 1); + res_even = rustsecp256k1_v0_4_1_ge_set_xo_var(&ge_even, &fex, 0); + res_odd = rustsecp256k1_v0_4_1_ge_set_xo_var(&ge_odd, &fex, 1); - CHECK(res_quad == res_even); - CHECK(res_quad == res_odd); + CHECK(res_even == res_odd); - if (res_quad) { - rustsecp256k1_v0_4_0_fe_normalize_var(&ge_quad.x); - rustsecp256k1_v0_4_0_fe_normalize_var(&ge_odd.x); - rustsecp256k1_v0_4_0_fe_normalize_var(&ge_even.x); - rustsecp256k1_v0_4_0_fe_normalize_var(&ge_quad.y); - rustsecp256k1_v0_4_0_fe_normalize_var(&ge_odd.y); - rustsecp256k1_v0_4_0_fe_normalize_var(&ge_even.y); + if (res_even) { + rustsecp256k1_v0_4_1_fe_normalize_var(&ge_odd.x); + rustsecp256k1_v0_4_1_fe_normalize_var(&ge_even.x); + rustsecp256k1_v0_4_1_fe_normalize_var(&ge_odd.y); + rustsecp256k1_v0_4_1_fe_normalize_var(&ge_even.y); /* No infinity allowed. */ - CHECK(!ge_quad.infinity); CHECK(!ge_even.infinity); CHECK(!ge_odd.infinity); /* Check that the x coordinates check out. */ - CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&ge_quad.x, x)); - CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&ge_even.x, x)); - CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&ge_odd.x, x)); - - /* Check that the Y coordinate result in ge_quad is a square. */ - CHECK(rustsecp256k1_v0_4_0_fe_is_quad_var(&ge_quad.y)); + CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&ge_even.x, x)); + CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&ge_odd.x, x)); /* Check odd/even Y in ge_odd, ge_even. */ - CHECK(rustsecp256k1_v0_4_0_fe_is_odd(&ge_odd.y)); - CHECK(!rustsecp256k1_v0_4_0_fe_is_odd(&ge_even.y)); - - /* Check rustsecp256k1_v0_4_0_gej_has_quad_y_var. 
*/ - rustsecp256k1_v0_4_0_gej_set_ge(&gej_quad, &ge_quad); - CHECK(rustsecp256k1_v0_4_0_gej_has_quad_y_var(&gej_quad)); - do { - random_fe_test(&fez); - } while (rustsecp256k1_v0_4_0_fe_is_zero(&fez)); - rustsecp256k1_v0_4_0_gej_rescale(&gej_quad, &fez); - CHECK(rustsecp256k1_v0_4_0_gej_has_quad_y_var(&gej_quad)); - rustsecp256k1_v0_4_0_gej_neg(&gej_quad, &gej_quad); - CHECK(!rustsecp256k1_v0_4_0_gej_has_quad_y_var(&gej_quad)); - do { - random_fe_test(&fez); - } while (rustsecp256k1_v0_4_0_fe_is_zero(&fez)); - rustsecp256k1_v0_4_0_gej_rescale(&gej_quad, &fez); - CHECK(!rustsecp256k1_v0_4_0_gej_has_quad_y_var(&gej_quad)); - rustsecp256k1_v0_4_0_gej_neg(&gej_quad, &gej_quad); - CHECK(rustsecp256k1_v0_4_0_gej_has_quad_y_var(&gej_quad)); + CHECK(rustsecp256k1_v0_4_1_fe_is_odd(&ge_odd.y)); + CHECK(!rustsecp256k1_v0_4_1_fe_is_odd(&ge_even.y)); } } void run_group_decompress(void) { int i; for (i = 0; i < count * 4; i++) { - rustsecp256k1_v0_4_0_fe fe; + rustsecp256k1_v0_4_1_fe fe; random_fe_test(&fe); test_group_decompress(&fe); } @@ -2530,110 +3322,110 @@ void run_group_decompress(void) { void run_ecmult_chain(void) { /* random starting point A (on the curve) */ - rustsecp256k1_v0_4_0_gej a = SECP256K1_GEJ_CONST( + rustsecp256k1_v0_4_1_gej a = SECP256K1_GEJ_CONST( 0x8b30bbe9, 0xae2a9906, 0x96b22f67, 0x0709dff3, 0x727fd8bc, 0x04d3362c, 0x6c7bf458, 0xe2846004, 0xa357ae91, 0x5c4a6528, 0x1309edf2, 0x0504740f, 0x0eb33439, 0x90216b4f, 0x81063cb6, 0x5f2f7e0f ); /* two random initial factors xn and gn */ - rustsecp256k1_v0_4_0_scalar xn = SECP256K1_SCALAR_CONST( + rustsecp256k1_v0_4_1_scalar xn = SECP256K1_SCALAR_CONST( 0x84cc5452, 0xf7fde1ed, 0xb4d38a8c, 0xe9b1b84c, 0xcef31f14, 0x6e569be9, 0x705d357a, 0x42985407 ); - rustsecp256k1_v0_4_0_scalar gn = SECP256K1_SCALAR_CONST( + rustsecp256k1_v0_4_1_scalar gn = SECP256K1_SCALAR_CONST( 0xa1e58d22, 0x553dcd42, 0xb2398062, 0x5d4c57a9, 0x6e9323d4, 0x2b3152e5, 0xca2c3990, 0xedc7c9de ); /* two small multipliers to be applied to xn and gn in every iteration: */ - static const rustsecp256k1_v0_4_0_scalar xf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x1337); - static const rustsecp256k1_v0_4_0_scalar gf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x7113); + static const rustsecp256k1_v0_4_1_scalar xf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x1337); + static const rustsecp256k1_v0_4_1_scalar gf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x7113); /* accumulators with the resulting coefficients to A and G */ - rustsecp256k1_v0_4_0_scalar ae = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); - rustsecp256k1_v0_4_0_scalar ge = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + rustsecp256k1_v0_4_1_scalar ae = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); + rustsecp256k1_v0_4_1_scalar ge = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); /* actual points */ - rustsecp256k1_v0_4_0_gej x; - rustsecp256k1_v0_4_0_gej x2; + rustsecp256k1_v0_4_1_gej x; + rustsecp256k1_v0_4_1_gej x2; int i; /* the point being computed */ x = a; for (i = 0; i < 200*count; i++) { /* in each iteration, compute X = xn*X + gn*G; */ - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &x, &x, &xn, &gn); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &x, &x, &xn, &gn); /* also compute ae and ge: the actual accumulated factors for A and G */ /* if X was (ae*A+ge*G), xn*X + gn*G results in (xn*ae*A + (xn*ge+gn)*G) */ - rustsecp256k1_v0_4_0_scalar_mul(&ae, &ae, &xn); - rustsecp256k1_v0_4_0_scalar_mul(&ge, &ge, &xn); - rustsecp256k1_v0_4_0_scalar_add(&ge, &ge, &gn); + 
rustsecp256k1_v0_4_1_scalar_mul(&ae, &ae, &xn); + rustsecp256k1_v0_4_1_scalar_mul(&ge, &ge, &xn); + rustsecp256k1_v0_4_1_scalar_add(&ge, &ge, &gn); /* modify xn and gn */ - rustsecp256k1_v0_4_0_scalar_mul(&xn, &xn, &xf); - rustsecp256k1_v0_4_0_scalar_mul(&gn, &gn, &gf); + rustsecp256k1_v0_4_1_scalar_mul(&xn, &xn, &xf); + rustsecp256k1_v0_4_1_scalar_mul(&gn, &gn, &gf); /* verify */ if (i == 19999) { /* expected result after 19999 iterations */ - rustsecp256k1_v0_4_0_gej rp = SECP256K1_GEJ_CONST( + rustsecp256k1_v0_4_1_gej rp = SECP256K1_GEJ_CONST( 0xD6E96687, 0xF9B10D09, 0x2A6F3543, 0x9D86CEBE, 0xA4535D0D, 0x409F5358, 0x6440BD74, 0xB933E830, 0xB95CBCA2, 0xC77DA786, 0x539BE8FD, 0x53354D2D, 0x3B4F566A, 0xE6580454, 0x07ED6015, 0xEE1B2A88 ); - rustsecp256k1_v0_4_0_gej_neg(&rp, &rp); - rustsecp256k1_v0_4_0_gej_add_var(&rp, &rp, &x, NULL); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&rp)); + rustsecp256k1_v0_4_1_gej_neg(&rp, &rp); + rustsecp256k1_v0_4_1_gej_add_var(&rp, &rp, &x, NULL); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&rp)); } } /* redo the computation, but directly with the resulting ae and ge coefficients: */ - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &x2, &a, &ae, &ge); - rustsecp256k1_v0_4_0_gej_neg(&x2, &x2); - rustsecp256k1_v0_4_0_gej_add_var(&x2, &x2, &x, NULL); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&x2)); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &x2, &a, &ae, &ge); + rustsecp256k1_v0_4_1_gej_neg(&x2, &x2); + rustsecp256k1_v0_4_1_gej_add_var(&x2, &x2, &x, NULL); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&x2)); } -void test_point_times_order(const rustsecp256k1_v0_4_0_gej *point) { +void test_point_times_order(const rustsecp256k1_v0_4_1_gej *point) { /* X * (point + G) + (order-X) * (point + G) = 0 */ - rustsecp256k1_v0_4_0_scalar x; - rustsecp256k1_v0_4_0_scalar nx; - rustsecp256k1_v0_4_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); - rustsecp256k1_v0_4_0_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); - rustsecp256k1_v0_4_0_gej res1, res2; - rustsecp256k1_v0_4_0_ge res3; + rustsecp256k1_v0_4_1_scalar x; + rustsecp256k1_v0_4_1_scalar nx; + rustsecp256k1_v0_4_1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + rustsecp256k1_v0_4_1_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); + rustsecp256k1_v0_4_1_gej res1, res2; + rustsecp256k1_v0_4_1_ge res3; unsigned char pub[65]; size_t psize = 65; random_scalar_order_test(&x); - rustsecp256k1_v0_4_0_scalar_negate(&nx, &x); - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &res1, point, &x, &x); /* calc res1 = x * point + x * G; */ - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &res2, point, &nx, &nx); /* calc res2 = (order - x) * point + (order - x) * G; */ - rustsecp256k1_v0_4_0_gej_add_var(&res1, &res1, &res2, NULL); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&res1)); - rustsecp256k1_v0_4_0_ge_set_gej(&res3, &res1); + rustsecp256k1_v0_4_1_scalar_negate(&nx, &x); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &res1, point, &x, &x); /* calc res1 = x * point + x * G; */ + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &res2, point, &nx, &nx); /* calc res2 = (order - x) * point + (order - x) * G; */ + rustsecp256k1_v0_4_1_gej_add_var(&res1, &res1, &res2, NULL); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&res1)); + rustsecp256k1_v0_4_1_ge_set_gej(&res3, &res1); +
CHECK(rustsecp256k1_v0_4_1_ge_is_infinity(&res3)); + CHECK(rustsecp256k1_v0_4_1_ge_is_valid_var(&res3) == 0); + CHECK(rustsecp256k1_v0_4_1_eckey_pubkey_serialize(&res3, pub, &psize, 0) == 0); psize = 65; - CHECK(rustsecp256k1_v0_4_0_eckey_pubkey_serialize(&res3, pub, &psize, 1) == 0); + CHECK(rustsecp256k1_v0_4_1_eckey_pubkey_serialize(&res3, pub, &psize, 1) == 0); /* check zero/one edge cases */ - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &res1, point, &zero, &zero); - rustsecp256k1_v0_4_0_ge_set_gej(&res3, &res1); - CHECK(rustsecp256k1_v0_4_0_ge_is_infinity(&res3)); - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &res1, point, &one, &zero); - rustsecp256k1_v0_4_0_ge_set_gej(&res3, &res1); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &res1, point, &zero, &zero); + rustsecp256k1_v0_4_1_ge_set_gej(&res3, &res1); + CHECK(rustsecp256k1_v0_4_1_ge_is_infinity(&res3)); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &res1, point, &one, &zero); + rustsecp256k1_v0_4_1_ge_set_gej(&res3, &res1); ge_equals_gej(&res3, point); - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &res1, point, &zero, &one); - rustsecp256k1_v0_4_0_ge_set_gej(&res3, &res1); - ge_equals_ge(&res3, &rustsecp256k1_v0_4_0_ge_const_g); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &res1, point, &zero, &one); + rustsecp256k1_v0_4_1_ge_set_gej(&res3, &res1); + ge_equals_ge(&res3, &rustsecp256k1_v0_4_1_ge_const_g); } -/* These scalars reach large (in absolute value) outputs when fed to rustsecp256k1_v0_4_0_scalar_split_lambda. +/* These scalars reach large (in absolute value) outputs when fed to rustsecp256k1_v0_4_1_scalar_split_lambda. * * They are computed as: * - For a in [-2, -1, 0, 1, 2]: * - For b in [-3, -1, 1, 3]: * - Output (a*LAMBDA + (ORDER+b)/2) % ORDER */ -static const rustsecp256k1_v0_4_0_scalar scalars_near_split_bounds[20] = { +static const rustsecp256k1_v0_4_1_scalar scalars_near_split_bounds[20] = { SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6fc), SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6fd), SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6fe), @@ -2656,43 +3448,43 @@ static const rustsecp256k1_v0_4_0_scalar scalars_near_split_bounds[20] = { SECP256K1_SCALAR_CONST(0x26c75a99, 0x80b861c1, 0x4a4c3805, 0x1024c8b4, 0x704d760e, 0xe95e7cd3, 0xde1bfdb1, 0xce2c5a45) }; -void test_ecmult_target(const rustsecp256k1_v0_4_0_scalar* target, int mode) { +void test_ecmult_target(const rustsecp256k1_v0_4_1_scalar* target, int mode) { /* Mode: 0=ecmult_gen, 1=ecmult, 2=ecmult_const */ - rustsecp256k1_v0_4_0_scalar n1, n2; - rustsecp256k1_v0_4_0_ge p; - rustsecp256k1_v0_4_0_gej pj, p1j, p2j, ptj; - static const rustsecp256k1_v0_4_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + rustsecp256k1_v0_4_1_scalar n1, n2; + rustsecp256k1_v0_4_1_ge p; + rustsecp256k1_v0_4_1_gej pj, p1j, p2j, ptj; + static const rustsecp256k1_v0_4_1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); /* Generate random n1,n2 such that n1+n2 = -target. */ random_scalar_order_test(&n1); - rustsecp256k1_v0_4_0_scalar_add(&n2, &n1, target); - rustsecp256k1_v0_4_0_scalar_negate(&n2, &n2); + rustsecp256k1_v0_4_1_scalar_add(&n2, &n1, target); + rustsecp256k1_v0_4_1_scalar_negate(&n2, &n2); /* Generate a random input point. 
*/ if (mode != 0) { random_group_element_test(&p); - rustsecp256k1_v0_4_0_gej_set_ge(&pj, &p); + rustsecp256k1_v0_4_1_gej_set_ge(&pj, &p); } /* EC multiplications */ if (mode == 0) { - rustsecp256k1_v0_4_0_ecmult_gen(&ctx->ecmult_gen_ctx, &p1j, &n1); - rustsecp256k1_v0_4_0_ecmult_gen(&ctx->ecmult_gen_ctx, &p2j, &n2); - rustsecp256k1_v0_4_0_ecmult_gen(&ctx->ecmult_gen_ctx, &ptj, target); + rustsecp256k1_v0_4_1_ecmult_gen(&ctx->ecmult_gen_ctx, &p1j, &n1); + rustsecp256k1_v0_4_1_ecmult_gen(&ctx->ecmult_gen_ctx, &p2j, &n2); + rustsecp256k1_v0_4_1_ecmult_gen(&ctx->ecmult_gen_ctx, &ptj, target); } else if (mode == 1) { - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &p1j, &pj, &n1, &zero); - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &p2j, &pj, &n2, &zero); - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &ptj, &pj, target, &zero); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &p1j, &pj, &n1, &zero); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &p2j, &pj, &n2, &zero); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &ptj, &pj, target, &zero); } else { - rustsecp256k1_v0_4_0_ecmult_const(&p1j, &p, &n1, 256); - rustsecp256k1_v0_4_0_ecmult_const(&p2j, &p, &n2, 256); - rustsecp256k1_v0_4_0_ecmult_const(&ptj, &p, target, 256); + rustsecp256k1_v0_4_1_ecmult_const(&p1j, &p, &n1, 256); + rustsecp256k1_v0_4_1_ecmult_const(&p2j, &p, &n2, 256); + rustsecp256k1_v0_4_1_ecmult_const(&ptj, &p, target, 256); } /* Add them all up: n1*P + n2*P + target*P = (n1+n2+target)*P = (n1+n2-n1-n2)*P = 0. */ - rustsecp256k1_v0_4_0_gej_add_var(&ptj, &ptj, &p1j, NULL); - rustsecp256k1_v0_4_0_gej_add_var(&ptj, &ptj, &p2j, NULL); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&ptj)); + rustsecp256k1_v0_4_1_gej_add_var(&ptj, &ptj, &p1j, NULL); + rustsecp256k1_v0_4_1_gej_add_var(&ptj, &ptj, &p2j, NULL); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&ptj)); } void run_ecmult_near_split_bound(void) { @@ -2709,118 +3501,118 @@ void run_ecmult_near_split_bound(void) { void run_point_times_order(void) { int i; - rustsecp256k1_v0_4_0_fe x = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 2); - static const rustsecp256k1_v0_4_0_fe xr = SECP256K1_FE_CONST( + rustsecp256k1_v0_4_1_fe x = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 2); + static const rustsecp256k1_v0_4_1_fe xr = SECP256K1_FE_CONST( 0x7603CB59, 0xB0EF6C63, 0xFE608479, 0x2A0C378C, 0xDB3233A8, 0x0F8A9A09, 0xA877DEAD, 0x31B38C45 ); for (i = 0; i < 500; i++) { - rustsecp256k1_v0_4_0_ge p; - if (rustsecp256k1_v0_4_0_ge_set_xo_var(&p, &x, 1)) { - rustsecp256k1_v0_4_0_gej j; - CHECK(rustsecp256k1_v0_4_0_ge_is_valid_var(&p)); - rustsecp256k1_v0_4_0_gej_set_ge(&j, &p); + rustsecp256k1_v0_4_1_ge p; + if (rustsecp256k1_v0_4_1_ge_set_xo_var(&p, &x, 1)) { + rustsecp256k1_v0_4_1_gej j; + CHECK(rustsecp256k1_v0_4_1_ge_is_valid_var(&p)); + rustsecp256k1_v0_4_1_gej_set_ge(&j, &p); test_point_times_order(&j); } - rustsecp256k1_v0_4_0_fe_sqr(&x, &x); + rustsecp256k1_v0_4_1_fe_sqr(&x, &x); } - rustsecp256k1_v0_4_0_fe_normalize_var(&x); - CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&x, &xr)); + rustsecp256k1_v0_4_1_fe_normalize_var(&x); + CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&x, &xr)); } void ecmult_const_random_mult(void) { /* random starting point A (on the curve) */ - rustsecp256k1_v0_4_0_ge a = SECP256K1_GE_CONST( + rustsecp256k1_v0_4_1_ge a = SECP256K1_GE_CONST( 0x6d986544, 0x57ff52b8, 0xcf1b8126, 0x5b802a5b, 0xa97f9263, 0xb1e88044, 0x93351325, 0x91bc450a, 0x535c59f7, 0x325e5d2b, 0xc391fbe8, 0x3c12787c, 0x337e4a98, 0xe82a9011, 0x0123ba37, 0xdd769c7d ); /* random initial factor xn */ -
rustsecp256k1_v0_4_0_scalar xn = SECP256K1_SCALAR_CONST( + rustsecp256k1_v0_4_1_scalar xn = SECP256K1_SCALAR_CONST( 0x649d4f77, 0xc4242df7, 0x7f2079c9, 0x14530327, 0xa31b876a, 0xd2d8ce2a, 0x2236d5c6, 0xd7b2029b ); /* expected xn * A (from sage) */ - rustsecp256k1_v0_4_0_ge expected_b = SECP256K1_GE_CONST( + rustsecp256k1_v0_4_1_ge expected_b = SECP256K1_GE_CONST( 0x23773684, 0x4d209dc7, 0x098a786f, 0x20d06fcd, 0x070a38bf, 0xc11ac651, 0x03004319, 0x1e2a8786, 0xed8c3b8e, 0xc06dd57b, 0xd06ea66e, 0x45492b0f, 0xb84e4e1b, 0xfb77e21f, 0x96baae2a, 0x63dec956 ); - rustsecp256k1_v0_4_0_gej b; - rustsecp256k1_v0_4_0_ecmult_const(&b, &a, &xn, 256); + rustsecp256k1_v0_4_1_gej b; + rustsecp256k1_v0_4_1_ecmult_const(&b, &a, &xn, 256); - CHECK(rustsecp256k1_v0_4_0_ge_is_valid_var(&a)); + CHECK(rustsecp256k1_v0_4_1_ge_is_valid_var(&a)); ge_equals_gej(&expected_b, &b); } void ecmult_const_commutativity(void) { - rustsecp256k1_v0_4_0_scalar a; - rustsecp256k1_v0_4_0_scalar b; - rustsecp256k1_v0_4_0_gej res1; - rustsecp256k1_v0_4_0_gej res2; - rustsecp256k1_v0_4_0_ge mid1; - rustsecp256k1_v0_4_0_ge mid2; + rustsecp256k1_v0_4_1_scalar a; + rustsecp256k1_v0_4_1_scalar b; + rustsecp256k1_v0_4_1_gej res1; + rustsecp256k1_v0_4_1_gej res2; + rustsecp256k1_v0_4_1_ge mid1; + rustsecp256k1_v0_4_1_ge mid2; random_scalar_order_test(&a); random_scalar_order_test(&b); - rustsecp256k1_v0_4_0_ecmult_const(&res1, &rustsecp256k1_v0_4_0_ge_const_g, &a, 256); - rustsecp256k1_v0_4_0_ecmult_const(&res2, &rustsecp256k1_v0_4_0_ge_const_g, &b, 256); - rustsecp256k1_v0_4_0_ge_set_gej(&mid1, &res1); - rustsecp256k1_v0_4_0_ge_set_gej(&mid2, &res2); - rustsecp256k1_v0_4_0_ecmult_const(&res1, &mid1, &b, 256); - rustsecp256k1_v0_4_0_ecmult_const(&res2, &mid2, &a, 256); - rustsecp256k1_v0_4_0_ge_set_gej(&mid1, &res1); - rustsecp256k1_v0_4_0_ge_set_gej(&mid2, &res2); + rustsecp256k1_v0_4_1_ecmult_const(&res1, &rustsecp256k1_v0_4_1_ge_const_g, &a, 256); + rustsecp256k1_v0_4_1_ecmult_const(&res2, &rustsecp256k1_v0_4_1_ge_const_g, &b, 256); + rustsecp256k1_v0_4_1_ge_set_gej(&mid1, &res1); + rustsecp256k1_v0_4_1_ge_set_gej(&mid2, &res2); + rustsecp256k1_v0_4_1_ecmult_const(&res1, &mid1, &b, 256); + rustsecp256k1_v0_4_1_ecmult_const(&res2, &mid2, &a, 256); + rustsecp256k1_v0_4_1_ge_set_gej(&mid1, &res1); + rustsecp256k1_v0_4_1_ge_set_gej(&mid2, &res2); ge_equals_ge(&mid1, &mid2); } void ecmult_const_mult_zero_one(void) { - rustsecp256k1_v0_4_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); - rustsecp256k1_v0_4_0_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); - rustsecp256k1_v0_4_0_scalar negone; - rustsecp256k1_v0_4_0_gej res1; - rustsecp256k1_v0_4_0_ge res2; - rustsecp256k1_v0_4_0_ge point; - rustsecp256k1_v0_4_0_scalar_negate(&negone, &one); + rustsecp256k1_v0_4_1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + rustsecp256k1_v0_4_1_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); + rustsecp256k1_v0_4_1_scalar negone; + rustsecp256k1_v0_4_1_gej res1; + rustsecp256k1_v0_4_1_ge res2; + rustsecp256k1_v0_4_1_ge point; + rustsecp256k1_v0_4_1_scalar_negate(&negone, &one); random_group_element_test(&point); - rustsecp256k1_v0_4_0_ecmult_const(&res1, &point, &zero, 3); - rustsecp256k1_v0_4_0_ge_set_gej(&res2, &res1); - CHECK(rustsecp256k1_v0_4_0_ge_is_infinity(&res2)); - rustsecp256k1_v0_4_0_ecmult_const(&res1, &point, &one, 2); - rustsecp256k1_v0_4_0_ge_set_gej(&res2, &res1); + rustsecp256k1_v0_4_1_ecmult_const(&res1, &point, &zero, 3); + rustsecp256k1_v0_4_1_ge_set_gej(&res2, &res1); + 
CHECK(rustsecp256k1_v0_4_1_ge_is_infinity(&res2)); + rustsecp256k1_v0_4_1_ecmult_const(&res1, &point, &one, 2); + rustsecp256k1_v0_4_1_ge_set_gej(&res2, &res1); ge_equals_ge(&res2, &point); - rustsecp256k1_v0_4_0_ecmult_const(&res1, &point, &negone, 256); - rustsecp256k1_v0_4_0_gej_neg(&res1, &res1); - rustsecp256k1_v0_4_0_ge_set_gej(&res2, &res1); + rustsecp256k1_v0_4_1_ecmult_const(&res1, &point, &negone, 256); + rustsecp256k1_v0_4_1_gej_neg(&res1, &res1); + rustsecp256k1_v0_4_1_ge_set_gej(&res2, &res1); ge_equals_ge(&res2, &point); } void ecmult_const_chain_multiply(void) { /* Check known result (randomly generated test problem from sage) */ - const rustsecp256k1_v0_4_0_scalar scalar = SECP256K1_SCALAR_CONST( + const rustsecp256k1_v0_4_1_scalar scalar = SECP256K1_SCALAR_CONST( 0x4968d524, 0x2abf9b7a, 0x466abbcf, 0x34b11b6d, 0xcd83d307, 0x827bed62, 0x05fad0ce, 0x18fae63b ); - const rustsecp256k1_v0_4_0_gej expected_point = SECP256K1_GEJ_CONST( + const rustsecp256k1_v0_4_1_gej expected_point = SECP256K1_GEJ_CONST( 0x5494c15d, 0x32099706, 0xc2395f94, 0x348745fd, 0x757ce30e, 0x4e8c90fb, 0xa2bad184, 0xf883c69f, 0x5d195d20, 0xe191bf7f, 0x1be3e55f, 0x56a80196, 0x6071ad01, 0xf1462f66, 0xc997fa94, 0xdb858435 ); - rustsecp256k1_v0_4_0_gej point; - rustsecp256k1_v0_4_0_ge res; + rustsecp256k1_v0_4_1_gej point; + rustsecp256k1_v0_4_1_ge res; int i; - rustsecp256k1_v0_4_0_gej_set_ge(&point, &rustsecp256k1_v0_4_0_ge_const_g); + rustsecp256k1_v0_4_1_gej_set_ge(&point, &rustsecp256k1_v0_4_1_ge_const_g); for (i = 0; i < 100; ++i) { - rustsecp256k1_v0_4_0_ge tmp; - rustsecp256k1_v0_4_0_ge_set_gej(&tmp, &point); - rustsecp256k1_v0_4_0_ecmult_const(&point, &tmp, &scalar, 256); + rustsecp256k1_v0_4_1_ge tmp; + rustsecp256k1_v0_4_1_ge_set_gej(&tmp, &point); + rustsecp256k1_v0_4_1_ecmult_const(&point, &tmp, &scalar, 256); } - rustsecp256k1_v0_4_0_ge_set_gej(&res, &point); + rustsecp256k1_v0_4_1_ge_set_gej(&res, &point); ge_equals_gej(&res, &expected_point); } @@ -2832,18 +3624,18 @@ void run_ecmult_const_tests(void) { } typedef struct { - rustsecp256k1_v0_4_0_scalar *sc; - rustsecp256k1_v0_4_0_ge *pt; + rustsecp256k1_v0_4_1_scalar *sc; + rustsecp256k1_v0_4_1_ge *pt; } ecmult_multi_data; -static int ecmult_multi_callback(rustsecp256k1_v0_4_0_scalar *sc, rustsecp256k1_v0_4_0_ge *pt, size_t idx, void *cbdata) { +static int ecmult_multi_callback(rustsecp256k1_v0_4_1_scalar *sc, rustsecp256k1_v0_4_1_ge *pt, size_t idx, void *cbdata) { ecmult_multi_data *data = (ecmult_multi_data*) cbdata; *sc = data->sc[idx]; *pt = data->pt[idx]; return 1; } -static int ecmult_multi_false_callback(rustsecp256k1_v0_4_0_scalar *sc, rustsecp256k1_v0_4_0_ge *pt, size_t idx, void *cbdata) { +static int ecmult_multi_false_callback(rustsecp256k1_v0_4_1_scalar *sc, rustsecp256k1_v0_4_1_ge *pt, size_t idx, void *cbdata) { (void)sc; (void)pt; (void)idx; @@ -2851,102 +3643,102 @@ static int ecmult_multi_false_callback(rustsecp256k1_v0_4_0_scalar *sc, rustsecp return 0; } -void test_ecmult_multi(rustsecp256k1_v0_4_0_scratch *scratch, rustsecp256k1_v0_4_0_ecmult_multi_func ecmult_multi) { +void test_ecmult_multi(rustsecp256k1_v0_4_1_scratch *scratch, rustsecp256k1_v0_4_1_ecmult_multi_func ecmult_multi) { int ncount; - rustsecp256k1_v0_4_0_scalar szero; - rustsecp256k1_v0_4_0_scalar sc[32]; - rustsecp256k1_v0_4_0_ge pt[32]; - rustsecp256k1_v0_4_0_gej r; - rustsecp256k1_v0_4_0_gej r2; + rustsecp256k1_v0_4_1_scalar szero; + rustsecp256k1_v0_4_1_scalar sc[32]; + rustsecp256k1_v0_4_1_ge pt[32]; + rustsecp256k1_v0_4_1_gej r; + 
rustsecp256k1_v0_4_1_gej r2; ecmult_multi_data data; data.sc = sc; data.pt = pt; - rustsecp256k1_v0_4_0_scalar_set_int(&szero, 0); + rustsecp256k1_v0_4_1_scalar_set_int(&szero, 0); /* No points to multiply */ CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, NULL, ecmult_multi_callback, &data, 0)); /* Check 1- and 2-point multiplies against ecmult */ for (ncount = 0; ncount < count; ncount++) { - rustsecp256k1_v0_4_0_ge ptg; - rustsecp256k1_v0_4_0_gej ptgj; + rustsecp256k1_v0_4_1_ge ptg; + rustsecp256k1_v0_4_1_gej ptgj; random_scalar_order(&sc[0]); random_scalar_order(&sc[1]); random_group_element_test(&ptg); - rustsecp256k1_v0_4_0_gej_set_ge(&ptgj, &ptg); + rustsecp256k1_v0_4_1_gej_set_ge(&ptgj, &ptg); pt[0] = ptg; - pt[1] = rustsecp256k1_v0_4_0_ge_const_g; + pt[1] = rustsecp256k1_v0_4_1_ge_const_g; /* only G scalar */ - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &szero, &sc[0]); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &szero, &sc[0]); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &sc[0], ecmult_multi_callback, &data, 0)); - rustsecp256k1_v0_4_0_gej_neg(&r2, &r2); - rustsecp256k1_v0_4_0_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&r)); + rustsecp256k1_v0_4_1_gej_neg(&r2, &r2); + rustsecp256k1_v0_4_1_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&r)); /* 1-point */ - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &szero); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &szero); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 1)); - rustsecp256k1_v0_4_0_gej_neg(&r2, &r2); - rustsecp256k1_v0_4_0_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&r)); + rustsecp256k1_v0_4_1_gej_neg(&r2, &r2); + rustsecp256k1_v0_4_1_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&r)); /* Try to multiply 1 point, but callback returns false */ CHECK(!ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_false_callback, &data, 1)); /* 2-point */ - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &sc[1]); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &sc[1]); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 2)); - rustsecp256k1_v0_4_0_gej_neg(&r2, &r2); - rustsecp256k1_v0_4_0_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&r)); + rustsecp256k1_v0_4_1_gej_neg(&r2, &r2); + rustsecp256k1_v0_4_1_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&r)); /* 2-point with G scalar */ - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &sc[1]); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &sc[1]); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &sc[1], ecmult_multi_callback, &data, 1)); - rustsecp256k1_v0_4_0_gej_neg(&r2, &r2); - rustsecp256k1_v0_4_0_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&r)); + rustsecp256k1_v0_4_1_gej_neg(&r2, &r2); + rustsecp256k1_v0_4_1_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&r)); } /* Check infinite outputs of various forms */ for (ncount = 0; ncount < count; ncount++) { - rustsecp256k1_v0_4_0_ge ptg; + rustsecp256k1_v0_4_1_ge ptg; size_t i, j; size_t sizes[] = { 2, 10, 32 }; for 
(j = 0; j < 3; j++) { for (i = 0; i < 32; i++) { random_scalar_order(&sc[i]); - rustsecp256k1_v0_4_0_ge_set_infinity(&pt[i]); + rustsecp256k1_v0_4_1_ge_set_infinity(&pt[i]); } CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&r)); } for (j = 0; j < 3; j++) { for (i = 0; i < 32; i++) { random_group_element_test(&ptg); pt[i] = ptg; - rustsecp256k1_v0_4_0_scalar_set_int(&sc[i], 0); + rustsecp256k1_v0_4_1_scalar_set_int(&sc[i], 0); } CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&r)); } for (j = 0; j < 3; j++) { random_group_element_test(&ptg); for (i = 0; i < 16; i++) { random_scalar_order(&sc[2*i]); - rustsecp256k1_v0_4_0_scalar_negate(&sc[2*i + 1], &sc[2*i]); + rustsecp256k1_v0_4_1_scalar_negate(&sc[2*i + 1], &sc[2*i]); pt[2 * i] = ptg; pt[2 * i + 1] = ptg; } CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&r)); random_scalar_order(&sc[0]); for (i = 0; i < 16; i++) { @@ -2955,70 +3747,70 @@ void test_ecmult_multi(rustsecp256k1_v0_4_0_scratch *scratch, rustsecp256k1_v0_4 sc[2*i] = sc[0]; sc[2*i+1] = sc[0]; pt[2 * i] = ptg; - rustsecp256k1_v0_4_0_ge_neg(&pt[2*i+1], &pt[2*i]); + rustsecp256k1_v0_4_1_ge_neg(&pt[2*i+1], &pt[2*i]); } CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&r)); } random_group_element_test(&ptg); - rustsecp256k1_v0_4_0_scalar_set_int(&sc[0], 0); + rustsecp256k1_v0_4_1_scalar_set_int(&sc[0], 0); pt[0] = ptg; for (i = 1; i < 32; i++) { pt[i] = ptg; random_scalar_order(&sc[i]); - rustsecp256k1_v0_4_0_scalar_add(&sc[0], &sc[0], &sc[i]); - rustsecp256k1_v0_4_0_scalar_negate(&sc[i], &sc[i]); + rustsecp256k1_v0_4_1_scalar_add(&sc[0], &sc[0], &sc[i]); + rustsecp256k1_v0_4_1_scalar_negate(&sc[i], &sc[i]); } CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 32)); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&r)); } /* Check random points, constant scalar */ for (ncount = 0; ncount < count; ncount++) { size_t i; - rustsecp256k1_v0_4_0_gej_set_infinity(&r); + rustsecp256k1_v0_4_1_gej_set_infinity(&r); random_scalar_order(&sc[0]); for (i = 0; i < 20; i++) { - rustsecp256k1_v0_4_0_ge ptg; + rustsecp256k1_v0_4_1_ge ptg; sc[i] = sc[0]; random_group_element_test(&ptg); pt[i] = ptg; - rustsecp256k1_v0_4_0_gej_add_ge_var(&r, &r, &pt[i], NULL); + rustsecp256k1_v0_4_1_gej_add_ge_var(&r, &r, &pt[i], NULL); } - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &r2, &r, &sc[0], &szero); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &r2, &r, &sc[0], &szero); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20)); - rustsecp256k1_v0_4_0_gej_neg(&r2, &r2); - rustsecp256k1_v0_4_0_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&r)); + rustsecp256k1_v0_4_1_gej_neg(&r2, &r2); + rustsecp256k1_v0_4_1_gej_add_var(&r, &r, &r2, NULL); + 
CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&r)); } /* Check random scalars, constant point */ for (ncount = 0; ncount < count; ncount++) { size_t i; - rustsecp256k1_v0_4_0_ge ptg; - rustsecp256k1_v0_4_0_gej p0j; - rustsecp256k1_v0_4_0_scalar rs; - rustsecp256k1_v0_4_0_scalar_set_int(&rs, 0); + rustsecp256k1_v0_4_1_ge ptg; + rustsecp256k1_v0_4_1_gej p0j; + rustsecp256k1_v0_4_1_scalar rs; + rustsecp256k1_v0_4_1_scalar_set_int(&rs, 0); random_group_element_test(&ptg); for (i = 0; i < 20; i++) { random_scalar_order(&sc[i]); pt[i] = ptg; - rustsecp256k1_v0_4_0_scalar_add(&rs, &rs, &sc[i]); + rustsecp256k1_v0_4_1_scalar_add(&rs, &rs, &sc[i]); } - rustsecp256k1_v0_4_0_gej_set_ge(&p0j, &pt[0]); - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &r2, &p0j, &rs, &szero); + rustsecp256k1_v0_4_1_gej_set_ge(&p0j, &pt[0]); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &r2, &p0j, &rs, &szero); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20)); - rustsecp256k1_v0_4_0_gej_neg(&r2, &r2); - rustsecp256k1_v0_4_0_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&r)); + rustsecp256k1_v0_4_1_gej_neg(&r2, &r2); + rustsecp256k1_v0_4_1_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&r)); } /* Sanity check that zero scalars don't cause problems */ @@ -3027,62 +3819,62 @@ void test_ecmult_multi(rustsecp256k1_v0_4_0_scratch *scratch, rustsecp256k1_v0_4 random_group_element_test(&pt[ncount]); } - rustsecp256k1_v0_4_0_scalar_clear(&sc[0]); + rustsecp256k1_v0_4_1_scalar_clear(&sc[0]); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20)); - rustsecp256k1_v0_4_0_scalar_clear(&sc[1]); - rustsecp256k1_v0_4_0_scalar_clear(&sc[2]); - rustsecp256k1_v0_4_0_scalar_clear(&sc[3]); - rustsecp256k1_v0_4_0_scalar_clear(&sc[4]); + rustsecp256k1_v0_4_1_scalar_clear(&sc[1]); + rustsecp256k1_v0_4_1_scalar_clear(&sc[2]); + rustsecp256k1_v0_4_1_scalar_clear(&sc[3]); + rustsecp256k1_v0_4_1_scalar_clear(&sc[4]); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 6)); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 5)); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&r)); /* Run through s0*(t0*P) + s1*(t1*P) exhaustively for many small values of s0, s1, t0, t1 */ { const size_t TOP = 8; size_t s0i, s1i; size_t t0i, t1i; - rustsecp256k1_v0_4_0_ge ptg; - rustsecp256k1_v0_4_0_gej ptgj; + rustsecp256k1_v0_4_1_ge ptg; + rustsecp256k1_v0_4_1_gej ptgj; random_group_element_test(&ptg); - rustsecp256k1_v0_4_0_gej_set_ge(&ptgj, &ptg); + rustsecp256k1_v0_4_1_gej_set_ge(&ptgj, &ptg); for(t0i = 0; t0i < TOP; t0i++) { for(t1i = 0; t1i < TOP; t1i++) { - rustsecp256k1_v0_4_0_gej t0p, t1p; - rustsecp256k1_v0_4_0_scalar t0, t1; + rustsecp256k1_v0_4_1_gej t0p, t1p; + rustsecp256k1_v0_4_1_scalar t0, t1; - rustsecp256k1_v0_4_0_scalar_set_int(&t0, (t0i + 1) / 2); - rustsecp256k1_v0_4_0_scalar_cond_negate(&t0, t0i & 1); - rustsecp256k1_v0_4_0_scalar_set_int(&t1, (t1i + 1) / 2); - rustsecp256k1_v0_4_0_scalar_cond_negate(&t1, t1i & 1); + rustsecp256k1_v0_4_1_scalar_set_int(&t0, (t0i + 1) / 2); + rustsecp256k1_v0_4_1_scalar_cond_negate(&t0, t0i & 1); + rustsecp256k1_v0_4_1_scalar_set_int(&t1, (t1i + 1) / 2); + rustsecp256k1_v0_4_1_scalar_cond_negate(&t1, t1i & 1); - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &t0p, 
&ptgj, &t0, &szero); - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &t1p, &ptgj, &t1, &szero); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &t0p, &ptgj, &t0, &szero); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &t1p, &ptgj, &t1, &szero); for(s0i = 0; s0i < TOP; s0i++) { for(s1i = 0; s1i < TOP; s1i++) { - rustsecp256k1_v0_4_0_scalar tmp1, tmp2; - rustsecp256k1_v0_4_0_gej expected, actual; + rustsecp256k1_v0_4_1_scalar tmp1, tmp2; + rustsecp256k1_v0_4_1_gej expected, actual; - rustsecp256k1_v0_4_0_ge_set_gej(&pt[0], &t0p); - rustsecp256k1_v0_4_0_ge_set_gej(&pt[1], &t1p); + rustsecp256k1_v0_4_1_ge_set_gej(&pt[0], &t0p); + rustsecp256k1_v0_4_1_ge_set_gej(&pt[1], &t1p); - rustsecp256k1_v0_4_0_scalar_set_int(&sc[0], (s0i + 1) / 2); - rustsecp256k1_v0_4_0_scalar_cond_negate(&sc[0], s0i & 1); - rustsecp256k1_v0_4_0_scalar_set_int(&sc[1], (s1i + 1) / 2); - rustsecp256k1_v0_4_0_scalar_cond_negate(&sc[1], s1i & 1); + rustsecp256k1_v0_4_1_scalar_set_int(&sc[0], (s0i + 1) / 2); + rustsecp256k1_v0_4_1_scalar_cond_negate(&sc[0], s0i & 1); + rustsecp256k1_v0_4_1_scalar_set_int(&sc[1], (s1i + 1) / 2); + rustsecp256k1_v0_4_1_scalar_cond_negate(&sc[1], s1i & 1); - rustsecp256k1_v0_4_0_scalar_mul(&tmp1, &t0, &sc[0]); - rustsecp256k1_v0_4_0_scalar_mul(&tmp2, &t1, &sc[1]); - rustsecp256k1_v0_4_0_scalar_add(&tmp1, &tmp1, &tmp2); + rustsecp256k1_v0_4_1_scalar_mul(&tmp1, &t0, &sc[0]); + rustsecp256k1_v0_4_1_scalar_mul(&tmp2, &t1, &sc[1]); + rustsecp256k1_v0_4_1_scalar_add(&tmp1, &tmp1, &tmp2); - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &expected, &ptgj, &tmp1, &szero); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &expected, &ptgj, &tmp1, &szero); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &actual, &szero, ecmult_multi_callback, &data, 2)); - rustsecp256k1_v0_4_0_gej_neg(&expected, &expected); - rustsecp256k1_v0_4_0_gej_add_var(&actual, &actual, &expected, NULL); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&actual)); + rustsecp256k1_v0_4_1_gej_neg(&expected, &expected); + rustsecp256k1_v0_4_1_gej_add_var(&actual, &actual, &expected, NULL); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&actual)); } } } @@ -3090,38 +3882,38 @@ void test_ecmult_multi(rustsecp256k1_v0_4_0_scratch *scratch, rustsecp256k1_v0_4 } } -void test_ecmult_multi_batch_single(rustsecp256k1_v0_4_0_ecmult_multi_func ecmult_multi) { - rustsecp256k1_v0_4_0_scalar szero; - rustsecp256k1_v0_4_0_scalar sc; - rustsecp256k1_v0_4_0_ge pt; - rustsecp256k1_v0_4_0_gej r; +void test_ecmult_multi_batch_single(rustsecp256k1_v0_4_1_ecmult_multi_func ecmult_multi) { + rustsecp256k1_v0_4_1_scalar szero; + rustsecp256k1_v0_4_1_scalar sc; + rustsecp256k1_v0_4_1_ge pt; + rustsecp256k1_v0_4_1_gej r; ecmult_multi_data data; - rustsecp256k1_v0_4_0_scratch *scratch_empty; + rustsecp256k1_v0_4_1_scratch *scratch_empty; random_group_element_test(&pt); random_scalar_order(&sc); data.sc = ≻ data.pt = &pt; - rustsecp256k1_v0_4_0_scalar_set_int(&szero, 0); + rustsecp256k1_v0_4_1_scalar_set_int(&szero, 0); /* Try to multiply 1 point, but scratch space is empty.*/ - scratch_empty = rustsecp256k1_v0_4_0_scratch_create(&ctx->error_callback, 0); + scratch_empty = rustsecp256k1_v0_4_1_scratch_create(&ctx->error_callback, 0); CHECK(!ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch_empty, &r, &szero, ecmult_multi_callback, &data, 1)); - rustsecp256k1_v0_4_0_scratch_destroy(&ctx->error_callback, scratch_empty); + rustsecp256k1_v0_4_1_scratch_destroy(&ctx->error_callback, scratch_empty); } -void 
test_rustsecp256k1_v0_4_0_pippenger_bucket_window_inv(void) { +void test_rustsecp256k1_v0_4_1_pippenger_bucket_window_inv(void) { int i; - CHECK(rustsecp256k1_v0_4_0_pippenger_bucket_window_inv(0) == 0); + CHECK(rustsecp256k1_v0_4_1_pippenger_bucket_window_inv(0) == 0); for(i = 1; i <= PIPPENGER_MAX_BUCKET_WINDOW; i++) { /* Bucket_window of 8 is not used with endo */ if (i == 8) { continue; } - CHECK(rustsecp256k1_v0_4_0_pippenger_bucket_window(rustsecp256k1_v0_4_0_pippenger_bucket_window_inv(i)) == i); + CHECK(rustsecp256k1_v0_4_1_pippenger_bucket_window(rustsecp256k1_v0_4_1_pippenger_bucket_window_inv(i)) == i); if (i != PIPPENGER_MAX_BUCKET_WINDOW) { - CHECK(rustsecp256k1_v0_4_0_pippenger_bucket_window(rustsecp256k1_v0_4_0_pippenger_bucket_window_inv(i)+1) > i); + CHECK(rustsecp256k1_v0_4_1_pippenger_bucket_window(rustsecp256k1_v0_4_1_pippenger_bucket_window_inv(i)+1) > i); } } } @@ -3131,9 +3923,9 @@ void test_rustsecp256k1_v0_4_0_pippenger_bucket_window_inv(void) { * for a given scratch space. */ void test_ecmult_multi_pippenger_max_points(void) { - size_t scratch_size = rustsecp256k1_v0_4_0_testrand_int(256); - size_t max_size = rustsecp256k1_v0_4_0_pippenger_scratch_size(rustsecp256k1_v0_4_0_pippenger_bucket_window_inv(PIPPENGER_MAX_BUCKET_WINDOW-1)+512, 12); - rustsecp256k1_v0_4_0_scratch *scratch; + size_t scratch_size = rustsecp256k1_v0_4_1_testrand_int(256); + size_t max_size = rustsecp256k1_v0_4_1_pippenger_scratch_size(rustsecp256k1_v0_4_1_pippenger_bucket_window_inv(PIPPENGER_MAX_BUCKET_WINDOW-1)+512, 12); + rustsecp256k1_v0_4_1_scratch *scratch; size_t n_points_supported; int bucket_window = 0; @@ -3141,24 +3933,24 @@ void test_ecmult_multi_pippenger_max_points(void) { size_t i; size_t total_alloc; size_t checkpoint; - scratch = rustsecp256k1_v0_4_0_scratch_create(&ctx->error_callback, scratch_size); + scratch = rustsecp256k1_v0_4_1_scratch_create(&ctx->error_callback, scratch_size); CHECK(scratch != NULL); - checkpoint = rustsecp256k1_v0_4_0_scratch_checkpoint(&ctx->error_callback, scratch); - n_points_supported = rustsecp256k1_v0_4_0_pippenger_max_points(&ctx->error_callback, scratch); + checkpoint = rustsecp256k1_v0_4_1_scratch_checkpoint(&ctx->error_callback, scratch); + n_points_supported = rustsecp256k1_v0_4_1_pippenger_max_points(&ctx->error_callback, scratch); if (n_points_supported == 0) { - rustsecp256k1_v0_4_0_scratch_destroy(&ctx->error_callback, scratch); + rustsecp256k1_v0_4_1_scratch_destroy(&ctx->error_callback, scratch); continue; } - bucket_window = rustsecp256k1_v0_4_0_pippenger_bucket_window(n_points_supported); + bucket_window = rustsecp256k1_v0_4_1_pippenger_bucket_window(n_points_supported); /* allocate `total_alloc` bytes over `PIPPENGER_SCRATCH_OBJECTS` many allocations */ - total_alloc = rustsecp256k1_v0_4_0_pippenger_scratch_size(n_points_supported, bucket_window); + total_alloc = rustsecp256k1_v0_4_1_pippenger_scratch_size(n_points_supported, bucket_window); for (i = 0; i < PIPPENGER_SCRATCH_OBJECTS - 1; i++) { - CHECK(rustsecp256k1_v0_4_0_scratch_alloc(&ctx->error_callback, scratch, 1)); + CHECK(rustsecp256k1_v0_4_1_scratch_alloc(&ctx->error_callback, scratch, 1)); total_alloc--; } - CHECK(rustsecp256k1_v0_4_0_scratch_alloc(&ctx->error_callback, scratch, total_alloc)); - rustsecp256k1_v0_4_0_scratch_apply_checkpoint(&ctx->error_callback, scratch, checkpoint); - rustsecp256k1_v0_4_0_scratch_destroy(&ctx->error_callback, scratch); + CHECK(rustsecp256k1_v0_4_1_scratch_alloc(&ctx->error_callback, scratch, total_alloc)); + 
rustsecp256k1_v0_4_1_scratch_apply_checkpoint(&ctx->error_callback, scratch, checkpoint); + rustsecp256k1_v0_4_1_scratch_destroy(&ctx->error_callback, scratch); } CHECK(bucket_window == PIPPENGER_MAX_BUCKET_WINDOW); } @@ -3168,152 +3960,152 @@ void test_ecmult_multi_batch_size_helper(void) { max_n_batch_points = 0; n = 1; - CHECK(rustsecp256k1_v0_4_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 0); + CHECK(rustsecp256k1_v0_4_1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 0); max_n_batch_points = 1; n = 0; - CHECK(rustsecp256k1_v0_4_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_4_1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 0); CHECK(n_batch_points == 0); max_n_batch_points = 2; n = 5; - CHECK(rustsecp256k1_v0_4_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_4_1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 3); CHECK(n_batch_points == 2); max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH; n = ECMULT_MAX_POINTS_PER_BATCH; - CHECK(rustsecp256k1_v0_4_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_4_1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 1); CHECK(n_batch_points == ECMULT_MAX_POINTS_PER_BATCH); max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH + 1; n = ECMULT_MAX_POINTS_PER_BATCH + 1; - CHECK(rustsecp256k1_v0_4_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_4_1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 2); CHECK(n_batch_points == ECMULT_MAX_POINTS_PER_BATCH/2 + 1); max_n_batch_points = 1; n = SIZE_MAX; - CHECK(rustsecp256k1_v0_4_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_4_1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == SIZE_MAX); CHECK(n_batch_points == 1); max_n_batch_points = 2; n = SIZE_MAX; - CHECK(rustsecp256k1_v0_4_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_4_1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == SIZE_MAX/2 + 1); CHECK(n_batch_points == 2); } /** - * Run rustsecp256k1_v0_4_0_ecmult_multi_var with num points and a scratch space restricted to + * Run rustsecp256k1_v0_4_1_ecmult_multi_var with num points and a scratch space restricted to * 1 <= i <= num points. 
*/ void test_ecmult_multi_batching(void) { static const int n_points = 2*ECMULT_PIPPENGER_THRESHOLD; - rustsecp256k1_v0_4_0_scalar scG; - rustsecp256k1_v0_4_0_scalar szero; - rustsecp256k1_v0_4_0_scalar *sc = (rustsecp256k1_v0_4_0_scalar *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_4_0_scalar) * n_points); - rustsecp256k1_v0_4_0_ge *pt = (rustsecp256k1_v0_4_0_ge *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_4_0_ge) * n_points); - rustsecp256k1_v0_4_0_gej r; - rustsecp256k1_v0_4_0_gej r2; + rustsecp256k1_v0_4_1_scalar scG; + rustsecp256k1_v0_4_1_scalar szero; + rustsecp256k1_v0_4_1_scalar *sc = (rustsecp256k1_v0_4_1_scalar *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_4_1_scalar) * n_points); + rustsecp256k1_v0_4_1_ge *pt = (rustsecp256k1_v0_4_1_ge *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_4_1_ge) * n_points); + rustsecp256k1_v0_4_1_gej r; + rustsecp256k1_v0_4_1_gej r2; ecmult_multi_data data; int i; - rustsecp256k1_v0_4_0_scratch *scratch; + rustsecp256k1_v0_4_1_scratch *scratch; - rustsecp256k1_v0_4_0_gej_set_infinity(&r2); - rustsecp256k1_v0_4_0_scalar_set_int(&szero, 0); + rustsecp256k1_v0_4_1_gej_set_infinity(&r2); + rustsecp256k1_v0_4_1_scalar_set_int(&szero, 0); /* Get random scalars and group elements and compute result */ random_scalar_order(&scG); - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &r2, &r2, &szero, &scG); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &r2, &r2, &szero, &scG); for(i = 0; i < n_points; i++) { - rustsecp256k1_v0_4_0_ge ptg; - rustsecp256k1_v0_4_0_gej ptgj; + rustsecp256k1_v0_4_1_ge ptg; + rustsecp256k1_v0_4_1_gej ptgj; random_group_element_test(&ptg); - rustsecp256k1_v0_4_0_gej_set_ge(&ptgj, &ptg); + rustsecp256k1_v0_4_1_gej_set_ge(&ptgj, &ptg); pt[i] = ptg; random_scalar_order(&sc[i]); - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &ptgj, &ptgj, &sc[i], NULL); - rustsecp256k1_v0_4_0_gej_add_var(&r2, &r2, &ptgj, NULL); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &ptgj, &ptgj, &sc[i], NULL); + rustsecp256k1_v0_4_1_gej_add_var(&r2, &r2, &ptgj, NULL); } data.sc = sc; data.pt = pt; - rustsecp256k1_v0_4_0_gej_neg(&r2, &r2); + rustsecp256k1_v0_4_1_gej_neg(&r2, &r2); /* Test with empty scratch space. It should compute the correct result using * ecmult_mult_simple algorithm which doesn't require a scratch space. */ - scratch = rustsecp256k1_v0_4_0_scratch_create(&ctx->error_callback, 0); - CHECK(rustsecp256k1_v0_4_0_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); - rustsecp256k1_v0_4_0_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&r)); - rustsecp256k1_v0_4_0_scratch_destroy(&ctx->error_callback, scratch); + scratch = rustsecp256k1_v0_4_1_scratch_create(&ctx->error_callback, 0); + CHECK(rustsecp256k1_v0_4_1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); + rustsecp256k1_v0_4_1_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&r)); + rustsecp256k1_v0_4_1_scratch_destroy(&ctx->error_callback, scratch); /* Test with space for 1 point in pippenger. That's not enough because * ecmult_multi selects strauss which requires more memory. It should * therefore select the simple algorithm. 
*/ - scratch = rustsecp256k1_v0_4_0_scratch_create(&ctx->error_callback, rustsecp256k1_v0_4_0_pippenger_scratch_size(1, 1) + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT); - CHECK(rustsecp256k1_v0_4_0_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); - rustsecp256k1_v0_4_0_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&r)); - rustsecp256k1_v0_4_0_scratch_destroy(&ctx->error_callback, scratch); + scratch = rustsecp256k1_v0_4_1_scratch_create(&ctx->error_callback, rustsecp256k1_v0_4_1_pippenger_scratch_size(1, 1) + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT); + CHECK(rustsecp256k1_v0_4_1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); + rustsecp256k1_v0_4_1_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&r)); + rustsecp256k1_v0_4_1_scratch_destroy(&ctx->error_callback, scratch); for(i = 1; i <= n_points; i++) { if (i > ECMULT_PIPPENGER_THRESHOLD) { - int bucket_window = rustsecp256k1_v0_4_0_pippenger_bucket_window(i); - size_t scratch_size = rustsecp256k1_v0_4_0_pippenger_scratch_size(i, bucket_window); - scratch = rustsecp256k1_v0_4_0_scratch_create(&ctx->error_callback, scratch_size + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT); + int bucket_window = rustsecp256k1_v0_4_1_pippenger_bucket_window(i); + size_t scratch_size = rustsecp256k1_v0_4_1_pippenger_scratch_size(i, bucket_window); + scratch = rustsecp256k1_v0_4_1_scratch_create(&ctx->error_callback, scratch_size + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT); } else { - size_t scratch_size = rustsecp256k1_v0_4_0_strauss_scratch_size(i); - scratch = rustsecp256k1_v0_4_0_scratch_create(&ctx->error_callback, scratch_size + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); + size_t scratch_size = rustsecp256k1_v0_4_1_strauss_scratch_size(i); + scratch = rustsecp256k1_v0_4_1_scratch_create(&ctx->error_callback, scratch_size + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); } - CHECK(rustsecp256k1_v0_4_0_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); - rustsecp256k1_v0_4_0_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&r)); - rustsecp256k1_v0_4_0_scratch_destroy(&ctx->error_callback, scratch); + CHECK(rustsecp256k1_v0_4_1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); + rustsecp256k1_v0_4_1_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&r)); + rustsecp256k1_v0_4_1_scratch_destroy(&ctx->error_callback, scratch); } free(sc); free(pt); } void run_ecmult_multi_tests(void) { - rustsecp256k1_v0_4_0_scratch *scratch; + rustsecp256k1_v0_4_1_scratch *scratch; - test_rustsecp256k1_v0_4_0_pippenger_bucket_window_inv(); + test_rustsecp256k1_v0_4_1_pippenger_bucket_window_inv(); test_ecmult_multi_pippenger_max_points(); - scratch = rustsecp256k1_v0_4_0_scratch_create(&ctx->error_callback, 819200); - test_ecmult_multi(scratch, rustsecp256k1_v0_4_0_ecmult_multi_var); - test_ecmult_multi(NULL, rustsecp256k1_v0_4_0_ecmult_multi_var); - test_ecmult_multi(scratch, rustsecp256k1_v0_4_0_ecmult_pippenger_batch_single); - test_ecmult_multi_batch_single(rustsecp256k1_v0_4_0_ecmult_pippenger_batch_single); - test_ecmult_multi(scratch, rustsecp256k1_v0_4_0_ecmult_strauss_batch_single); - test_ecmult_multi_batch_single(rustsecp256k1_v0_4_0_ecmult_strauss_batch_single); - rustsecp256k1_v0_4_0_scratch_destroy(&ctx->error_callback, 
scratch); + scratch = rustsecp256k1_v0_4_1_scratch_create(&ctx->error_callback, 819200); + test_ecmult_multi(scratch, rustsecp256k1_v0_4_1_ecmult_multi_var); + test_ecmult_multi(NULL, rustsecp256k1_v0_4_1_ecmult_multi_var); + test_ecmult_multi(scratch, rustsecp256k1_v0_4_1_ecmult_pippenger_batch_single); + test_ecmult_multi_batch_single(rustsecp256k1_v0_4_1_ecmult_pippenger_batch_single); + test_ecmult_multi(scratch, rustsecp256k1_v0_4_1_ecmult_strauss_batch_single); + test_ecmult_multi_batch_single(rustsecp256k1_v0_4_1_ecmult_strauss_batch_single); + rustsecp256k1_v0_4_1_scratch_destroy(&ctx->error_callback, scratch); /* Run test_ecmult_multi with space for exactly one point */ - scratch = rustsecp256k1_v0_4_0_scratch_create(&ctx->error_callback, rustsecp256k1_v0_4_0_strauss_scratch_size(1) + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); - test_ecmult_multi(scratch, rustsecp256k1_v0_4_0_ecmult_multi_var); - rustsecp256k1_v0_4_0_scratch_destroy(&ctx->error_callback, scratch); + scratch = rustsecp256k1_v0_4_1_scratch_create(&ctx->error_callback, rustsecp256k1_v0_4_1_strauss_scratch_size(1) + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); + test_ecmult_multi(scratch, rustsecp256k1_v0_4_1_ecmult_multi_var); + rustsecp256k1_v0_4_1_scratch_destroy(&ctx->error_callback, scratch); test_ecmult_multi_batch_size_helper(); test_ecmult_multi_batching(); } -void test_wnaf(const rustsecp256k1_v0_4_0_scalar *number, int w) { - rustsecp256k1_v0_4_0_scalar x, two, t; +void test_wnaf(const rustsecp256k1_v0_4_1_scalar *number, int w) { + rustsecp256k1_v0_4_1_scalar x, two, t; int wnaf[256]; int zeroes = -1; int i; int bits; - rustsecp256k1_v0_4_0_scalar_set_int(&x, 0); - rustsecp256k1_v0_4_0_scalar_set_int(&two, 2); - bits = rustsecp256k1_v0_4_0_ecmult_wnaf(wnaf, 256, number, w); + rustsecp256k1_v0_4_1_scalar_set_int(&x, 0); + rustsecp256k1_v0_4_1_scalar_set_int(&two, 2); + bits = rustsecp256k1_v0_4_1_ecmult_wnaf(wnaf, 256, number, w); CHECK(bits <= 256); for (i = bits-1; i >= 0; i--) { int v = wnaf[i]; - rustsecp256k1_v0_4_0_scalar_mul(&x, &x, &two); + rustsecp256k1_v0_4_1_scalar_mul(&x, &x, &two); if (v) { CHECK(zeroes == -1 || zeroes >= w-1); /* check that distance between non-zero elements is at least w-1 */ zeroes=0; @@ -3325,104 +4117,104 @@ void test_wnaf(const rustsecp256k1_v0_4_0_scalar *number, int w) { zeroes++; } if (v >= 0) { - rustsecp256k1_v0_4_0_scalar_set_int(&t, v); + rustsecp256k1_v0_4_1_scalar_set_int(&t, v); } else { - rustsecp256k1_v0_4_0_scalar_set_int(&t, -v); - rustsecp256k1_v0_4_0_scalar_negate(&t, &t); + rustsecp256k1_v0_4_1_scalar_set_int(&t, -v); + rustsecp256k1_v0_4_1_scalar_negate(&t, &t); } - rustsecp256k1_v0_4_0_scalar_add(&x, &x, &t); + rustsecp256k1_v0_4_1_scalar_add(&x, &x, &t); } - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&x, number)); /* check that wnaf represents number */ + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&x, number)); /* check that wnaf represents number */ } -void test_constant_wnaf_negate(const rustsecp256k1_v0_4_0_scalar *number) { - rustsecp256k1_v0_4_0_scalar neg1 = *number; - rustsecp256k1_v0_4_0_scalar neg2 = *number; +void test_constant_wnaf_negate(const rustsecp256k1_v0_4_1_scalar *number) { + rustsecp256k1_v0_4_1_scalar neg1 = *number; + rustsecp256k1_v0_4_1_scalar neg2 = *number; int sign1 = 1; int sign2 = 1; - if (!rustsecp256k1_v0_4_0_scalar_get_bits(&neg1, 0, 1)) { - rustsecp256k1_v0_4_0_scalar_negate(&neg1, &neg1); + if (!rustsecp256k1_v0_4_1_scalar_get_bits(&neg1, 0, 1)) { + rustsecp256k1_v0_4_1_scalar_negate(&neg1, &neg1); sign1 = -1; } - sign2 = 
rustsecp256k1_v0_4_0_scalar_cond_negate(&neg2, rustsecp256k1_v0_4_0_scalar_is_even(&neg2)); + sign2 = rustsecp256k1_v0_4_1_scalar_cond_negate(&neg2, rustsecp256k1_v0_4_1_scalar_is_even(&neg2)); CHECK(sign1 == sign2); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&neg1, &neg2)); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&neg1, &neg2)); } -void test_constant_wnaf(const rustsecp256k1_v0_4_0_scalar *number, int w) { - rustsecp256k1_v0_4_0_scalar x, shift; +void test_constant_wnaf(const rustsecp256k1_v0_4_1_scalar *number, int w) { + rustsecp256k1_v0_4_1_scalar x, shift; int wnaf[256] = {0}; int i; int skew; int bits = 256; - rustsecp256k1_v0_4_0_scalar num = *number; - rustsecp256k1_v0_4_0_scalar scalar_skew; + rustsecp256k1_v0_4_1_scalar num = *number; + rustsecp256k1_v0_4_1_scalar scalar_skew; - rustsecp256k1_v0_4_0_scalar_set_int(&x, 0); - rustsecp256k1_v0_4_0_scalar_set_int(&shift, 1 << w); + rustsecp256k1_v0_4_1_scalar_set_int(&x, 0); + rustsecp256k1_v0_4_1_scalar_set_int(&shift, 1 << w); for (i = 0; i < 16; ++i) { - rustsecp256k1_v0_4_0_scalar_shr_int(&num, 8); + rustsecp256k1_v0_4_1_scalar_shr_int(&num, 8); } bits = 128; - skew = rustsecp256k1_v0_4_0_wnaf_const(wnaf, &num, w, bits); + skew = rustsecp256k1_v0_4_1_wnaf_const(wnaf, &num, w, bits); for (i = WNAF_SIZE_BITS(bits, w); i >= 0; --i) { - rustsecp256k1_v0_4_0_scalar t; + rustsecp256k1_v0_4_1_scalar t; int v = wnaf[i]; CHECK(v != 0); /* check nonzero */ CHECK(v & 1); /* check parity */ CHECK(v > -(1 << w)); /* check range above */ CHECK(v < (1 << w)); /* check range below */ - rustsecp256k1_v0_4_0_scalar_mul(&x, &x, &shift); + rustsecp256k1_v0_4_1_scalar_mul(&x, &x, &shift); if (v >= 0) { - rustsecp256k1_v0_4_0_scalar_set_int(&t, v); + rustsecp256k1_v0_4_1_scalar_set_int(&t, v); } else { - rustsecp256k1_v0_4_0_scalar_set_int(&t, -v); - rustsecp256k1_v0_4_0_scalar_negate(&t, &t); + rustsecp256k1_v0_4_1_scalar_set_int(&t, -v); + rustsecp256k1_v0_4_1_scalar_negate(&t, &t); } - rustsecp256k1_v0_4_0_scalar_add(&x, &x, &t); + rustsecp256k1_v0_4_1_scalar_add(&x, &x, &t); } /* Skew num because when encoding numbers as odd we use an offset */ - rustsecp256k1_v0_4_0_scalar_set_int(&scalar_skew, 1 << (skew == 2)); - rustsecp256k1_v0_4_0_scalar_add(&num, &num, &scalar_skew); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&x, &num)); + rustsecp256k1_v0_4_1_scalar_set_int(&scalar_skew, 1 << (skew == 2)); + rustsecp256k1_v0_4_1_scalar_add(&num, &num, &scalar_skew); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&x, &num)); } -void test_fixed_wnaf(const rustsecp256k1_v0_4_0_scalar *number, int w) { - rustsecp256k1_v0_4_0_scalar x, shift; +void test_fixed_wnaf(const rustsecp256k1_v0_4_1_scalar *number, int w) { + rustsecp256k1_v0_4_1_scalar x, shift; int wnaf[256] = {0}; int i; int skew; - rustsecp256k1_v0_4_0_scalar num = *number; + rustsecp256k1_v0_4_1_scalar num = *number; - rustsecp256k1_v0_4_0_scalar_set_int(&x, 0); - rustsecp256k1_v0_4_0_scalar_set_int(&shift, 1 << w); + rustsecp256k1_v0_4_1_scalar_set_int(&x, 0); + rustsecp256k1_v0_4_1_scalar_set_int(&shift, 1 << w); for (i = 0; i < 16; ++i) { - rustsecp256k1_v0_4_0_scalar_shr_int(&num, 8); + rustsecp256k1_v0_4_1_scalar_shr_int(&num, 8); } - skew = rustsecp256k1_v0_4_0_wnaf_fixed(wnaf, &num, w); + skew = rustsecp256k1_v0_4_1_wnaf_fixed(wnaf, &num, w); for (i = WNAF_SIZE(w)-1; i >= 0; --i) { - rustsecp256k1_v0_4_0_scalar t; + rustsecp256k1_v0_4_1_scalar t; int v = wnaf[i]; CHECK(v == 0 || v & 1); /* check parity */ CHECK(v > -(1 << w)); /* check range above */ CHECK(v < (1 << w)); /* check range below */ - 
rustsecp256k1_v0_4_0_scalar_mul(&x, &x, &shift); + rustsecp256k1_v0_4_1_scalar_mul(&x, &x, &shift); if (v >= 0) { - rustsecp256k1_v0_4_0_scalar_set_int(&t, v); + rustsecp256k1_v0_4_1_scalar_set_int(&t, v); } else { - rustsecp256k1_v0_4_0_scalar_set_int(&t, -v); - rustsecp256k1_v0_4_0_scalar_negate(&t, &t); + rustsecp256k1_v0_4_1_scalar_set_int(&t, -v); + rustsecp256k1_v0_4_1_scalar_negate(&t, &t); } - rustsecp256k1_v0_4_0_scalar_add(&x, &x, &t); + rustsecp256k1_v0_4_1_scalar_add(&x, &x, &t); } /* If skew is 1 then add 1 to num */ - rustsecp256k1_v0_4_0_scalar_cadd_bit(&num, 0, skew == 1); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&x, &num)); + rustsecp256k1_v0_4_1_scalar_cadd_bit(&num, 0, skew == 1); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&x, &num)); } /* Checks that the first 8 elements of wnaf are equal to wnaf_expected and the @@ -3442,18 +4234,18 @@ void test_fixed_wnaf_small(void) { int wnaf[256] = {0}; int i; int skew; - rustsecp256k1_v0_4_0_scalar num; + rustsecp256k1_v0_4_1_scalar num; - rustsecp256k1_v0_4_0_scalar_set_int(&num, 0); - skew = rustsecp256k1_v0_4_0_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_4_1_scalar_set_int(&num, 0); + skew = rustsecp256k1_v0_4_1_wnaf_fixed(wnaf, &num, w); for (i = WNAF_SIZE(w)-1; i >= 0; --i) { int v = wnaf[i]; CHECK(v == 0); } CHECK(skew == 0); - rustsecp256k1_v0_4_0_scalar_set_int(&num, 1); - skew = rustsecp256k1_v0_4_0_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_4_1_scalar_set_int(&num, 1); + skew = rustsecp256k1_v0_4_1_wnaf_fixed(wnaf, &num, w); for (i = WNAF_SIZE(w)-1; i >= 1; --i) { int v = wnaf[i]; CHECK(v == 0); @@ -3463,29 +4255,29 @@ void test_fixed_wnaf_small(void) { { int wnaf_expected[8] = { 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf }; - rustsecp256k1_v0_4_0_scalar_set_int(&num, 0xffffffff); - skew = rustsecp256k1_v0_4_0_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_4_1_scalar_set_int(&num, 0xffffffff); + skew = rustsecp256k1_v0_4_1_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 0); } { int wnaf_expected[8] = { -1, -1, -1, -1, -1, -1, -1, 0xf }; - rustsecp256k1_v0_4_0_scalar_set_int(&num, 0xeeeeeeee); - skew = rustsecp256k1_v0_4_0_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_4_1_scalar_set_int(&num, 0xeeeeeeee); + skew = rustsecp256k1_v0_4_1_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 1); } { int wnaf_expected[8] = { 1, 0, 1, 0, 1, 0, 1, 0 }; - rustsecp256k1_v0_4_0_scalar_set_int(&num, 0x01010101); - skew = rustsecp256k1_v0_4_0_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_4_1_scalar_set_int(&num, 0x01010101); + skew = rustsecp256k1_v0_4_1_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 0); } { int wnaf_expected[8] = { -0xf, 0, 0xf, -0xf, 0, 0xf, 1, 0 }; - rustsecp256k1_v0_4_0_scalar_set_int(&num, 0x01ef1ef1); - skew = rustsecp256k1_v0_4_0_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_4_1_scalar_set_int(&num, 0x01ef1ef1); + skew = rustsecp256k1_v0_4_1_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 0); } @@ -3493,7 +4285,7 @@ void test_fixed_wnaf_small(void) { void run_wnaf(void) { int i; - rustsecp256k1_v0_4_0_scalar n = {{0}}; + rustsecp256k1_v0_4_1_scalar n = {{0}}; test_constant_wnaf(&n, 4); /* Sanity check: 1 and 2 are the smallest odd and even numbers and should @@ -3503,21 +4295,21 @@ void run_wnaf(void) { n.d[0] = 2; test_constant_wnaf(&n, 4); /* Test -1, because it's a special case in wnaf_const */ - n = rustsecp256k1_v0_4_0_scalar_one; 
- rustsecp256k1_v0_4_0_scalar_negate(&n, &n); + n = rustsecp256k1_v0_4_1_scalar_one; + rustsecp256k1_v0_4_1_scalar_negate(&n, &n); test_constant_wnaf(&n, 4); /* Test -2, which may not lead to overflows in wnaf_const */ - rustsecp256k1_v0_4_0_scalar_add(&n, &rustsecp256k1_v0_4_0_scalar_one, &rustsecp256k1_v0_4_0_scalar_one); - rustsecp256k1_v0_4_0_scalar_negate(&n, &n); + rustsecp256k1_v0_4_1_scalar_add(&n, &rustsecp256k1_v0_4_1_scalar_one, &rustsecp256k1_v0_4_1_scalar_one); + rustsecp256k1_v0_4_1_scalar_negate(&n, &n); test_constant_wnaf(&n, 4); /* Test (1/2) - 1 = 1/-2 and 1/2 = (1/-2) + 1 as corner cases of negation handling in wnaf_const */ - rustsecp256k1_v0_4_0_scalar_inverse(&n, &n); + rustsecp256k1_v0_4_1_scalar_inverse(&n, &n); test_constant_wnaf(&n, 4); - rustsecp256k1_v0_4_0_scalar_add(&n, &n, &rustsecp256k1_v0_4_0_scalar_one); + rustsecp256k1_v0_4_1_scalar_add(&n, &n, &rustsecp256k1_v0_4_1_scalar_one); test_constant_wnaf(&n, 4); /* Test 0 for fixed wnaf */ @@ -3530,43 +4322,43 @@ void run_wnaf(void) { test_constant_wnaf(&n, 4 + (i % 10)); test_fixed_wnaf(&n, 4 + (i % 10)); } - rustsecp256k1_v0_4_0_scalar_set_int(&n, 0); - CHECK(rustsecp256k1_v0_4_0_scalar_cond_negate(&n, 1) == -1); - CHECK(rustsecp256k1_v0_4_0_scalar_is_zero(&n)); - CHECK(rustsecp256k1_v0_4_0_scalar_cond_negate(&n, 0) == 1); - CHECK(rustsecp256k1_v0_4_0_scalar_is_zero(&n)); + rustsecp256k1_v0_4_1_scalar_set_int(&n, 0); + CHECK(rustsecp256k1_v0_4_1_scalar_cond_negate(&n, 1) == -1); + CHECK(rustsecp256k1_v0_4_1_scalar_is_zero(&n)); + CHECK(rustsecp256k1_v0_4_1_scalar_cond_negate(&n, 0) == 1); + CHECK(rustsecp256k1_v0_4_1_scalar_is_zero(&n)); } void test_ecmult_constants(void) { /* Test ecmult_gen() for [0..36) and [order-36..0). */ - rustsecp256k1_v0_4_0_scalar x; - rustsecp256k1_v0_4_0_gej r; - rustsecp256k1_v0_4_0_ge ng; + rustsecp256k1_v0_4_1_scalar x; + rustsecp256k1_v0_4_1_gej r; + rustsecp256k1_v0_4_1_ge ng; int i; int j; - rustsecp256k1_v0_4_0_ge_neg(&ng, &rustsecp256k1_v0_4_0_ge_const_g); + rustsecp256k1_v0_4_1_ge_neg(&ng, &rustsecp256k1_v0_4_1_ge_const_g); for (i = 0; i < 36; i++ ) { - rustsecp256k1_v0_4_0_scalar_set_int(&x, i); - rustsecp256k1_v0_4_0_ecmult_gen(&ctx->ecmult_gen_ctx, &r, &x); + rustsecp256k1_v0_4_1_scalar_set_int(&x, i); + rustsecp256k1_v0_4_1_ecmult_gen(&ctx->ecmult_gen_ctx, &r, &x); for (j = 0; j < i; j++) { if (j == i - 1) { - ge_equals_gej(&rustsecp256k1_v0_4_0_ge_const_g, &r); + ge_equals_gej(&rustsecp256k1_v0_4_1_ge_const_g, &r); } - rustsecp256k1_v0_4_0_gej_add_ge(&r, &r, &ng); + rustsecp256k1_v0_4_1_gej_add_ge(&r, &r, &ng); } - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&r)); } for (i = 1; i <= 36; i++ ) { - rustsecp256k1_v0_4_0_scalar_set_int(&x, i); - rustsecp256k1_v0_4_0_scalar_negate(&x, &x); - rustsecp256k1_v0_4_0_ecmult_gen(&ctx->ecmult_gen_ctx, &r, &x); + rustsecp256k1_v0_4_1_scalar_set_int(&x, i); + rustsecp256k1_v0_4_1_scalar_negate(&x, &x); + rustsecp256k1_v0_4_1_ecmult_gen(&ctx->ecmult_gen_ctx, &r, &x); for (j = 0; j < i; j++) { if (j == i - 1) { ge_equals_gej(&ng, &r); } - rustsecp256k1_v0_4_0_gej_add_ge(&r, &r, &rustsecp256k1_v0_4_0_ge_const_g); + rustsecp256k1_v0_4_1_gej_add_ge(&r, &r, &rustsecp256k1_v0_4_1_ge_const_g); } - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&r)); } } @@ -3576,36 +4368,36 @@ void run_ecmult_constants(void) { void test_ecmult_gen_blind(void) { /* Test ecmult_gen() blinding and confirm that the blinding changes, the affine points match, and the 
z's don't match. */ - rustsecp256k1_v0_4_0_scalar key; - rustsecp256k1_v0_4_0_scalar b; + rustsecp256k1_v0_4_1_scalar key; + rustsecp256k1_v0_4_1_scalar b; unsigned char seed32[32]; - rustsecp256k1_v0_4_0_gej pgej; - rustsecp256k1_v0_4_0_gej pgej2; - rustsecp256k1_v0_4_0_gej i; - rustsecp256k1_v0_4_0_ge pge; + rustsecp256k1_v0_4_1_gej pgej; + rustsecp256k1_v0_4_1_gej pgej2; + rustsecp256k1_v0_4_1_gej i; + rustsecp256k1_v0_4_1_ge pge; random_scalar_order_test(&key); - rustsecp256k1_v0_4_0_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej, &key); - rustsecp256k1_v0_4_0_testrand256(seed32); + rustsecp256k1_v0_4_1_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej, &key); + rustsecp256k1_v0_4_1_testrand256(seed32); b = ctx->ecmult_gen_ctx.blind; i = ctx->ecmult_gen_ctx.initial; - rustsecp256k1_v0_4_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); - CHECK(!rustsecp256k1_v0_4_0_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind)); - rustsecp256k1_v0_4_0_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej2, &key); + rustsecp256k1_v0_4_1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); + CHECK(!rustsecp256k1_v0_4_1_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind)); + rustsecp256k1_v0_4_1_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej2, &key); CHECK(!gej_xyz_equals_gej(&pgej, &pgej2)); CHECK(!gej_xyz_equals_gej(&i, &ctx->ecmult_gen_ctx.initial)); - rustsecp256k1_v0_4_0_ge_set_gej(&pge, &pgej); + rustsecp256k1_v0_4_1_ge_set_gej(&pge, &pgej); ge_equals_gej(&pge, &pgej2); } void test_ecmult_gen_blind_reset(void) { /* Test ecmult_gen() blinding reset and confirm that the blinding is consistent. */ - rustsecp256k1_v0_4_0_scalar b; - rustsecp256k1_v0_4_0_gej initial; - rustsecp256k1_v0_4_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0); + rustsecp256k1_v0_4_1_scalar b; + rustsecp256k1_v0_4_1_gej initial; + rustsecp256k1_v0_4_1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0); b = ctx->ecmult_gen_ctx.blind; initial = ctx->ecmult_gen_ctx.initial; - rustsecp256k1_v0_4_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind)); + rustsecp256k1_v0_4_1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind)); CHECK(gej_xyz_equals_gej(&initial, &ctx->ecmult_gen_ctx.initial)); } @@ -3618,46 +4410,46 @@ void run_ecmult_gen_blind(void) { } /***** ENDOMORPHISH TESTS *****/ -void test_scalar_split(const rustsecp256k1_v0_4_0_scalar* full) { - rustsecp256k1_v0_4_0_scalar s, s1, slam; +void test_scalar_split(const rustsecp256k1_v0_4_1_scalar* full) { + rustsecp256k1_v0_4_1_scalar s, s1, slam; const unsigned char zero[32] = {0}; unsigned char tmp[32]; - rustsecp256k1_v0_4_0_scalar_split_lambda(&s1, &slam, full); + rustsecp256k1_v0_4_1_scalar_split_lambda(&s1, &slam, full); /* check slam*lambda + s1 == full */ - rustsecp256k1_v0_4_0_scalar_mul(&s, &rustsecp256k1_v0_4_0_const_lambda, &slam); - rustsecp256k1_v0_4_0_scalar_add(&s, &s, &s1); - CHECK(rustsecp256k1_v0_4_0_scalar_eq(&s, full)); + rustsecp256k1_v0_4_1_scalar_mul(&s, &rustsecp256k1_v0_4_1_const_lambda, &slam); + rustsecp256k1_v0_4_1_scalar_add(&s, &s, &s1); + CHECK(rustsecp256k1_v0_4_1_scalar_eq(&s, full)); /* check that both are <= 128 bits in size */ - if (rustsecp256k1_v0_4_0_scalar_is_high(&s1)) { - rustsecp256k1_v0_4_0_scalar_negate(&s1, &s1); + if (rustsecp256k1_v0_4_1_scalar_is_high(&s1)) { + rustsecp256k1_v0_4_1_scalar_negate(&s1, &s1); } - if (rustsecp256k1_v0_4_0_scalar_is_high(&slam)) { - rustsecp256k1_v0_4_0_scalar_negate(&slam, &slam); + if (rustsecp256k1_v0_4_1_scalar_is_high(&slam)) { + 
rustsecp256k1_v0_4_1_scalar_negate(&slam, &slam); } - rustsecp256k1_v0_4_0_scalar_get_b32(tmp, &s1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(zero, tmp, 16) == 0); - rustsecp256k1_v0_4_0_scalar_get_b32(tmp, &slam); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(zero, tmp, 16) == 0); + rustsecp256k1_v0_4_1_scalar_get_b32(tmp, &s1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(zero, tmp, 16) == 0); + rustsecp256k1_v0_4_1_scalar_get_b32(tmp, &slam); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(zero, tmp, 16) == 0); } void run_endomorphism_tests(void) { unsigned i; - static rustsecp256k1_v0_4_0_scalar s; - test_scalar_split(&rustsecp256k1_v0_4_0_scalar_zero); - test_scalar_split(&rustsecp256k1_v0_4_0_scalar_one); - rustsecp256k1_v0_4_0_scalar_negate(&s,&rustsecp256k1_v0_4_0_scalar_one); + static rustsecp256k1_v0_4_1_scalar s; + test_scalar_split(&rustsecp256k1_v0_4_1_scalar_zero); + test_scalar_split(&rustsecp256k1_v0_4_1_scalar_one); + rustsecp256k1_v0_4_1_scalar_negate(&s,&rustsecp256k1_v0_4_1_scalar_one); test_scalar_split(&s); - test_scalar_split(&rustsecp256k1_v0_4_0_const_lambda); - rustsecp256k1_v0_4_0_scalar_add(&s, &rustsecp256k1_v0_4_0_const_lambda, &rustsecp256k1_v0_4_0_scalar_one); + test_scalar_split(&rustsecp256k1_v0_4_1_const_lambda); + rustsecp256k1_v0_4_1_scalar_add(&s, &rustsecp256k1_v0_4_1_const_lambda, &rustsecp256k1_v0_4_1_scalar_one); test_scalar_split(&s); for (i = 0; i < 100U * count; ++i) { - rustsecp256k1_v0_4_0_scalar full; + rustsecp256k1_v0_4_1_scalar full; random_scalar_order_test(&full); test_scalar_split(&full); } @@ -3668,12 +4460,12 @@ void run_endomorphism_tests(void) { void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvalid) { unsigned char pubkeyc[65]; - rustsecp256k1_v0_4_0_pubkey pubkey; - rustsecp256k1_v0_4_0_ge ge; + rustsecp256k1_v0_4_1_pubkey pubkey; + rustsecp256k1_v0_4_1_ge ge; size_t pubkeyclen; int32_t ecount; ecount = 0; - rustsecp256k1_v0_4_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); for (pubkeyclen = 3; pubkeyclen <= 65; pubkeyclen++) { /* Smaller sizes are tested exhaustively elsewhere. */ int32_t i; @@ -3699,30 +4491,30 @@ void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvali memset(&pubkey, 0, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); ecount = 0; - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); outl = 65; VG_UNDEF(pubkeyo, 65); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_COMPRESSED) == 1); VG_CHECK(pubkeyo, outl); CHECK(outl == 33); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkeyo[1], &pubkeyc[1], 32) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkeyo[1], &pubkeyc[1], 32) == 0); CHECK((pubkeyclen != 33) || (pubkeyo[0] == pubkeyc[0])); if (ypass) { /* This test isn't always done because we decode with alternative signs, so the y won't match. 
*/ CHECK(pubkeyo[0] == ysign); - CHECK(rustsecp256k1_v0_4_0_pubkey_load(ctx, &ge, &pubkey) == 1); + CHECK(rustsecp256k1_v0_4_1_pubkey_load(ctx, &ge, &pubkey) == 1); memset(&pubkey, 0, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); - rustsecp256k1_v0_4_0_pubkey_save(&pubkey, &ge); + rustsecp256k1_v0_4_1_pubkey_save(&pubkey, &ge); VG_CHECK(&pubkey, sizeof(pubkey)); outl = 65; VG_UNDEF(pubkeyo, 65); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); VG_CHECK(pubkeyo, outl); CHECK(outl == 65); CHECK(pubkeyo[0] == 4); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkeyo[1], input, 64) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkeyo[1], input, 64) == 0); } CHECK(ecount == 0); } else { @@ -3730,15 +4522,15 @@ void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvali memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_4_0_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_4_1_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); } } } - rustsecp256k1_v0_4_0_context_set_illegal_callback(ctx, NULL, NULL); + rustsecp256k1_v0_4_1_context_set_illegal_callback(ctx, NULL, NULL); } void run_ec_pubkey_parse_test(void) { @@ -3922,8 +4714,8 @@ void run_ec_pubkey_parse_test(void) { }; unsigned char sout[65]; unsigned char shortkey[2]; - rustsecp256k1_v0_4_0_ge ge; - rustsecp256k1_v0_4_0_pubkey pubkey; + rustsecp256k1_v0_4_1_ge ge; + rustsecp256k1_v0_4_1_pubkey pubkey; size_t len; int32_t i; int32_t ecount; @@ -3931,16 +4723,16 @@ void run_ec_pubkey_parse_test(void) { ecount = 0; /* Nothing should be reading this far into pubkeyc. */ VG_UNDEF(&pubkeyc[65], 1); - rustsecp256k1_v0_4_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); /* Zero length claimed, fail, zeroize, no illegal arg error. */ memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(shortkey, 2); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_parse(ctx, &pubkey, shortkey, 0) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, &pubkey, shortkey, 0) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_4_0_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_4_1_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); /* Length one claimed, fail, zeroize, no illegal arg error. */ for (i = 0; i < 256 ; i++) { @@ -3949,10 +4741,10 @@ void run_ec_pubkey_parse_test(void) { shortkey[0] = i; VG_UNDEF(&shortkey[1], 1); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_parse(ctx, &pubkey, shortkey, 1) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, &pubkey, shortkey, 1) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_4_0_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_4_1_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); } /* Length two claimed, fail, zeroize, no illegal arg error. 
*/ @@ -3962,101 +4754,101 @@ void run_ec_pubkey_parse_test(void) { shortkey[0] = i & 255; shortkey[1] = i >> 8; VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_parse(ctx, &pubkey, shortkey, 2) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, &pubkey, shortkey, 2) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_4_0_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_4_1_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); } memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); /* 33 bytes claimed on otherwise valid input starting with 0x04, fail, zeroize output, no illegal arg error. */ - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 33) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 33) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_4_0_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_4_1_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); /* NULL pubkey, illegal arg error. Pubkey isn't rewritten before this step, since it's NULL into the parser. */ - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_parse(ctx, NULL, pubkeyc, 65) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, NULL, pubkeyc, 65) == 0); CHECK(ecount == 2); /* NULL input string. Illegal arg and zeroize output. */ memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_parse(ctx, &pubkey, NULL, 65) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, &pubkey, NULL, 65) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_4_1_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 2); /* 64 bytes claimed on input starting with 0x04, fail, zeroize output, no illegal arg error. */ memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 64) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 64) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_4_0_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_4_1_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); /* 66 bytes claimed, fail, zeroize output, no illegal arg error. */ memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 66) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 66) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_4_0_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_4_1_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); /* Valid parse. 
*/ memset(&pubkey, 0, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 65) == 1); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_parse(rustsecp256k1_v0_4_0_context_no_precomp, &pubkey, pubkeyc, 65) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 65) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(rustsecp256k1_v0_4_1_context_no_precomp, &pubkey, pubkeyc, 65) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); VG_UNDEF(&ge, sizeof(ge)); - CHECK(rustsecp256k1_v0_4_0_pubkey_load(ctx, &ge, &pubkey) == 1); + CHECK(rustsecp256k1_v0_4_1_pubkey_load(ctx, &ge, &pubkey) == 1); VG_CHECK(&ge.x, sizeof(ge.x)); VG_CHECK(&ge.y, sizeof(ge.y)); VG_CHECK(&ge.infinity, sizeof(ge.infinity)); - ge_equals_ge(&rustsecp256k1_v0_4_0_ge_const_g, &ge); + ge_equals_ge(&rustsecp256k1_v0_4_1_ge_const_g, &ge); CHECK(ecount == 0); - /* rustsecp256k1_v0_4_0_ec_pubkey_serialize illegal args. */ + /* rustsecp256k1_v0_4_1_ec_pubkey_serialize illegal args. */ ecount = 0; len = 65; - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, NULL, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, NULL, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0); CHECK(ecount == 1); CHECK(len == 0); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, sout, NULL, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, sout, NULL, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0); CHECK(ecount == 2); len = 65; VG_UNDEF(sout, 65); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, sout, &len, NULL, SECP256K1_EC_UNCOMPRESSED) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, sout, &len, NULL, SECP256K1_EC_UNCOMPRESSED) == 0); VG_CHECK(sout, 65); CHECK(ecount == 3); CHECK(len == 0); len = 65; - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, sout, &len, &pubkey, ~0) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, sout, &len, &pubkey, ~0) == 0); CHECK(ecount == 4); CHECK(len == 0); len = 65; VG_UNDEF(sout, 65); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, sout, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, sout, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); VG_CHECK(sout, 65); CHECK(ecount == 4); CHECK(len == 65); /* Multiple illegal args. Should still set arg error only once. */ ecount = 0; ecount2 = 11; - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0); CHECK(ecount == 1); /* Does the illegal arg callback actually change the behavior? */ - rustsecp256k1_v0_4_0_context_set_illegal_callback(ctx, uncounting_illegal_callback_fn, &ecount2); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0); + rustsecp256k1_v0_4_1_context_set_illegal_callback(ctx, uncounting_illegal_callback_fn, &ecount2); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0); CHECK(ecount == 1); CHECK(ecount2 == 10); - rustsecp256k1_v0_4_0_context_set_illegal_callback(ctx, NULL, NULL); + rustsecp256k1_v0_4_1_context_set_illegal_callback(ctx, NULL, NULL); /* Try a bunch of prefabbed points with all possible encodings. 
*/ for (i = 0; i < SECP256K1_EC_PARSE_TEST_NVALID; i++) { ec_pubkey_parse_pointtest(valid[i], 1, 1); @@ -4076,253 +4868,253 @@ void run_eckey_edge_case_test(void) { 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41 }; - const unsigned char zeros[sizeof(rustsecp256k1_v0_4_0_pubkey)] = {0x00}; + const unsigned char zeros[sizeof(rustsecp256k1_v0_4_1_pubkey)] = {0x00}; unsigned char ctmp[33]; unsigned char ctmp2[33]; - rustsecp256k1_v0_4_0_pubkey pubkey; - rustsecp256k1_v0_4_0_pubkey pubkey2; - rustsecp256k1_v0_4_0_pubkey pubkey_one; - rustsecp256k1_v0_4_0_pubkey pubkey_negone; - const rustsecp256k1_v0_4_0_pubkey *pubkeys[3]; + rustsecp256k1_v0_4_1_pubkey pubkey; + rustsecp256k1_v0_4_1_pubkey pubkey2; + rustsecp256k1_v0_4_1_pubkey pubkey_one; + rustsecp256k1_v0_4_1_pubkey pubkey_negone; + const rustsecp256k1_v0_4_1_pubkey *pubkeys[3]; size_t len; int32_t ecount; /* Group order is too large, reject. */ - CHECK(rustsecp256k1_v0_4_0_ec_seckey_verify(ctx, orderc) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, orderc) == 0); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey, orderc) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey, orderc) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_0_pubkey)) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_1_pubkey)) == 0); /* Maximum value is too large, reject. */ memset(ctmp, 255, 32); - CHECK(rustsecp256k1_v0_4_0_ec_seckey_verify(ctx, ctmp) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, ctmp) == 0); memset(&pubkey, 1, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_0_pubkey)) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_1_pubkey)) == 0); /* Zero is too small, reject. */ memset(ctmp, 0, 32); - CHECK(rustsecp256k1_v0_4_0_ec_seckey_verify(ctx, ctmp) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, ctmp) == 0); memset(&pubkey, 1, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_0_pubkey)) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_1_pubkey)) == 0); /* One must be accepted. */ ctmp[31] = 0x01; - CHECK(rustsecp256k1_v0_4_0_ec_seckey_verify(ctx, ctmp) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, ctmp) == 1); memset(&pubkey, 0, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey, ctmp) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_0_pubkey)) > 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_1_pubkey)) > 0); pubkey_one = pubkey; /* Group order + 1 is too large, reject. 
*/ memcpy(ctmp, orderc, 32); ctmp[31] = 0x42; - CHECK(rustsecp256k1_v0_4_0_ec_seckey_verify(ctx, ctmp) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, ctmp) == 0); memset(&pubkey, 1, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_0_pubkey)) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_1_pubkey)) == 0); /* -1 must be accepted. */ ctmp[31] = 0x40; - CHECK(rustsecp256k1_v0_4_0_ec_seckey_verify(ctx, ctmp) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, ctmp) == 1); memset(&pubkey, 0, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey, ctmp) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_0_pubkey)) > 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_1_pubkey)) > 0); pubkey_negone = pubkey; /* Tweak of zero leaves the value unchanged. */ memset(ctmp2, 0, 32); - CHECK(rustsecp256k1_v0_4_0_ec_seckey_tweak_add(ctx, ctmp, ctmp2) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(orderc, ctmp, 31) == 0 && ctmp[31] == 0x40); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_tweak_add(ctx, ctmp, ctmp2) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(orderc, ctmp, 31) == 0 && ctmp[31] == 0x40); memcpy(&pubkey2, &pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); /* Multiply tweak of zero zeroizes the output. */ - CHECK(rustsecp256k1_v0_4_0_ec_seckey_tweak_mul(ctx, ctmp, ctmp2) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros, ctmp, 32) == 0); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul(ctx, &pubkey, ctmp2) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_tweak_mul(ctx, ctmp, ctmp2) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros, ctmp, 32) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_tweak_mul(ctx, &pubkey, ctmp2) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); /* If seckey_tweak_add or seckey_tweak_mul are called with an overflowing seckey, the seckey is zeroized. 
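[Editorial note, not part of the patch: the tweak tests in this region rest on two facts: ec_seckey_tweak_add computes (sk + tweak) mod n, failing and zeroizing sk on overflow or a zero result, while ec_pubkey_tweak_add computes P + tweak*G, so the two must stay consistent. A minimal sketch of that consistency using the upstream public API follows; the unprefixed secp256k1_* names and the direct struct memcmp (which these tests themselves use) are assumptions of the sketch, not patch content.]

    #include <assert.h>
    #include <string.h>
    #include <secp256k1.h>

    int main(void) {
        secp256k1_context *c =
            secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
        unsigned char sk[32], t[32];
        secp256k1_pubkey pk, pk2;
        memset(sk, 0x21, 32);
        memset(t, 0x07, 32);
        assert(secp256k1_ec_pubkey_create(c, &pk, sk) == 1);
        /* Tweak the public key: pk := pk + t*G. */
        assert(secp256k1_ec_pubkey_tweak_add(c, &pk, t) == 1);
        /* Tweak the secret key the same way and re-derive: sk := (sk + t) mod n. */
        assert(secp256k1_ec_seckey_tweak_add(c, sk, t) == 1);
        assert(secp256k1_ec_pubkey_create(c, &pk2, sk) == 1);
        assert(memcmp(&pk, &pk2, sizeof(pk)) == 0); /* same point either way */
        secp256k1_context_destroy(c);
        return 0;
    }
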
*/ memcpy(ctmp, orderc, 32); memset(ctmp2, 0, 32); ctmp2[31] = 0x01; - CHECK(rustsecp256k1_v0_4_0_ec_seckey_verify(ctx, ctmp2) == 1); - CHECK(rustsecp256k1_v0_4_0_ec_seckey_verify(ctx, ctmp) == 0); - CHECK(rustsecp256k1_v0_4_0_ec_seckey_tweak_add(ctx, ctmp, ctmp2) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros, ctmp, 32) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, ctmp2) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, ctmp) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_tweak_add(ctx, ctmp, ctmp2) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros, ctmp, 32) == 0); memcpy(ctmp, orderc, 32); - CHECK(rustsecp256k1_v0_4_0_ec_seckey_tweak_mul(ctx, ctmp, ctmp2) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros, ctmp, 32) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_tweak_mul(ctx, ctmp, ctmp2) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros, ctmp, 32) == 0); /* If seckey_tweak_add or seckey_tweak_mul are called with an overflowing tweak, the seckey is zeroized. */ memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; - CHECK(rustsecp256k1_v0_4_0_ec_seckey_tweak_add(ctx, ctmp, orderc) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros, ctmp, 32) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_tweak_add(ctx, ctmp, orderc) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros, ctmp, 32) == 0); memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; - CHECK(rustsecp256k1_v0_4_0_ec_seckey_tweak_mul(ctx, ctmp, orderc) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros, ctmp, 32) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_tweak_mul(ctx, ctmp, orderc) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros, ctmp, 32) == 0); memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; /* If pubkey_tweak_add or pubkey_tweak_mul are called with an overflowing tweak, the pubkey is zeroized. */ - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_tweak_add(ctx, &pubkey, orderc) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_tweak_add(ctx, &pubkey, orderc) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul(ctx, &pubkey, orderc) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_tweak_mul(ctx, &pubkey, orderc) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); - /* If the resulting key in rustsecp256k1_v0_4_0_ec_seckey_tweak_add and - * rustsecp256k1_v0_4_0_ec_pubkey_tweak_add is 0 the functions fail and in the latter + /* If the resulting key in rustsecp256k1_v0_4_1_ec_seckey_tweak_add and + * rustsecp256k1_v0_4_1_ec_pubkey_tweak_add is 0 the functions fail and in the latter * case the pubkey is zeroized. 
*/ memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; memset(ctmp2, 0, 32); ctmp2[31] = 1; - CHECK(rustsecp256k1_v0_4_0_ec_seckey_tweak_add(ctx, ctmp2, ctmp) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros, ctmp2, 32) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_tweak_add(ctx, ctmp2, ctmp) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros, ctmp2, 32) == 0); ctmp2[31] = 1; - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); /* Tweak computation wraps and results in a key of 1. */ ctmp2[31] = 2; - CHECK(rustsecp256k1_v0_4_0_ec_seckey_tweak_add(ctx, ctmp2, ctmp) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(ctmp2, zeros, 31) == 0 && ctmp2[31] == 1); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_tweak_add(ctx, ctmp2, ctmp) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(ctmp2, zeros, 31) == 0 && ctmp2[31] == 1); ctmp2[31] = 2; - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); ctmp2[31] = 1; - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey2, ctmp2) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey2, ctmp2) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); /* Tweak mul * 2 = 1+1. */ - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); ctmp2[31] = 2; - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); /* Test argument errors. */ ecount = 0; - rustsecp256k1_v0_4_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_4_1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); CHECK(ecount == 0); /* Zeroize pubkey on parse error. */ memset(&pubkey, 0, 32); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); memset(&pubkey2, 0, 32); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey2, zeros, sizeof(pubkey2)) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey2, zeros, sizeof(pubkey2)) == 0); /* Plain argument errors. 
*/ ecount = 0; - CHECK(rustsecp256k1_v0_4_0_ec_seckey_verify(ctx, ctmp) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, ctmp) == 1); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_4_0_ec_seckey_verify(ctx, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, NULL) == 0); CHECK(ecount == 1); ecount = 0; memset(ctmp2, 0, 32); ctmp2[31] = 4; - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_tweak_add(ctx, NULL, ctmp2) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_tweak_add(ctx, NULL, ctmp2) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_tweak_add(ctx, &pubkey, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_tweak_add(ctx, &pubkey, NULL) == 0); CHECK(ecount == 2); ecount = 0; memset(ctmp2, 0, 32); ctmp2[31] = 4; - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul(ctx, NULL, ctmp2) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_tweak_mul(ctx, NULL, ctmp2) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul(ctx, &pubkey, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_tweak_mul(ctx, &pubkey, NULL) == 0); CHECK(ecount == 2); ecount = 0; memset(ctmp2, 0, 32); - CHECK(rustsecp256k1_v0_4_0_ec_seckey_tweak_add(ctx, NULL, ctmp2) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_tweak_add(ctx, NULL, ctmp2) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_ec_seckey_tweak_add(ctx, ctmp, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_tweak_add(ctx, ctmp, NULL) == 0); CHECK(ecount == 2); ecount = 0; memset(ctmp2, 0, 32); ctmp2[31] = 1; - CHECK(rustsecp256k1_v0_4_0_ec_seckey_tweak_mul(ctx, NULL, ctmp2) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_tweak_mul(ctx, NULL, ctmp2) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_ec_seckey_tweak_mul(ctx, ctmp, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_tweak_mul(ctx, ctmp, NULL) == 0); CHECK(ecount == 2); ecount = 0; - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, NULL, ctmp) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, NULL, ctmp) == 0); CHECK(ecount == 1); memset(&pubkey, 1, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey, NULL) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_0_pubkey)) == 0); - /* rustsecp256k1_v0_4_0_ec_pubkey_combine tests. */ + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_1_pubkey)) == 0); + /* rustsecp256k1_v0_4_1_ec_pubkey_combine tests. 
*/ ecount = 0; pubkeys[0] = &pubkey_one; - VG_UNDEF(&pubkeys[0], sizeof(rustsecp256k1_v0_4_0_pubkey *)); - VG_UNDEF(&pubkeys[1], sizeof(rustsecp256k1_v0_4_0_pubkey *)); - VG_UNDEF(&pubkeys[2], sizeof(rustsecp256k1_v0_4_0_pubkey *)); - memset(&pubkey, 255, sizeof(rustsecp256k1_v0_4_0_pubkey)); - VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_4_0_pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 0) == 0); - VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_4_0_pubkey)); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_0_pubkey)) == 0); + VG_UNDEF(&pubkeys[0], sizeof(rustsecp256k1_v0_4_1_pubkey *)); + VG_UNDEF(&pubkeys[1], sizeof(rustsecp256k1_v0_4_1_pubkey *)); + VG_UNDEF(&pubkeys[2], sizeof(rustsecp256k1_v0_4_1_pubkey *)); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_4_1_pubkey)); + VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_4_1_pubkey)); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 0) == 0); + VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_4_1_pubkey)); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_1_pubkey)) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_combine(ctx, NULL, pubkeys, 1) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_0_pubkey)) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_combine(ctx, NULL, pubkeys, 1) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_1_pubkey)) == 0); CHECK(ecount == 2); - memset(&pubkey, 255, sizeof(rustsecp256k1_v0_4_0_pubkey)); - VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_4_0_pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_combine(ctx, &pubkey, NULL, 1) == 0); - VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_4_0_pubkey)); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_0_pubkey)) == 0); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_4_1_pubkey)); + VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_4_1_pubkey)); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_combine(ctx, &pubkey, NULL, 1) == 0); + VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_4_1_pubkey)); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_1_pubkey)) == 0); CHECK(ecount == 3); pubkeys[0] = &pubkey_negone; - memset(&pubkey, 255, sizeof(rustsecp256k1_v0_4_0_pubkey)); - VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_4_0_pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 1) == 1); - VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_4_0_pubkey)); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_0_pubkey)) > 0); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_4_1_pubkey)); + VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_4_1_pubkey)); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 1) == 1); + VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_4_1_pubkey)); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_1_pubkey)) > 0); CHECK(ecount == 3); len = 33; - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_negone, SECP256K1_EC_COMPRESSED) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(ctmp, ctmp2, 33) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_negone, SECP256K1_EC_COMPRESSED) == 1); + 
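[Editorial note, not part of the patch: the combine cases that follow exercise the point at infinity, which has no pubkey encoding, so ec_pubkey_combine must fail and zeroize its output. A minimal sketch of the G + (-G) case via the upstream public API; the unprefixed secp256k1_* names are an assumption of the sketch.]

    #include <assert.h>
    #include <string.h>
    #include <secp256k1.h>

    int main(void) {
        secp256k1_context *c =
            secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
        unsigned char sk[32];
        secp256k1_pubkey p, q, sum;
        const secp256k1_pubkey *ins[2];
        memset(sk, 0, 32);
        sk[31] = 1;                                         /* sk = 1, so p = G */
        assert(secp256k1_ec_pubkey_create(c, &p, sk) == 1);
        assert(secp256k1_ec_seckey_negate(c, sk) == 1);     /* sk = -1 mod n */
        assert(secp256k1_ec_pubkey_create(c, &q, sk) == 1); /* q = -G */
        ins[0] = &p;
        ins[1] = &q;
        /* G + (-G) is the point at infinity: not a representable pubkey, so
         * combine fails (and, per the tests above, writes a zeroed output). */
        assert(secp256k1_ec_pubkey_combine(c, &sum, ins, 2) == 0);
        secp256k1_context_destroy(c);
        return 0;
    }
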
CHECK(rustsecp256k1_v0_4_1_memcmp_var(ctmp, ctmp2, 33) == 0); /* Result is infinity. */ pubkeys[0] = &pubkey_one; pubkeys[1] = &pubkey_negone; - memset(&pubkey, 255, sizeof(rustsecp256k1_v0_4_0_pubkey)); - VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_4_0_pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 0); - VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_4_0_pubkey)); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_0_pubkey)) == 0); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_4_1_pubkey)); + VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_4_1_pubkey)); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 0); + VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_4_1_pubkey)); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_1_pubkey)) == 0); CHECK(ecount == 3); /* Passes through infinity but comes out one. */ pubkeys[2] = &pubkey_one; - memset(&pubkey, 255, sizeof(rustsecp256k1_v0_4_0_pubkey)); - VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_4_0_pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 3) == 1); - VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_4_0_pubkey)); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_0_pubkey)) > 0); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_4_1_pubkey)); + VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_4_1_pubkey)); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 3) == 1); + VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_4_1_pubkey)); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_1_pubkey)) > 0); CHECK(ecount == 3); len = 33; - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_one, SECP256K1_EC_COMPRESSED) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(ctmp, ctmp2, 33) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_one, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(ctmp, ctmp2, 33) == 0); /* Adds to two. 
*/ pubkeys[1] = &pubkey_one; - memset(&pubkey, 255, sizeof(rustsecp256k1_v0_4_0_pubkey)); - VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_4_0_pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 1); - VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_4_0_pubkey)); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_0_pubkey)) > 0); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_4_1_pubkey)); + VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_4_1_pubkey)); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 1); + VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_4_1_pubkey)); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_4_1_pubkey)) > 0); CHECK(ecount == 3); - rustsecp256k1_v0_4_0_context_set_illegal_callback(ctx, NULL, NULL); + rustsecp256k1_v0_4_1_context_set_illegal_callback(ctx, NULL, NULL); } void run_eckey_negate_test(void) { @@ -4333,22 +5125,22 @@ void run_eckey_negate_test(void) { memcpy(seckey_tmp, seckey, 32); /* Verify negation changes the key and changes it back */ - CHECK(rustsecp256k1_v0_4_0_ec_seckey_negate(ctx, seckey) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(seckey, seckey_tmp, 32) != 0); - CHECK(rustsecp256k1_v0_4_0_ec_seckey_negate(ctx, seckey) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(seckey, seckey_tmp, 32) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_negate(ctx, seckey) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(seckey, seckey_tmp, 32) != 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_negate(ctx, seckey) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(seckey, seckey_tmp, 32) == 0); /* Check that privkey alias gives same result */ - CHECK(rustsecp256k1_v0_4_0_ec_seckey_negate(ctx, seckey) == 1); - CHECK(rustsecp256k1_v0_4_0_ec_privkey_negate(ctx, seckey_tmp) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(seckey, seckey_tmp, 32) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_negate(ctx, seckey) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_privkey_negate(ctx, seckey_tmp) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(seckey, seckey_tmp, 32) == 0); /* Negating all 0s fails */ memset(seckey, 0, 32); memset(seckey_tmp, 0, 32); - CHECK(rustsecp256k1_v0_4_0_ec_seckey_negate(ctx, seckey) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_negate(ctx, seckey) == 0); /* Check that seckey is not modified */ - CHECK(rustsecp256k1_v0_4_0_memcmp_var(seckey, seckey_tmp, 32) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(seckey, seckey_tmp, 32) == 0); /* Negating an overflowing seckey fails and the seckey is zeroed. 
In this * test, the seckey has 16 random bytes to ensure that ec_seckey_negate @@ -4356,38 +5148,40 @@ void run_eckey_negate_test(void) { random_scalar_order_b32(seckey); memset(seckey, 0xFF, 16); memset(seckey_tmp, 0, 32); - CHECK(rustsecp256k1_v0_4_0_ec_seckey_negate(ctx, seckey) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(seckey, seckey_tmp, 32) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_negate(ctx, seckey) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(seckey, seckey_tmp, 32) == 0); } -void random_sign(rustsecp256k1_v0_4_0_scalar *sigr, rustsecp256k1_v0_4_0_scalar *sigs, const rustsecp256k1_v0_4_0_scalar *key, const rustsecp256k1_v0_4_0_scalar *msg, int *recid) { - rustsecp256k1_v0_4_0_scalar nonce; +void random_sign(rustsecp256k1_v0_4_1_scalar *sigr, rustsecp256k1_v0_4_1_scalar *sigs, const rustsecp256k1_v0_4_1_scalar *key, const rustsecp256k1_v0_4_1_scalar *msg, int *recid) { + rustsecp256k1_v0_4_1_scalar nonce; do { random_scalar_order_test(&nonce); - } while(!rustsecp256k1_v0_4_0_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, sigr, sigs, key, msg, &nonce, recid)); + } while(!rustsecp256k1_v0_4_1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, sigr, sigs, key, msg, &nonce, recid)); } void test_ecdsa_sign_verify(void) { - rustsecp256k1_v0_4_0_gej pubj; - rustsecp256k1_v0_4_0_ge pub; - rustsecp256k1_v0_4_0_scalar one; - rustsecp256k1_v0_4_0_scalar msg, key; - rustsecp256k1_v0_4_0_scalar sigr, sigs; - int recid; + rustsecp256k1_v0_4_1_gej pubj; + rustsecp256k1_v0_4_1_ge pub; + rustsecp256k1_v0_4_1_scalar one; + rustsecp256k1_v0_4_1_scalar msg, key; + rustsecp256k1_v0_4_1_scalar sigr, sigs; int getrec; + /* Initialize recid to suppress a false positive -Wconditional-uninitialized in clang. + VG_UNDEF ensures that valgrind will still treat the variable as uninitialized. 
*/ + int recid = -1; VG_UNDEF(&recid, sizeof(recid)); random_scalar_order_test(&msg); random_scalar_order_test(&key); - rustsecp256k1_v0_4_0_ecmult_gen(&ctx->ecmult_gen_ctx, &pubj, &key); - rustsecp256k1_v0_4_0_ge_set_gej(&pub, &pubj); - getrec = rustsecp256k1_v0_4_0_testrand_bits(1); + rustsecp256k1_v0_4_1_ecmult_gen(&ctx->ecmult_gen_ctx, &pubj, &key); + rustsecp256k1_v0_4_1_ge_set_gej(&pub, &pubj); + getrec = rustsecp256k1_v0_4_1_testrand_bits(1); random_sign(&sigr, &sigs, &key, &msg, getrec?&recid:NULL); if (getrec) { CHECK(recid >= 0 && recid < 4); } - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &pub, &msg)); - rustsecp256k1_v0_4_0_scalar_set_int(&one, 1); - rustsecp256k1_v0_4_0_scalar_add(&msg, &msg, &one); - CHECK(!rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &pub, &msg)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &pub, &msg)); + rustsecp256k1_v0_4_1_scalar_set_int(&one, 1); + rustsecp256k1_v0_4_1_scalar_add(&msg, &msg, &one); + CHECK(!rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &pub, &msg)); } void run_ecdsa_sign_verify(void) { @@ -4444,9 +5238,9 @@ static int nonce_function_test_retry(unsigned char *nonce32, const unsigned char return nonce_function_rfc6979(nonce32, msg32, key32, algo16, data, counter - 5); } -int is_empty_signature(const rustsecp256k1_v0_4_0_ecdsa_signature *sig) { - static const unsigned char res[sizeof(rustsecp256k1_v0_4_0_ecdsa_signature)] = {0}; - return rustsecp256k1_v0_4_0_memcmp_var(sig, res, sizeof(rustsecp256k1_v0_4_0_ecdsa_signature)) == 0; +int is_empty_signature(const rustsecp256k1_v0_4_1_ecdsa_signature *sig) { + static const unsigned char res[sizeof(rustsecp256k1_v0_4_1_ecdsa_signature)] = {0}; + return rustsecp256k1_v0_4_1_memcmp_var(sig, res, sizeof(rustsecp256k1_v0_4_1_ecdsa_signature)) == 0; } void test_ecdsa_end_to_end(void) { @@ -4454,191 +5248,191 @@ void test_ecdsa_end_to_end(void) { unsigned char privkey[32]; unsigned char message[32]; unsigned char privkey2[32]; - rustsecp256k1_v0_4_0_ecdsa_signature signature[6]; - rustsecp256k1_v0_4_0_scalar r, s; + rustsecp256k1_v0_4_1_ecdsa_signature signature[6]; + rustsecp256k1_v0_4_1_scalar r, s; unsigned char sig[74]; size_t siglen = 74; unsigned char pubkeyc[65]; size_t pubkeyclen = 65; - rustsecp256k1_v0_4_0_pubkey pubkey; - rustsecp256k1_v0_4_0_pubkey pubkey_tmp; + rustsecp256k1_v0_4_1_pubkey pubkey; + rustsecp256k1_v0_4_1_pubkey pubkey_tmp; unsigned char seckey[300]; size_t seckeylen = 300; /* Generate a random key and message. */ { - rustsecp256k1_v0_4_0_scalar msg, key; + rustsecp256k1_v0_4_1_scalar msg, key; random_scalar_order_test(&msg); random_scalar_order_test(&key); - rustsecp256k1_v0_4_0_scalar_get_b32(privkey, &key); - rustsecp256k1_v0_4_0_scalar_get_b32(message, &msg); + rustsecp256k1_v0_4_1_scalar_get_b32(privkey, &key); + rustsecp256k1_v0_4_1_scalar_get_b32(message, &msg); } /* Construct and verify corresponding public key. */ - CHECK(rustsecp256k1_v0_4_0_ec_seckey_verify(ctx, privkey) == 1); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey, privkey) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, privkey) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey, privkey) == 1); /* Verify exporting and importing public key. */ - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, pubkeyc, &pubkeyclen, &pubkey, rustsecp256k1_v0_4_0_testrand_bits(1) == 1 ? 
SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED)); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, pubkeyc, &pubkeyclen, &pubkey, rustsecp256k1_v0_4_1_testrand_bits(1) == 1 ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED)); memset(&pubkey, 0, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1); /* Verify negation changes the key and changes it back */ memcpy(&pubkey_tmp, &pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_negate(ctx, &pubkey_tmp) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) != 0); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_negate(ctx, &pubkey_tmp) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_negate(ctx, &pubkey_tmp) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) != 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_negate(ctx, &pubkey_tmp) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) == 0); /* Verify private key import and export. */ - CHECK(ec_privkey_export_der(ctx, seckey, &seckeylen, privkey, rustsecp256k1_v0_4_0_testrand_bits(1) == 1)); + CHECK(ec_privkey_export_der(ctx, seckey, &seckeylen, privkey, rustsecp256k1_v0_4_1_testrand_bits(1) == 1)); CHECK(ec_privkey_import_der(ctx, privkey2, seckey, seckeylen) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(privkey, privkey2, 32) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(privkey, privkey2, 32) == 0); /* Optionally tweak the keys using addition. */ - if (rustsecp256k1_v0_4_0_testrand_int(3) == 0) { + if (rustsecp256k1_v0_4_1_testrand_int(3) == 0) { int ret1; int ret2; int ret3; unsigned char rnd[32]; unsigned char privkey_tmp[32]; - rustsecp256k1_v0_4_0_pubkey pubkey2; - rustsecp256k1_v0_4_0_testrand256_test(rnd); + rustsecp256k1_v0_4_1_pubkey pubkey2; + rustsecp256k1_v0_4_1_testrand256_test(rnd); memcpy(privkey_tmp, privkey, 32); - ret1 = rustsecp256k1_v0_4_0_ec_seckey_tweak_add(ctx, privkey, rnd); - ret2 = rustsecp256k1_v0_4_0_ec_pubkey_tweak_add(ctx, &pubkey, rnd); + ret1 = rustsecp256k1_v0_4_1_ec_seckey_tweak_add(ctx, privkey, rnd); + ret2 = rustsecp256k1_v0_4_1_ec_pubkey_tweak_add(ctx, &pubkey, rnd); /* Check that privkey alias gives same result */ - ret3 = rustsecp256k1_v0_4_0_ec_privkey_tweak_add(ctx, privkey_tmp, rnd); + ret3 = rustsecp256k1_v0_4_1_ec_privkey_tweak_add(ctx, privkey_tmp, rnd); CHECK(ret1 == ret2); CHECK(ret2 == ret3); if (ret1 == 0) { return; } - CHECK(rustsecp256k1_v0_4_0_memcmp_var(privkey, privkey_tmp, 32) == 0); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey2, privkey) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(privkey, privkey_tmp, 32) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); } /* Optionally tweak the keys using multiplication. 
*/ - if (rustsecp256k1_v0_4_0_testrand_int(3) == 0) { + if (rustsecp256k1_v0_4_1_testrand_int(3) == 0) { int ret1; int ret2; int ret3; unsigned char rnd[32]; unsigned char privkey_tmp[32]; - rustsecp256k1_v0_4_0_pubkey pubkey2; - rustsecp256k1_v0_4_0_testrand256_test(rnd); + rustsecp256k1_v0_4_1_pubkey pubkey2; + rustsecp256k1_v0_4_1_testrand256_test(rnd); memcpy(privkey_tmp, privkey, 32); - ret1 = rustsecp256k1_v0_4_0_ec_seckey_tweak_mul(ctx, privkey, rnd); - ret2 = rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul(ctx, &pubkey, rnd); + ret1 = rustsecp256k1_v0_4_1_ec_seckey_tweak_mul(ctx, privkey, rnd); + ret2 = rustsecp256k1_v0_4_1_ec_pubkey_tweak_mul(ctx, &pubkey, rnd); /* Check that privkey alias gives same result */ - ret3 = rustsecp256k1_v0_4_0_ec_privkey_tweak_mul(ctx, privkey_tmp, rnd); + ret3 = rustsecp256k1_v0_4_1_ec_privkey_tweak_mul(ctx, privkey_tmp, rnd); CHECK(ret1 == ret2); CHECK(ret2 == ret3); if (ret1 == 0) { return; } - CHECK(rustsecp256k1_v0_4_0_memcmp_var(privkey, privkey_tmp, 32) == 0); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey2, privkey) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(privkey, privkey_tmp, 32) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); } /* Sign. */ - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &signature[4], message, privkey, NULL, NULL) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &signature[1], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &signature[4], message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &signature[1], message, privkey, NULL, extra) == 1); extra[31] = 1; - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &signature[2], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &signature[2], message, privkey, NULL, extra) == 1); extra[31] = 0; extra[0] = 1; - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &signature[3], message, privkey, NULL, extra) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&signature[0], &signature[4], sizeof(signature[0])) == 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&signature[0], &signature[1], sizeof(signature[0])) != 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&signature[0], &signature[2], sizeof(signature[0])) != 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&signature[0], &signature[3], sizeof(signature[0])) != 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&signature[1], &signature[2], sizeof(signature[0])) != 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&signature[1], &signature[3], sizeof(signature[0])) != 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&signature[2], &signature[3], sizeof(signature[0])) != 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &signature[3], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&signature[0], &signature[4], sizeof(signature[0])) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&signature[0], &signature[1], sizeof(signature[0])) != 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&signature[0], &signature[2], sizeof(signature[0])) != 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&signature[0], &signature[3], sizeof(signature[0])) != 0); + 
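[Editorial note, not part of the patch: the signing CHECKs above rely on the default nonce function being deterministic (RFC 6979): signing twice with identical inputs yields byte-identical signatures, while any change to the 32-byte noncedata argument yields a different one. A minimal sketch via the upstream public API; the unprefixed names and the direct struct memcmp (as used by the tests) are assumptions of the sketch.]

    #include <assert.h>
    #include <string.h>
    #include <secp256k1.h>

    int main(void) {
        secp256k1_context *c = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
        unsigned char sk[32], msg[32], extra[32];
        secp256k1_ecdsa_signature a, b, d;
        memset(sk, 0x42, 32);
        memset(msg, 0x01, 32);
        memset(extra, 0, 32);
        extra[31] = 1;
        /* Default nonce function is RFC 6979: same inputs, same signature. */
        assert(secp256k1_ecdsa_sign(c, &a, msg, sk, NULL, NULL) == 1);
        assert(secp256k1_ecdsa_sign(c, &b, msg, sk, NULL, NULL) == 1);
        assert(memcmp(&a, &b, sizeof(a)) == 0);
        /* Extra entropy ("noncedata") changes the nonce, hence the signature. */
        assert(secp256k1_ecdsa_sign(c, &d, msg, sk, NULL, extra) == 1);
        assert(memcmp(&a, &d, sizeof(a)) != 0);
        secp256k1_context_destroy(c);
        return 0;
    }
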
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&signature[1], &signature[2], sizeof(signature[0])) != 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&signature[1], &signature[3], sizeof(signature[0])) != 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&signature[2], &signature[3], sizeof(signature[0])) != 0); /* Verify. */ - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &signature[1], message, &pubkey) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &signature[2], message, &pubkey) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &signature[3], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &signature[1], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &signature[2], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &signature[3], message, &pubkey) == 1); /* Test lower-S form, malleate, verify and fail, test again, malleate again */ - CHECK(!rustsecp256k1_v0_4_0_ecdsa_signature_normalize(ctx, NULL, &signature[0])); - rustsecp256k1_v0_4_0_ecdsa_signature_load(ctx, &r, &s, &signature[0]); - rustsecp256k1_v0_4_0_scalar_negate(&s, &s); - rustsecp256k1_v0_4_0_ecdsa_signature_save(&signature[5], &r, &s); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 0); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_normalize(ctx, NULL, &signature[5])); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_normalize(ctx, &signature[5], &signature[5])); - CHECK(!rustsecp256k1_v0_4_0_ecdsa_signature_normalize(ctx, NULL, &signature[5])); - CHECK(!rustsecp256k1_v0_4_0_ecdsa_signature_normalize(ctx, &signature[5], &signature[5])); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1); - rustsecp256k1_v0_4_0_scalar_negate(&s, &s); - rustsecp256k1_v0_4_0_ecdsa_signature_save(&signature[5], &r, &s); - CHECK(!rustsecp256k1_v0_4_0_ecdsa_signature_normalize(ctx, NULL, &signature[5])); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&signature[5], &signature[0], 64) == 0); + CHECK(!rustsecp256k1_v0_4_1_ecdsa_signature_normalize(ctx, NULL, &signature[0])); + rustsecp256k1_v0_4_1_ecdsa_signature_load(ctx, &r, &s, &signature[0]); + rustsecp256k1_v0_4_1_scalar_negate(&s, &s); + rustsecp256k1_v0_4_1_ecdsa_signature_save(&signature[5], &r, &s); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_normalize(ctx, NULL, &signature[5])); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_normalize(ctx, &signature[5], &signature[5])); + CHECK(!rustsecp256k1_v0_4_1_ecdsa_signature_normalize(ctx, NULL, &signature[5])); + CHECK(!rustsecp256k1_v0_4_1_ecdsa_signature_normalize(ctx, &signature[5], &signature[5])); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1); + rustsecp256k1_v0_4_1_scalar_negate(&s, &s); + rustsecp256k1_v0_4_1_ecdsa_signature_save(&signature[5], &r, &s); + CHECK(!rustsecp256k1_v0_4_1_ecdsa_signature_normalize(ctx, NULL, &signature[5])); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&signature[5], &signature[0], 64) == 0); /* Serialize/parse DER and verify again */ - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1); + 
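[Editorial note, not part of the patch: the malleation test above manufactures the high-s twin with the internal signature_load/save helpers, which have no public equivalent. What is publicly observable is that freshly produced signatures are already in lower-s form, so signature_normalize reports no change (returns 0) and verification succeeds. A minimal sketch under that assumption, using the unprefixed secp256k1_* names.]

    #include <assert.h>
    #include <string.h>
    #include <secp256k1.h>

    int main(void) {
        secp256k1_context *c =
            secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
        unsigned char sk[32], msg[32];
        secp256k1_ecdsa_signature sig;
        secp256k1_pubkey pk;
        memset(sk, 0x42, 32);
        memset(msg, 0x01, 32);
        assert(secp256k1_ec_pubkey_create(c, &pk, sk) == 1);
        assert(secp256k1_ecdsa_sign(c, &sig, msg, sk, NULL, NULL) == 1);
        /* Returns 0: the signature was already in the normalized lower-s form. */
        assert(secp256k1_ecdsa_signature_normalize(c, NULL, &sig) == 0);
        assert(secp256k1_ecdsa_verify(c, &sig, msg, &pk) == 1);
        secp256k1_context_destroy(c);
        return 0;
    }
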
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1); memset(&signature[0], 0, sizeof(signature[0])); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1); /* Serialize/destroy/parse DER and verify again. */ siglen = 74; - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1); - sig[rustsecp256k1_v0_4_0_testrand_int(siglen)] += 1 + rustsecp256k1_v0_4_0_testrand_int(255); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 0 || - rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1); + sig[rustsecp256k1_v0_4_1_testrand_int(siglen)] += 1 + rustsecp256k1_v0_4_1_testrand_int(255); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 0 || + rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 0); } void test_random_pubkeys(void) { - rustsecp256k1_v0_4_0_ge elem; - rustsecp256k1_v0_4_0_ge elem2; + rustsecp256k1_v0_4_1_ge elem; + rustsecp256k1_v0_4_1_ge elem2; unsigned char in[65]; /* Generate some randomly sized pubkeys. */ - size_t len = rustsecp256k1_v0_4_0_testrand_bits(2) == 0 ? 65 : 33; - if (rustsecp256k1_v0_4_0_testrand_bits(2) == 0) { - len = rustsecp256k1_v0_4_0_testrand_bits(6); + size_t len = rustsecp256k1_v0_4_1_testrand_bits(2) == 0 ? 65 : 33; + if (rustsecp256k1_v0_4_1_testrand_bits(2) == 0) { + len = rustsecp256k1_v0_4_1_testrand_bits(6); } if (len == 65) { - in[0] = rustsecp256k1_v0_4_0_testrand_bits(1) ? 4 : (rustsecp256k1_v0_4_0_testrand_bits(1) ? 6 : 7); + in[0] = rustsecp256k1_v0_4_1_testrand_bits(1) ? 4 : (rustsecp256k1_v0_4_1_testrand_bits(1) ? 6 : 7); } else { - in[0] = rustsecp256k1_v0_4_0_testrand_bits(1) ? 2 : 3; + in[0] = rustsecp256k1_v0_4_1_testrand_bits(1) ? 2 : 3; } - if (rustsecp256k1_v0_4_0_testrand_bits(3) == 0) { - in[0] = rustsecp256k1_v0_4_0_testrand_bits(8); + if (rustsecp256k1_v0_4_1_testrand_bits(3) == 0) { + in[0] = rustsecp256k1_v0_4_1_testrand_bits(8); } if (len > 1) { - rustsecp256k1_v0_4_0_testrand256(&in[1]); + rustsecp256k1_v0_4_1_testrand256(&in[1]); } if (len > 33) { - rustsecp256k1_v0_4_0_testrand256(&in[33]); + rustsecp256k1_v0_4_1_testrand256(&in[33]); } - if (rustsecp256k1_v0_4_0_eckey_pubkey_parse(&elem, in, len)) { + if (rustsecp256k1_v0_4_1_eckey_pubkey_parse(&elem, in, len)) { unsigned char out[65]; unsigned char firstb; int res; size_t size = len; firstb = in[0]; /* If the pubkey can be parsed, it should round-trip... */ - CHECK(rustsecp256k1_v0_4_0_eckey_pubkey_serialize(&elem, out, &size, len == 33)); + CHECK(rustsecp256k1_v0_4_1_eckey_pubkey_serialize(&elem, out, &size, len == 33)); CHECK(size == len); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&in[1], &out[1], len-1) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&in[1], &out[1], len-1) == 0); /* ... except for the type of hybrid inputs. 
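 * (The same round-trip holds at the public API level: any key accepted
 * by secp256k1_ec_pubkey_parse -- prefix 0x02/0x03 compressed, 0x04
 * uncompressed, plus the legacy X9.62 hybrid 0x06/0x07 forms on input
 * only -- re-serializes to the canonical encoding. A sketch, upstream
 * names, error checks elided:
 *
 *     secp256k1_pubkey pk;
 *     unsigned char out[33];
 *     size_t outlen = sizeof(out);
 *     secp256k1_ec_pubkey_parse(ctx, &pk, in33, 33);
 *     secp256k1_ec_pubkey_serialize(ctx, out, &outlen, &pk,
 *                                   SECP256K1_EC_COMPRESSED);
 *
 * after which outlen == 33 and out matches a compressed in33 byte for
 * byte; hybrid 65-byte inputs re-serialize with an 0x04 prefix instead,
 * which is exactly the exception this test carves out.)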
*/ if ((in[0] != 6) && (in[0] != 7)) { CHECK(in[0] == out[0]); } size = 65; - CHECK(rustsecp256k1_v0_4_0_eckey_pubkey_serialize(&elem, in, &size, 0)); + CHECK(rustsecp256k1_v0_4_1_eckey_pubkey_serialize(&elem, in, &size, 0)); CHECK(size == 65); - CHECK(rustsecp256k1_v0_4_0_eckey_pubkey_parse(&elem2, in, size)); + CHECK(rustsecp256k1_v0_4_1_eckey_pubkey_parse(&elem2, in, size)); ge_equals_ge(&elem,&elem2); /* Check that the X9.62 hybrid type is checked. */ - in[0] = rustsecp256k1_v0_4_0_testrand_bits(1) ? 6 : 7; - res = rustsecp256k1_v0_4_0_eckey_pubkey_parse(&elem2, in, size); + in[0] = rustsecp256k1_v0_4_1_testrand_bits(1) ? 6 : 7; + res = rustsecp256k1_v0_4_1_eckey_pubkey_parse(&elem2, in, size); if (firstb == 2 || firstb == 3) { if (in[0] == firstb + 4) { CHECK(res); @@ -4648,12 +5442,61 @@ void test_random_pubkeys(void) { } if (res) { ge_equals_ge(&elem,&elem2); - CHECK(rustsecp256k1_v0_4_0_eckey_pubkey_serialize(&elem, out, &size, 0)); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&in[1], &out[1], 64) == 0); + CHECK(rustsecp256k1_v0_4_1_eckey_pubkey_serialize(&elem, out, &size, 0)); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&in[1], &out[1], 64) == 0); } } } +void run_pubkey_comparison(void) { + unsigned char pk1_ser[33] = { + 0x02, + 0x58, 0x84, 0xb3, 0xa2, 0x4b, 0x97, 0x37, 0x88, 0x92, 0x38, 0xa6, 0x26, 0x62, 0x52, 0x35, 0x11, + 0xd0, 0x9a, 0xa1, 0x1b, 0x80, 0x0b, 0x5e, 0x93, 0x80, 0x26, 0x11, 0xef, 0x67, 0x4b, 0xd9, 0x23 + }; + const unsigned char pk2_ser[33] = { + 0x02, + 0xde, 0x36, 0x0e, 0x87, 0x59, 0x8f, 0x3c, 0x01, 0x36, 0x2a, 0x2a, 0xb8, 0xc6, 0xf4, 0x5e, 0x4d, + 0xb2, 0xc2, 0xd5, 0x03, 0xa7, 0xf9, 0xf1, 0x4f, 0xa8, 0xfa, 0x95, 0xa8, 0xe9, 0x69, 0x76, 0x1c + }; + rustsecp256k1_v0_4_1_pubkey pk1; + rustsecp256k1_v0_4_1_pubkey pk2; + int32_t ecount = 0; + + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, &pk1, pk1_ser, sizeof(pk1_ser)) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, &pk2, pk2_ser, sizeof(pk2_ser)) == 1); + + rustsecp256k1_v0_4_1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_cmp(ctx, NULL, &pk2) < 0); + CHECK(ecount == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_cmp(ctx, &pk1, NULL) > 0); + CHECK(ecount == 2); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_cmp(ctx, &pk1, &pk2) < 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_cmp(ctx, &pk2, &pk1) > 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_cmp(ctx, &pk1, &pk1) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_cmp(ctx, &pk2, &pk2) == 0); + CHECK(ecount == 2); + { + rustsecp256k1_v0_4_1_pubkey pk_tmp; + memset(&pk_tmp, 0, sizeof(pk_tmp)); /* illegal pubkey */ + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_cmp(ctx, &pk_tmp, &pk2) < 0); + CHECK(ecount == 3); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_cmp(ctx, &pk_tmp, &pk_tmp) == 0); + CHECK(ecount == 5); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_cmp(ctx, &pk2, &pk_tmp) > 0); + CHECK(ecount == 6); + } + + rustsecp256k1_v0_4_1_context_set_illegal_callback(ctx, NULL, NULL); + + /* Make pk2 the same as pk1 but with 3 rather than 2. 
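 * (rustsecp256k1_v0_4_1_ec_pubkey_cmp -- secp256k1_ec_pubkey_cmp
 * upstream -- orders keys by their compressed serialization, which is
 * why flipping the prefix byte from 0x02 to 0x03 below changes the
 * ordering. A plausible application-side use, sketched with upstream
 * names and an assumed global ctx, is canonical key ordering:
 *
 *     static int pk_cmp(const void *a, const void *b) {
 *         return secp256k1_ec_pubkey_cmp(ctx, a, b);
 *     }
 *     qsort(pubkeys, n, sizeof(secp256k1_pubkey), pk_cmp);
 * )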
Note that in + * an uncompressed encoding, these would have the opposite ordering */ + pk1_ser[0] = 3; + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, &pk2, pk1_ser, sizeof(pk1_ser)) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_cmp(ctx, &pk1, &pk2) < 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_cmp(ctx, &pk2, &pk1) > 0); +} + void run_random_pubkeys(void) { int i; for (i = 0; i < 10*count; i++) { @@ -4681,13 +5524,13 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_ int ret = 0; - rustsecp256k1_v0_4_0_ecdsa_signature sig_der; + rustsecp256k1_v0_4_1_ecdsa_signature sig_der; unsigned char roundtrip_der[2048]; unsigned char compact_der[64]; size_t len_der = 2048; int parsed_der = 0, valid_der = 0, roundtrips_der = 0; - rustsecp256k1_v0_4_0_ecdsa_signature sig_der_lax; + rustsecp256k1_v0_4_1_ecdsa_signature sig_der_lax; unsigned char roundtrip_der_lax[2048]; unsigned char compact_der_lax[64]; size_t len_der_lax = 2048; @@ -4702,24 +5545,24 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_ int parsed_openssl, valid_openssl = 0, roundtrips_openssl = 0; #endif - parsed_der = rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig_der, sig, siglen); + parsed_der = rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig_der, sig, siglen); if (parsed_der) { - ret |= (!rustsecp256k1_v0_4_0_ecdsa_signature_serialize_compact(ctx, compact_der, &sig_der)) << 0; - valid_der = (rustsecp256k1_v0_4_0_memcmp_var(compact_der, zeroes, 32) != 0) && (rustsecp256k1_v0_4_0_memcmp_var(compact_der + 32, zeroes, 32) != 0); + ret |= (!rustsecp256k1_v0_4_1_ecdsa_signature_serialize_compact(ctx, compact_der, &sig_der)) << 0; + valid_der = (rustsecp256k1_v0_4_1_memcmp_var(compact_der, zeroes, 32) != 0) && (rustsecp256k1_v0_4_1_memcmp_var(compact_der + 32, zeroes, 32) != 0); } if (valid_der) { - ret |= (!rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(ctx, roundtrip_der, &len_der, &sig_der)) << 1; - roundtrips_der = (len_der == siglen) && rustsecp256k1_v0_4_0_memcmp_var(roundtrip_der, sig, siglen) == 0; + ret |= (!rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der(ctx, roundtrip_der, &len_der, &sig_der)) << 1; + roundtrips_der = (len_der == siglen) && rustsecp256k1_v0_4_1_memcmp_var(roundtrip_der, sig, siglen) == 0; } - parsed_der_lax = rustsecp256k1_v0_4_0_ecdsa_signature_parse_der_lax(ctx, &sig_der_lax, sig, siglen); + parsed_der_lax = rustsecp256k1_v0_4_1_ecdsa_signature_parse_der_lax(ctx, &sig_der_lax, sig, siglen); if (parsed_der_lax) { - ret |= (!rustsecp256k1_v0_4_0_ecdsa_signature_serialize_compact(ctx, compact_der_lax, &sig_der_lax)) << 10; - valid_der_lax = (rustsecp256k1_v0_4_0_memcmp_var(compact_der_lax, zeroes, 32) != 0) && (rustsecp256k1_v0_4_0_memcmp_var(compact_der_lax + 32, zeroes, 32) != 0); + ret |= (!rustsecp256k1_v0_4_1_ecdsa_signature_serialize_compact(ctx, compact_der_lax, &sig_der_lax)) << 10; + valid_der_lax = (rustsecp256k1_v0_4_1_memcmp_var(compact_der_lax, zeroes, 32) != 0) && (rustsecp256k1_v0_4_1_memcmp_var(compact_der_lax + 32, zeroes, 32) != 0); } if (valid_der_lax) { - ret |= (!rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(ctx, roundtrip_der_lax, &len_der_lax, &sig_der_lax)) << 11; - roundtrips_der_lax = (len_der_lax == siglen) && rustsecp256k1_v0_4_0_memcmp_var(roundtrip_der_lax, sig, siglen) == 0; + ret |= (!rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der(ctx, roundtrip_der_lax, &len_der_lax, &sig_der_lax)) << 11; + roundtrips_der_lax = (len_der_lax == siglen) && 
rustsecp256k1_v0_4_1_memcmp_var(roundtrip_der_lax, sig, siglen) == 0; } if (certainly_der) { @@ -4735,7 +5578,7 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_ if (valid_der) { ret |= (!roundtrips_der_lax) << 12; ret |= (len_der != len_der_lax) << 13; - ret |= ((len_der != len_der_lax) || (rustsecp256k1_v0_4_0_memcmp_var(roundtrip_der_lax, roundtrip_der, len_der) != 0)) << 14; + ret |= ((len_der != len_der_lax) || (rustsecp256k1_v0_4_1_memcmp_var(roundtrip_der_lax, roundtrip_der, len_der) != 0)) << 14; } ret |= (roundtrips_der != roundtrips_der_lax) << 15; if (parsed_der) { @@ -4752,19 +5595,19 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_ if (valid_openssl) { unsigned char tmp[32] = {0}; BN_bn2bin(r, tmp + 32 - BN_num_bytes(r)); - valid_openssl = rustsecp256k1_v0_4_0_memcmp_var(tmp, max_scalar, 32) < 0; + valid_openssl = rustsecp256k1_v0_4_1_memcmp_var(tmp, max_scalar, 32) < 0; } if (valid_openssl) { unsigned char tmp[32] = {0}; BN_bn2bin(s, tmp + 32 - BN_num_bytes(s)); - valid_openssl = rustsecp256k1_v0_4_0_memcmp_var(tmp, max_scalar, 32) < 0; + valid_openssl = rustsecp256k1_v0_4_1_memcmp_var(tmp, max_scalar, 32) < 0; } } len_openssl = i2d_ECDSA_SIG(sig_openssl, NULL); if (len_openssl <= 2048) { unsigned char *ptr = roundtrip_openssl; CHECK(i2d_ECDSA_SIG(sig_openssl, &ptr) == len_openssl); - roundtrips_openssl = valid_openssl && ((size_t)len_openssl == siglen) && (rustsecp256k1_v0_4_0_memcmp_var(roundtrip_openssl, sig, siglen) == 0); + roundtrips_openssl = valid_openssl && ((size_t)len_openssl == siglen) && (rustsecp256k1_v0_4_1_memcmp_var(roundtrip_openssl, sig, siglen) == 0); } else { len_openssl = 0; } @@ -4776,7 +5619,7 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_ ret |= (roundtrips_der != roundtrips_openssl) << 7; if (roundtrips_openssl) { ret |= (len_der != (size_t)len_openssl) << 8; - ret |= ((len_der != (size_t)len_openssl) || (rustsecp256k1_v0_4_0_memcmp_var(roundtrip_der, roundtrip_openssl, len_der) != 0)) << 9; + ret |= ((len_der != (size_t)len_openssl) || (rustsecp256k1_v0_4_1_memcmp_var(roundtrip_der, roundtrip_openssl, len_der) != 0)) << 9; } #endif return ret; @@ -4796,27 +5639,27 @@ static void assign_big_endian(unsigned char *ptr, size_t ptrlen, uint32_t val) { static void damage_array(unsigned char *sig, size_t *len) { int pos; - int action = rustsecp256k1_v0_4_0_testrand_bits(3); + int action = rustsecp256k1_v0_4_1_testrand_bits(3); if (action < 1 && *len > 3) { /* Delete a byte. */ - pos = rustsecp256k1_v0_4_0_testrand_int(*len); + pos = rustsecp256k1_v0_4_1_testrand_int(*len); memmove(sig + pos, sig + pos + 1, *len - pos - 1); (*len)--; return; } else if (action < 2 && *len < 2048) { /* Insert a byte. */ - pos = rustsecp256k1_v0_4_0_testrand_int(1 + *len); + pos = rustsecp256k1_v0_4_1_testrand_int(1 + *len); memmove(sig + pos + 1, sig + pos, *len - pos); - sig[pos] = rustsecp256k1_v0_4_0_testrand_bits(8); + sig[pos] = rustsecp256k1_v0_4_1_testrand_bits(8); (*len)++; return; } else if (action < 4) { /* Modify a byte. */ - sig[rustsecp256k1_v0_4_0_testrand_int(*len)] += 1 + rustsecp256k1_v0_4_0_testrand_int(255); + sig[rustsecp256k1_v0_4_1_testrand_int(*len)] += 1 + rustsecp256k1_v0_4_1_testrand_int(255); return; } else { /* action < 8 */ /* Modify a bit. 
*/ - sig[rustsecp256k1_v0_4_0_testrand_int(*len)] ^= 1 << rustsecp256k1_v0_4_0_testrand_bits(3); + sig[rustsecp256k1_v0_4_1_testrand_int(*len)] ^= 1 << rustsecp256k1_v0_4_1_testrand_bits(3); return; } } @@ -4829,23 +5672,23 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly int n; *len = 0; - der = rustsecp256k1_v0_4_0_testrand_bits(2) == 0; + der = rustsecp256k1_v0_4_1_testrand_bits(2) == 0; *certainly_der = der; *certainly_not_der = 0; - indet = der ? 0 : rustsecp256k1_v0_4_0_testrand_int(10) == 0; + indet = der ? 0 : rustsecp256k1_v0_4_1_testrand_int(10) == 0; for (n = 0; n < 2; n++) { /* We generate two classes of numbers: nlow==1 "low" ones (up to 32 bytes), nlow==0 "high" ones (32 bytes with 129 top bits set, or larger than 32 bytes) */ - nlow[n] = der ? 1 : (rustsecp256k1_v0_4_0_testrand_bits(3) != 0); + nlow[n] = der ? 1 : (rustsecp256k1_v0_4_1_testrand_bits(3) != 0); /* The length of the number in bytes (the first byte of which will always be nonzero) */ - nlen[n] = nlow[n] ? rustsecp256k1_v0_4_0_testrand_int(33) : 32 + rustsecp256k1_v0_4_0_testrand_int(200) * rustsecp256k1_v0_4_0_testrand_int(8) / 8; + nlen[n] = nlow[n] ? rustsecp256k1_v0_4_1_testrand_int(33) : 32 + rustsecp256k1_v0_4_1_testrand_int(200) * rustsecp256k1_v0_4_1_testrand_int(8) / 8; CHECK(nlen[n] <= 232); /* The top bit of the number. */ - nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 0 : rustsecp256k1_v0_4_0_testrand_bits(1)); + nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 0 : rustsecp256k1_v0_4_1_testrand_bits(1)); /* The top byte of the number (after the potential hardcoded 16 0xFF characters for "high" 32 bytes numbers) */ - nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + rustsecp256k1_v0_4_0_testrand_bits(7) : 1 + rustsecp256k1_v0_4_0_testrand_int(127)); + nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + rustsecp256k1_v0_4_1_testrand_bits(7) : 1 + rustsecp256k1_v0_4_1_testrand_int(127)); /* The number of zero bytes in front of the number (which is 0 or 1 in case of DER, otherwise we extend up to 300 bytes) */ - nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? rustsecp256k1_v0_4_0_testrand_int(3) : rustsecp256k1_v0_4_0_testrand_int(300 - nlen[n]) * rustsecp256k1_v0_4_0_testrand_int(8) / 8); + nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? rustsecp256k1_v0_4_1_testrand_int(3) : rustsecp256k1_v0_4_1_testrand_int(300 - nlen[n]) * rustsecp256k1_v0_4_1_testrand_int(8) / 8); if (nzlen[n] > ((nlen[n] == 0 || nhbit[n]) ? 1 : 0)) { *certainly_not_der = 1; } @@ -4854,7 +5697,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly nlenlen[n] = nlen[n] + nzlen[n] < 128 ? 0 : (nlen[n] + nzlen[n] < 256 ? 1 : 2); if (!der) { /* nlenlen[n] max 127 bytes */ - int add = rustsecp256k1_v0_4_0_testrand_int(127 - nlenlen[n]) * rustsecp256k1_v0_4_0_testrand_int(16) * rustsecp256k1_v0_4_0_testrand_int(16) / 256; + int add = rustsecp256k1_v0_4_1_testrand_int(127 - nlenlen[n]) * rustsecp256k1_v0_4_1_testrand_int(16) * rustsecp256k1_v0_4_1_testrand_int(16) / 256; nlenlen[n] += add; if (add != 0) { *certainly_not_der = 1; @@ -4868,7 +5711,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly CHECK(tlen <= 856); /* The length of the garbage inside the tuple. */ - elen = (der || indet) ? 0 : rustsecp256k1_v0_4_0_testrand_int(980 - tlen) * rustsecp256k1_v0_4_0_testrand_int(8) / 8; + elen = (der || indet) ? 
0 : rustsecp256k1_v0_4_1_testrand_int(980 - tlen) * rustsecp256k1_v0_4_1_testrand_int(8) / 8; if (elen != 0) { *certainly_not_der = 1; } @@ -4876,7 +5719,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly CHECK(tlen <= 980); /* The length of the garbage after the end of the tuple. */ - glen = der ? 0 : rustsecp256k1_v0_4_0_testrand_int(990 - tlen) * rustsecp256k1_v0_4_0_testrand_int(8) / 8; + glen = der ? 0 : rustsecp256k1_v0_4_1_testrand_int(990 - tlen) * rustsecp256k1_v0_4_1_testrand_int(8) / 8; if (glen != 0) { *certainly_not_der = 1; } @@ -4891,7 +5734,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly } else { int tlenlen = tlen < 128 ? 0 : (tlen < 256 ? 1 : 2); if (!der) { - int add = rustsecp256k1_v0_4_0_testrand_int(127 - tlenlen) * rustsecp256k1_v0_4_0_testrand_int(16) * rustsecp256k1_v0_4_0_testrand_int(16) / 256; + int add = rustsecp256k1_v0_4_1_testrand_int(127 - tlenlen) * rustsecp256k1_v0_4_1_testrand_int(16) * rustsecp256k1_v0_4_1_testrand_int(16) / 256; tlenlen += add; if (add != 0) { *certainly_not_der = 1; @@ -4942,13 +5785,13 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly nlen[n]--; } /* Generate remaining random bytes of number */ - rustsecp256k1_v0_4_0_testrand_bytes_test(sig + *len, nlen[n]); + rustsecp256k1_v0_4_1_testrand_bytes_test(sig + *len, nlen[n]); *len += nlen[n]; nlen[n] = 0; } /* Generate random garbage inside tuple. */ - rustsecp256k1_v0_4_0_testrand_bytes_test(sig + *len, elen); + rustsecp256k1_v0_4_1_testrand_bytes_test(sig + *len, elen); *len += elen; /* Generate end-of-contents bytes. */ @@ -4960,7 +5803,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly CHECK(tlen + glen <= 1121); /* Generate random garbage outside tuple. */ - rustsecp256k1_v0_4_0_testrand_bytes_test(sig + *len, glen); + rustsecp256k1_v0_4_1_testrand_bytes_test(sig + *len, glen); *len += glen; tlen += glen; CHECK(tlen <= 1121); @@ -5001,22 +5844,22 @@ void run_ecdsa_der_parse(void) { /* Tests several edge cases. */ void test_ecdsa_edge_cases(void) { int t; - rustsecp256k1_v0_4_0_ecdsa_signature sig; + rustsecp256k1_v0_4_1_ecdsa_signature sig; /* Test the case where ECDSA recomputes a point that is infinity. */ { - rustsecp256k1_v0_4_0_gej keyj; - rustsecp256k1_v0_4_0_ge key; - rustsecp256k1_v0_4_0_scalar msg; - rustsecp256k1_v0_4_0_scalar sr, ss; - rustsecp256k1_v0_4_0_scalar_set_int(&ss, 1); - rustsecp256k1_v0_4_0_scalar_negate(&ss, &ss); - rustsecp256k1_v0_4_0_scalar_inverse(&ss, &ss); - rustsecp256k1_v0_4_0_scalar_set_int(&sr, 1); - rustsecp256k1_v0_4_0_ecmult_gen(&ctx->ecmult_gen_ctx, &keyj, &sr); - rustsecp256k1_v0_4_0_ge_set_gej(&key, &keyj); + rustsecp256k1_v0_4_1_gej keyj; + rustsecp256k1_v0_4_1_ge key; + rustsecp256k1_v0_4_1_scalar msg; + rustsecp256k1_v0_4_1_scalar sr, ss; + rustsecp256k1_v0_4_1_scalar_set_int(&ss, 1); + rustsecp256k1_v0_4_1_scalar_negate(&ss, &ss); + rustsecp256k1_v0_4_1_scalar_inverse(&ss, &ss); + rustsecp256k1_v0_4_1_scalar_set_int(&sr, 1); + rustsecp256k1_v0_4_1_ecmult_gen(&ctx->ecmult_gen_ctx, &keyj, &sr); + rustsecp256k1_v0_4_1_ge_set_gej(&key, &keyj); msg = ss; - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); } /* Verify signature with r of zero fails. 
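 * (Background for these degenerate cases: with message hash h,
 * signature (r, s) and public key Q, the verifier computes
 *     R = (h * s^-1) * G + (r * s^-1) * Q
 * and accepts iff r == x(R) mod n. s = 0 has no modular inverse, and
 * r = 0 is rejected before any point arithmetic, so both of the
 * zero-component signatures below must fail to verify.)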
*/ @@ -5028,14 +5871,14 @@ void test_ecdsa_edge_cases(void) { 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41 }; - rustsecp256k1_v0_4_0_ge key; - rustsecp256k1_v0_4_0_scalar msg; - rustsecp256k1_v0_4_0_scalar sr, ss; - rustsecp256k1_v0_4_0_scalar_set_int(&ss, 1); - rustsecp256k1_v0_4_0_scalar_set_int(&msg, 0); - rustsecp256k1_v0_4_0_scalar_set_int(&sr, 0); - CHECK(rustsecp256k1_v0_4_0_eckey_pubkey_parse(&key, pubkey_mods_zero, 33)); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); + rustsecp256k1_v0_4_1_ge key; + rustsecp256k1_v0_4_1_scalar msg; + rustsecp256k1_v0_4_1_scalar sr, ss; + rustsecp256k1_v0_4_1_scalar_set_int(&ss, 1); + rustsecp256k1_v0_4_1_scalar_set_int(&msg, 0); + rustsecp256k1_v0_4_1_scalar_set_int(&sr, 0); + CHECK(rustsecp256k1_v0_4_1_eckey_pubkey_parse(&key, pubkey_mods_zero, 33)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); } /* Verify signature with s of zero fails. */ @@ -5047,14 +5890,14 @@ void test_ecdsa_edge_cases(void) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }; - rustsecp256k1_v0_4_0_ge key; - rustsecp256k1_v0_4_0_scalar msg; - rustsecp256k1_v0_4_0_scalar sr, ss; - rustsecp256k1_v0_4_0_scalar_set_int(&ss, 0); - rustsecp256k1_v0_4_0_scalar_set_int(&msg, 0); - rustsecp256k1_v0_4_0_scalar_set_int(&sr, 1); - CHECK(rustsecp256k1_v0_4_0_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); + rustsecp256k1_v0_4_1_ge key; + rustsecp256k1_v0_4_1_scalar msg; + rustsecp256k1_v0_4_1_scalar sr, ss; + rustsecp256k1_v0_4_1_scalar_set_int(&ss, 0); + rustsecp256k1_v0_4_1_scalar_set_int(&msg, 0); + rustsecp256k1_v0_4_1_scalar_set_int(&sr, 1); + CHECK(rustsecp256k1_v0_4_1_eckey_pubkey_parse(&key, pubkey, 33)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); } /* Verify signature with message 0 passes. 
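 * (When h = 0 the relation above degenerates to r == x((r * s^-1) * Q),
 * so suitably crafted keys verify for a whole family of s values; and
 * since negating s only negates the recovered point R, and
 * x(-R) == x(R), the sign-flipped checks in this block are expected to
 * pass as well.)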
*/ @@ -5073,23 +5916,23 @@ void test_ecdsa_edge_cases(void) { 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x43 }; - rustsecp256k1_v0_4_0_ge key; - rustsecp256k1_v0_4_0_ge key2; - rustsecp256k1_v0_4_0_scalar msg; - rustsecp256k1_v0_4_0_scalar sr, ss; - rustsecp256k1_v0_4_0_scalar_set_int(&ss, 2); - rustsecp256k1_v0_4_0_scalar_set_int(&msg, 0); - rustsecp256k1_v0_4_0_scalar_set_int(&sr, 2); - CHECK(rustsecp256k1_v0_4_0_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(rustsecp256k1_v0_4_0_eckey_pubkey_parse(&key2, pubkey2, 33)); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); - rustsecp256k1_v0_4_0_scalar_negate(&ss, &ss); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); - rustsecp256k1_v0_4_0_scalar_set_int(&ss, 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 0); + rustsecp256k1_v0_4_1_ge key; + rustsecp256k1_v0_4_1_ge key2; + rustsecp256k1_v0_4_1_scalar msg; + rustsecp256k1_v0_4_1_scalar sr, ss; + rustsecp256k1_v0_4_1_scalar_set_int(&ss, 2); + rustsecp256k1_v0_4_1_scalar_set_int(&msg, 0); + rustsecp256k1_v0_4_1_scalar_set_int(&sr, 2); + CHECK(rustsecp256k1_v0_4_1_eckey_pubkey_parse(&key, pubkey, 33)); + CHECK(rustsecp256k1_v0_4_1_eckey_pubkey_parse(&key2, pubkey2, 33)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); + rustsecp256k1_v0_4_1_scalar_negate(&ss, &ss); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); + rustsecp256k1_v0_4_1_scalar_set_int(&ss, 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 0); } /* Verify signature with message 1 passes. 
*/ @@ -5114,24 +5957,24 @@ void test_ecdsa_edge_cases(void) { 0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4, 0x40, 0x2d, 0xa1, 0x72, 0x2f, 0xc9, 0xba, 0xeb }; - rustsecp256k1_v0_4_0_ge key; - rustsecp256k1_v0_4_0_ge key2; - rustsecp256k1_v0_4_0_scalar msg; - rustsecp256k1_v0_4_0_scalar sr, ss; - rustsecp256k1_v0_4_0_scalar_set_int(&ss, 1); - rustsecp256k1_v0_4_0_scalar_set_int(&msg, 1); - rustsecp256k1_v0_4_0_scalar_set_b32(&sr, csr, NULL); - CHECK(rustsecp256k1_v0_4_0_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(rustsecp256k1_v0_4_0_eckey_pubkey_parse(&key2, pubkey2, 33)); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); - rustsecp256k1_v0_4_0_scalar_negate(&ss, &ss); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); - rustsecp256k1_v0_4_0_scalar_set_int(&ss, 2); - rustsecp256k1_v0_4_0_scalar_inverse_var(&ss, &ss); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 0); + rustsecp256k1_v0_4_1_ge key; + rustsecp256k1_v0_4_1_ge key2; + rustsecp256k1_v0_4_1_scalar msg; + rustsecp256k1_v0_4_1_scalar sr, ss; + rustsecp256k1_v0_4_1_scalar_set_int(&ss, 1); + rustsecp256k1_v0_4_1_scalar_set_int(&msg, 1); + rustsecp256k1_v0_4_1_scalar_set_b32(&sr, csr, NULL); + CHECK(rustsecp256k1_v0_4_1_eckey_pubkey_parse(&key, pubkey, 33)); + CHECK(rustsecp256k1_v0_4_1_eckey_pubkey_parse(&key2, pubkey2, 33)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); + rustsecp256k1_v0_4_1_scalar_negate(&ss, &ss); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); + rustsecp256k1_v0_4_1_scalar_set_int(&ss, 2); + rustsecp256k1_v0_4_1_scalar_inverse_var(&ss, &ss); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 0); } /* Verify signature with message -1 passes. 
*/ @@ -5149,25 +5992,25 @@ void test_ecdsa_edge_cases(void) { 0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4, 0x40, 0x2d, 0xa1, 0x72, 0x2f, 0xc9, 0xba, 0xee }; - rustsecp256k1_v0_4_0_ge key; - rustsecp256k1_v0_4_0_scalar msg; - rustsecp256k1_v0_4_0_scalar sr, ss; - rustsecp256k1_v0_4_0_scalar_set_int(&ss, 1); - rustsecp256k1_v0_4_0_scalar_set_int(&msg, 1); - rustsecp256k1_v0_4_0_scalar_negate(&msg, &msg); - rustsecp256k1_v0_4_0_scalar_set_b32(&sr, csr, NULL); - CHECK(rustsecp256k1_v0_4_0_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); - rustsecp256k1_v0_4_0_scalar_negate(&ss, &ss); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); - rustsecp256k1_v0_4_0_scalar_set_int(&ss, 3); - rustsecp256k1_v0_4_0_scalar_inverse_var(&ss, &ss); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); + rustsecp256k1_v0_4_1_ge key; + rustsecp256k1_v0_4_1_scalar msg; + rustsecp256k1_v0_4_1_scalar sr, ss; + rustsecp256k1_v0_4_1_scalar_set_int(&ss, 1); + rustsecp256k1_v0_4_1_scalar_set_int(&msg, 1); + rustsecp256k1_v0_4_1_scalar_negate(&msg, &msg); + rustsecp256k1_v0_4_1_scalar_set_b32(&sr, csr, NULL); + CHECK(rustsecp256k1_v0_4_1_eckey_pubkey_parse(&key, pubkey, 33)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); + rustsecp256k1_v0_4_1_scalar_negate(&ss, &ss); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); + rustsecp256k1_v0_4_1_scalar_set_int(&ss, 3); + rustsecp256k1_v0_4_1_scalar_inverse_var(&ss, &ss); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); } /* Signature where s would be zero. 
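 * (On the signing side, s = k^-1 * (h + r * x) mod n for nonce k and
 * secret key x, so s vanishes exactly when h == -r * x (mod n). The
 * precomputed nonces fed in below appear chosen to hit that case, and
 * signing must fail rather than emit a signature with s = 0.)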
*/ { - rustsecp256k1_v0_4_0_pubkey pubkey; + rustsecp256k1_v0_4_1_pubkey pubkey; size_t siglen; int32_t ecount; unsigned char signature[72]; @@ -5196,71 +6039,71 @@ void test_ecdsa_edge_cases(void) { 0x65, 0xdf, 0xdd, 0x31, 0xb9, 0x3e, 0x29, 0xa9, }; ecount = 0; - rustsecp256k1_v0_4_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 0); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 0); + rustsecp256k1_v0_4_1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 0); msg[31] = 0xaa; - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 1); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, NULL, msg, key, precomputed_nonce_function, nonce2) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, NULL, msg, key, precomputed_nonce_function, nonce2) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &sig, NULL, key, precomputed_nonce_function, nonce2) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &sig, NULL, key, precomputed_nonce_function, nonce2) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &sig, msg, NULL, precomputed_nonce_function, nonce2) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &sig, msg, NULL, precomputed_nonce_function, nonce2) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 1); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey, key) == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, NULL, msg, &pubkey) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey, key) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, NULL, msg, &pubkey) == 0); CHECK(ecount == 4); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, NULL, &pubkey) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, NULL, &pubkey) == 0); CHECK(ecount == 5); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg, NULL) == 0); CHECK(ecount == 6); - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg, &pubkey) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg, &pubkey) == 1); CHECK(ecount == 6); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey, NULL) == 0); CHECK(ecount == 7); /* That pubkeyload fails via an ARGCHECK is a little odd but makes sense because pubkeys are an opaque data type. 
*/ - CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg, &pubkey) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg, &pubkey) == 0); CHECK(ecount == 8); siglen = 72; - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(ctx, NULL, &siglen, &sig) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der(ctx, NULL, &siglen, &sig) == 0); CHECK(ecount == 9); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(ctx, signature, NULL, &sig) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der(ctx, signature, NULL, &sig) == 0); CHECK(ecount == 10); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(ctx, signature, &siglen, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der(ctx, signature, &siglen, NULL) == 0); CHECK(ecount == 11); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 1); CHECK(ecount == 11); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, NULL, signature, siglen) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, NULL, signature, siglen) == 0); CHECK(ecount == 12); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, NULL, siglen) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, NULL, siglen) == 0); CHECK(ecount == 13); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, signature, siglen) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, signature, siglen) == 1); CHECK(ecount == 13); siglen = 10; /* Too little room for a signature does not fail via ARGCHECK. */ - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 0); CHECK(ecount == 13); ecount = 0; - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_normalize(ctx, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_normalize(ctx, NULL, NULL) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_serialize_compact(ctx, NULL, &sig) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_serialize_compact(ctx, NULL, &sig) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_serialize_compact(ctx, signature, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_serialize_compact(ctx, signature, NULL) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_serialize_compact(ctx, signature, &sig) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_serialize_compact(ctx, signature, &sig) == 1); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(ctx, NULL, signature) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact(ctx, NULL, signature) == 0); CHECK(ecount == 4); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(ctx, &sig, NULL) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact(ctx, &sig, NULL) == 0); CHECK(ecount == 5); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(ctx, &sig, signature) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact(ctx, &sig, signature) == 1); CHECK(ecount == 5); memset(signature, 255, 64); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(ctx, &sig, signature) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact(ctx, &sig, signature) == 0); CHECK(ecount == 5); - 
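
The ecount bookkeeping in this block uses the library's illegal-argument callback, which is public API (counting_illegal_callback_fn is the test-suite helper). A minimal sketch with upstream, unprefixed names; count_illegal below is an illustrative stand-in:

    #include <stdint.h>
    #include <secp256k1.h>

    /* Invoked instead of the default (which aborts) whenever an API
     * call receives an illegal argument; here it just counts. */
    static void count_illegal(const char *message, void *data) {
        (void)message;
        (*(int32_t *)data)++;
    }

    /* Usage:
     *     int32_t ecount = 0;
     *     secp256k1_context_set_illegal_callback(ctx, count_illegal, &ecount);
     *     ... exercise the API with bad arguments ...
     *     secp256k1_context_set_illegal_callback(ctx, NULL, NULL);
     * where the final call restores the default behaviour. */
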
rustsecp256k1_v0_4_0_context_set_illegal_callback(ctx, NULL, NULL); + rustsecp256k1_v0_4_1_context_set_illegal_callback(ctx, NULL, NULL); } /* Nonce function corner cases. */ @@ -5269,43 +6112,43 @@ void test_ecdsa_edge_cases(void) { int i; unsigned char key[32]; unsigned char msg[32]; - rustsecp256k1_v0_4_0_ecdsa_signature sig2; - rustsecp256k1_v0_4_0_scalar sr[512], ss; + rustsecp256k1_v0_4_1_ecdsa_signature sig2; + rustsecp256k1_v0_4_1_scalar sr[512], ss; const unsigned char *extra; extra = t == 0 ? NULL : zero; memset(msg, 0, 32); msg[31] = 1; /* High key results in signature failure. */ memset(key, 0xFF, 32); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0); CHECK(is_empty_signature(&sig)); /* Zero key results in signature failure. */ memset(key, 0, 32); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0); CHECK(is_empty_signature(&sig)); /* Nonce function failure results in signature failure. */ key[31] = 1; - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_fail, extra) == 0); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_fail, extra) == 0); CHECK(is_empty_signature(&sig)); /* The retry loop successfully makes its way to the first good value. */ - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_retry, extra) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_retry, extra) == 1); CHECK(!is_empty_signature(&sig)); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &sig2, msg, key, nonce_function_rfc6979, extra) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &sig2, msg, key, nonce_function_rfc6979, extra) == 1); CHECK(!is_empty_signature(&sig2)); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&sig, &sig2, sizeof(sig)) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&sig, &sig2, sizeof(sig)) == 0); /* The default nonce function is deterministic. */ - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); CHECK(!is_empty_signature(&sig2)); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&sig, &sig2, sizeof(sig)) == 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&sig, &sig2, sizeof(sig)) == 0); /* The default nonce function changes output with different messages. 
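 * (The determinism asserted just above is visible through the public
 * API as well: signing the same message twice with the same key and the
 * default (RFC 6979) nonce function must yield identical signatures. A
 * sketch with upstream, unprefixed names:
 *
 *     secp256k1_ecdsa_signature a, b;
 *     CHECK(secp256k1_ecdsa_sign(ctx, &a, msg32, seckey32, NULL, NULL));
 *     CHECK(secp256k1_ecdsa_sign(ctx, &b, msg32, seckey32, NULL, NULL));
 *     CHECK(memcmp(&a, &b, sizeof(a)) == 0);
 *
 * where the NULL nonce function selects secp256k1_nonce_function_default
 * and the trailing NULL passes no extra entropy.)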
*/ for(i = 0; i < 256; i++) { int j; msg[0] = i; - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); CHECK(!is_empty_signature(&sig2)); - rustsecp256k1_v0_4_0_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2); + rustsecp256k1_v0_4_1_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2); for (j = 0; j < i; j++) { - CHECK(!rustsecp256k1_v0_4_0_scalar_eq(&sr[i], &sr[j])); + CHECK(!rustsecp256k1_v0_4_1_scalar_eq(&sr[i], &sr[j])); } } msg[0] = 0; @@ -5314,11 +6157,11 @@ void test_ecdsa_edge_cases(void) { for(i = 256; i < 512; i++) { int j; key[0] = i - 256; - CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); CHECK(!is_empty_signature(&sig2)); - rustsecp256k1_v0_4_0_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2); + rustsecp256k1_v0_4_1_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2); for (j = 0; j < i; j++) { - CHECK(!rustsecp256k1_v0_4_0_scalar_eq(&sr[i], &sr[j])); + CHECK(!rustsecp256k1_v0_4_1_scalar_eq(&sr[i], &sr[j])); } } key[0] = 0; @@ -5343,12 +6186,12 @@ void test_ecdsa_edge_cases(void) { VG_CHECK(nonce3,32); CHECK(nonce_function_rfc6979(nonce4, zeros, zeros, zeros, (void *)zeros, 0) == 1); VG_CHECK(nonce4,32); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(nonce, nonce2, 32) != 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(nonce, nonce3, 32) != 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(nonce, nonce4, 32) != 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(nonce2, nonce3, 32) != 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(nonce2, nonce4, 32) != 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(nonce3, nonce4, 32) != 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(nonce, nonce2, 32) != 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(nonce, nonce3, 32) != 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(nonce, nonce4, 32) != 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(nonce2, nonce3, 32) != 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(nonce2, nonce4, 32) != 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(nonce3, nonce4, 32) != 0); } @@ -5377,7 +6220,7 @@ EC_KEY *get_openssl_key(const unsigned char *key32) { unsigned char privkey[300]; size_t privkeylen; const unsigned char* pbegin = privkey; - int compr = rustsecp256k1_v0_4_0_testrand_bits(1); + int compr = rustsecp256k1_v0_4_1_testrand_bits(1); EC_KEY *ec_key = EC_KEY_new_by_curve_name(NID_secp256k1); CHECK(ec_privkey_export_der(ctx, privkey, &privkeylen, key32, compr)); CHECK(d2i_ECPrivateKey(&ec_key, &pbegin, privkeylen)); @@ -5386,35 +6229,35 @@ EC_KEY *get_openssl_key(const unsigned char *key32) { } void test_ecdsa_openssl(void) { - rustsecp256k1_v0_4_0_gej qj; - rustsecp256k1_v0_4_0_ge q; - rustsecp256k1_v0_4_0_scalar sigr, sigs; - rustsecp256k1_v0_4_0_scalar one; - rustsecp256k1_v0_4_0_scalar msg2; - rustsecp256k1_v0_4_0_scalar key, msg; + rustsecp256k1_v0_4_1_gej qj; + rustsecp256k1_v0_4_1_ge q; + rustsecp256k1_v0_4_1_scalar sigr, sigs; + rustsecp256k1_v0_4_1_scalar one; + rustsecp256k1_v0_4_1_scalar msg2; + rustsecp256k1_v0_4_1_scalar key, msg; EC_KEY *ec_key; unsigned int sigsize = 80; size_t secp_sigsize = 80; unsigned char message[32]; unsigned char signature[80]; unsigned char key32[32]; - rustsecp256k1_v0_4_0_testrand256_test(message); - rustsecp256k1_v0_4_0_scalar_set_b32(&msg, message, NULL); + rustsecp256k1_v0_4_1_testrand256_test(message); + rustsecp256k1_v0_4_1_scalar_set_b32(&msg, message, NULL); random_scalar_order_test(&key); - 
rustsecp256k1_v0_4_0_scalar_get_b32(key32, &key); - rustsecp256k1_v0_4_0_ecmult_gen(&ctx->ecmult_gen_ctx, &qj, &key); - rustsecp256k1_v0_4_0_ge_set_gej(&q, &qj); + rustsecp256k1_v0_4_1_scalar_get_b32(key32, &key); + rustsecp256k1_v0_4_1_ecmult_gen(&ctx->ecmult_gen_ctx, &qj, &key); + rustsecp256k1_v0_4_1_ge_set_gej(&q, &qj); ec_key = get_openssl_key(key32); CHECK(ec_key != NULL); CHECK(ECDSA_sign(0, message, sizeof(message), signature, &sigsize, ec_key)); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_parse(&sigr, &sigs, signature, sigsize)); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &q, &msg)); - rustsecp256k1_v0_4_0_scalar_set_int(&one, 1); - rustsecp256k1_v0_4_0_scalar_add(&msg2, &msg, &one); - CHECK(!rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &q, &msg2)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_parse(&sigr, &sigs, signature, sigsize)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &q, &msg)); + rustsecp256k1_v0_4_1_scalar_set_int(&one, 1); + rustsecp256k1_v0_4_1_scalar_add(&msg2, &msg, &one); + CHECK(!rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &q, &msg2)); random_sign(&sigr, &sigs, &key, &msg, NULL); - CHECK(rustsecp256k1_v0_4_0_ecdsa_sig_serialize(signature, &secp_sigsize, &sigr, &sigs)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_sig_serialize(signature, &secp_sigsize, &sigr, &sigs)); CHECK(ECDSA_verify(0, message, sizeof(message), signature, secp_sigsize, ec_key) == 1); EC_KEY_free(ec_key); @@ -5444,166 +6287,166 @@ void run_ecdsa_openssl(void) { # include "modules/schnorrsig/tests_impl.h" #endif -void run_rustsecp256k1_v0_4_0_memczero_test(void) { +void run_rustsecp256k1_v0_4_1_memczero_test(void) { unsigned char buf1[6] = {1, 2, 3, 4, 5, 6}; unsigned char buf2[sizeof(buf1)]; - /* rustsecp256k1_v0_4_0_memczero(..., ..., 0) is a noop. */ + /* rustsecp256k1_v0_4_1_memczero(..., ..., 0) is a noop. */ memcpy(buf2, buf1, sizeof(buf1)); - rustsecp256k1_v0_4_0_memczero(buf1, sizeof(buf1), 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(buf1, buf2, sizeof(buf1)) == 0); + rustsecp256k1_v0_4_1_memczero(buf1, sizeof(buf1), 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(buf1, buf2, sizeof(buf1)) == 0); - /* rustsecp256k1_v0_4_0_memczero(..., ..., 1) zeros the buffer. */ + /* rustsecp256k1_v0_4_1_memczero(..., ..., 1) zeros the buffer. 
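 * (memczero and the cmov tests below all assert the same constant-time
 * contract: with flag 0 the destination is untouched, with flag 1 it is
 * overwritten, and the implementation must not branch on the flag. The
 * usual mask trick, sketched here for a plain uint32_t -- an
 * illustration of the pattern, not the library's exact code:
 *
 *     static void u32_cmov(uint32_t *r, uint32_t a, int flag) {
 *         uint32_t mask = (uint32_t)0 - (uint32_t)(flag != 0);
 *         *r = (*r & ~mask) | (a & mask);
 *     }
 *
 * With flag == 1 the mask is all ones and *r becomes a; with flag == 0
 * the mask is zero and *r is left unchanged, with no data-dependent
 * branch either way.)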
*/ memset(buf2, 0, sizeof(buf2)); - rustsecp256k1_v0_4_0_memczero(buf1, sizeof(buf1) , 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(buf1, buf2, sizeof(buf1)) == 0); + rustsecp256k1_v0_4_1_memczero(buf1, sizeof(buf1) , 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(buf1, buf2, sizeof(buf1)) == 0); } void int_cmov_test(void) { int r = INT_MAX; int a = 0; - rustsecp256k1_v0_4_0_int_cmov(&r, &a, 0); + rustsecp256k1_v0_4_1_int_cmov(&r, &a, 0); CHECK(r == INT_MAX); r = 0; a = INT_MAX; - rustsecp256k1_v0_4_0_int_cmov(&r, &a, 1); + rustsecp256k1_v0_4_1_int_cmov(&r, &a, 1); CHECK(r == INT_MAX); a = 0; - rustsecp256k1_v0_4_0_int_cmov(&r, &a, 1); + rustsecp256k1_v0_4_1_int_cmov(&r, &a, 1); CHECK(r == 0); a = 1; - rustsecp256k1_v0_4_0_int_cmov(&r, &a, 1); + rustsecp256k1_v0_4_1_int_cmov(&r, &a, 1); CHECK(r == 1); r = 1; a = 0; - rustsecp256k1_v0_4_0_int_cmov(&r, &a, 0); + rustsecp256k1_v0_4_1_int_cmov(&r, &a, 0); CHECK(r == 1); } void fe_cmov_test(void) { - static const rustsecp256k1_v0_4_0_fe zero = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0); - static const rustsecp256k1_v0_4_0_fe one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); - static const rustsecp256k1_v0_4_0_fe max = SECP256K1_FE_CONST( + static const rustsecp256k1_v0_4_1_fe zero = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0); + static const rustsecp256k1_v0_4_1_fe one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); + static const rustsecp256k1_v0_4_1_fe max = SECP256K1_FE_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); - rustsecp256k1_v0_4_0_fe r = max; - rustsecp256k1_v0_4_0_fe a = zero; + rustsecp256k1_v0_4_1_fe r = max; + rustsecp256k1_v0_4_1_fe a = zero; - rustsecp256k1_v0_4_0_fe_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_fe_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &max, sizeof(r)) == 0); r = zero; a = max; - rustsecp256k1_v0_4_0_fe_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_fe_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &max, sizeof(r)) == 0); a = zero; - rustsecp256k1_v0_4_0_fe_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &zero, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_fe_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &zero, sizeof(r)) == 0); a = one; - rustsecp256k1_v0_4_0_fe_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_fe_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &one, sizeof(r)) == 0); r = one; a = zero; - rustsecp256k1_v0_4_0_fe_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_fe_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &one, sizeof(r)) == 0); } void fe_storage_cmov_test(void) { - static const rustsecp256k1_v0_4_0_fe_storage zero = SECP256K1_FE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 0); - static const rustsecp256k1_v0_4_0_fe_storage one = SECP256K1_FE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 1); - static const rustsecp256k1_v0_4_0_fe_storage max = SECP256K1_FE_STORAGE_CONST( + static const rustsecp256k1_v0_4_1_fe_storage zero = SECP256K1_FE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 0); + static const rustsecp256k1_v0_4_1_fe_storage one = SECP256K1_FE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 1); + static const rustsecp256k1_v0_4_1_fe_storage max = SECP256K1_FE_STORAGE_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 
0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); - rustsecp256k1_v0_4_0_fe_storage r = max; - rustsecp256k1_v0_4_0_fe_storage a = zero; + rustsecp256k1_v0_4_1_fe_storage r = max; + rustsecp256k1_v0_4_1_fe_storage a = zero; - rustsecp256k1_v0_4_0_fe_storage_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_fe_storage_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &max, sizeof(r)) == 0); r = zero; a = max; - rustsecp256k1_v0_4_0_fe_storage_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_fe_storage_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &max, sizeof(r)) == 0); a = zero; - rustsecp256k1_v0_4_0_fe_storage_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &zero, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_fe_storage_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &zero, sizeof(r)) == 0); a = one; - rustsecp256k1_v0_4_0_fe_storage_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_fe_storage_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &one, sizeof(r)) == 0); r = one; a = zero; - rustsecp256k1_v0_4_0_fe_storage_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_fe_storage_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &one, sizeof(r)) == 0); } void scalar_cmov_test(void) { - static const rustsecp256k1_v0_4_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); - static const rustsecp256k1_v0_4_0_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); - static const rustsecp256k1_v0_4_0_scalar max = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_4_1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + static const rustsecp256k1_v0_4_1_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); + static const rustsecp256k1_v0_4_1_scalar max = SECP256K1_SCALAR_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); - rustsecp256k1_v0_4_0_scalar r = max; - rustsecp256k1_v0_4_0_scalar a = zero; + rustsecp256k1_v0_4_1_scalar r = max; + rustsecp256k1_v0_4_1_scalar a = zero; - rustsecp256k1_v0_4_0_scalar_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_scalar_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &max, sizeof(r)) == 0); r = zero; a = max; - rustsecp256k1_v0_4_0_scalar_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_scalar_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &max, sizeof(r)) == 0); a = zero; - rustsecp256k1_v0_4_0_scalar_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &zero, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_scalar_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &zero, sizeof(r)) == 0); a = one; - rustsecp256k1_v0_4_0_scalar_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_scalar_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &one, sizeof(r)) == 0); r = one; a = zero; - rustsecp256k1_v0_4_0_scalar_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_scalar_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &one, sizeof(r)) == 0); } void 
ge_storage_cmov_test(void) { - static const rustsecp256k1_v0_4_0_ge_storage zero = SECP256K1_GE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - static const rustsecp256k1_v0_4_0_ge_storage one = SECP256K1_GE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1); - static const rustsecp256k1_v0_4_0_ge_storage max = SECP256K1_GE_STORAGE_CONST( + static const rustsecp256k1_v0_4_1_ge_storage zero = SECP256K1_GE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + static const rustsecp256k1_v0_4_1_ge_storage one = SECP256K1_GE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1); + static const rustsecp256k1_v0_4_1_ge_storage max = SECP256K1_GE_STORAGE_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); - rustsecp256k1_v0_4_0_ge_storage r = max; - rustsecp256k1_v0_4_0_ge_storage a = zero; + rustsecp256k1_v0_4_1_ge_storage r = max; + rustsecp256k1_v0_4_1_ge_storage a = zero; - rustsecp256k1_v0_4_0_ge_storage_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_ge_storage_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &max, sizeof(r)) == 0); r = zero; a = max; - rustsecp256k1_v0_4_0_ge_storage_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_ge_storage_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &max, sizeof(r)) == 0); a = zero; - rustsecp256k1_v0_4_0_ge_storage_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &zero, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_ge_storage_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &zero, sizeof(r)) == 0); a = one; - rustsecp256k1_v0_4_0_ge_storage_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_ge_storage_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &one, sizeof(r)) == 0); r = one; a = zero; - rustsecp256k1_v0_4_0_ge_storage_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_4_0_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_4_1_ge_storage_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_4_1_memcmp_var(&r, &one, sizeof(r)) == 0); } void run_cmov_tests(void) { @@ -5628,7 +6471,7 @@ int main(int argc, char **argv) { count = strtol(argv[1], NULL, 0); } else { const char* env = getenv("SECP256K1_TEST_ITERS"); - if (env) { + if (env && strlen(env) > 0) { count = strtol(env, NULL, 0); } } @@ -5639,38 +6482,34 @@ int main(int argc, char **argv) { printf("test count = %i\n", count); /* find random seed */ - rustsecp256k1_v0_4_0_testrand_init(argc > 2 ? argv[2] : NULL); + rustsecp256k1_v0_4_1_testrand_init(argc > 2 ? argv[2] : NULL); /* initialize */ run_context_tests(0); run_context_tests(1); run_scratch_tests(); - ctx = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - if (rustsecp256k1_v0_4_0_testrand_bits(1)) { + ctx = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + if (rustsecp256k1_v0_4_1_testrand_bits(1)) { unsigned char rand32[32]; - rustsecp256k1_v0_4_0_testrand256(rand32); - CHECK(rustsecp256k1_v0_4_0_context_randomize(ctx, rustsecp256k1_v0_4_0_testrand_bits(1) ? rand32 : NULL)); + rustsecp256k1_v0_4_1_testrand256(rand32); + CHECK(rustsecp256k1_v0_4_1_context_randomize(ctx, rustsecp256k1_v0_4_1_testrand_bits(1) ? 
rand32 : NULL));
     }
 
     run_rand_bits();
     run_rand_int();
 
+    run_ctz_tests();
+    run_modinv_tests();
+    run_inverse_tests();
+
     run_sha256_tests();
     run_hmac_sha256_tests();
     run_rfc6979_hmac_sha256_tests();
 
-#ifndef USE_NUM_NONE
-    /* num tests */
-    run_num_smalltests();
-#endif
-
     /* scalar tests */
     run_scalar_tests();
 
     /* field tests */
-    run_field_inv();
-    run_field_inv_var();
-    run_field_inv_all_var();
     run_field_misc();
     run_field_convert();
     run_sqr();
@@ -5709,6 +6548,7 @@ int main(int argc, char **argv) {
 #endif
 
     /* ecdsa tests */
+    run_pubkey_comparison();
     run_random_pubkeys();
     run_ecdsa_der_parse();
     run_ecdsa_sign_verify();
@@ -5732,14 +6572,14 @@ int main(int argc, char **argv) {
 #endif
 
     /* util tests */
-    run_rustsecp256k1_v0_4_0_memczero_test();
+    run_rustsecp256k1_v0_4_1_memczero_test();
     run_cmov_tests();
 
-    rustsecp256k1_v0_4_0_testrand_finish();
+    rustsecp256k1_v0_4_1_testrand_finish();
 
     /* shutdown */
-    rustsecp256k1_v0_4_0_context_destroy(ctx);
+    rustsecp256k1_v0_4_1_context_destroy(ctx);
 
     printf("no problems found\n");
     return 0;
diff --git a/secp256k1-sys/depend/secp256k1/src/tests_exhaustive.c b/secp256k1-sys/depend/secp256k1/src/tests_exhaustive.c
index 8030b7d..d430460 100644
--- a/secp256k1-sys/depend/secp256k1/src/tests_exhaustive.c
+++ b/secp256k1-sys/depend/secp256k1/src/tests_exhaustive.c
@@ -10,7 +10,6 @@
 #include <stdio.h>
 #include <stdlib.h>
 
-#include <time.h>
 
 #undef USE_ECMULT_STATIC_PRECOMPUTATION
 
@@ -20,46 +19,46 @@
 #define EXHAUSTIVE_TEST_ORDER 13
 #endif
 
-#include "include/secp256k1.h"
+#include "secp256k1.c"
+#include "../include/secp256k1.h"
 #include "assumptions.h"
 #include "group.h"
-#include "secp256k1.c"
 #include "testrand_impl.h"
 
 static int count = 2;
 
 /** stolen from tests.c */
-void ge_equals_ge(const rustsecp256k1_v0_4_0_ge *a, const rustsecp256k1_v0_4_0_ge *b) {
+void ge_equals_ge(const rustsecp256k1_v0_4_1_ge *a, const rustsecp256k1_v0_4_1_ge *b) {
     CHECK(a->infinity == b->infinity);
     if (a->infinity) {
         return;
     }
-    CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&a->x, &b->x));
-    CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&a->y, &b->y));
+    CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&a->x, &b->x));
+    CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&a->y, &b->y));
 }
 
-void ge_equals_gej(const rustsecp256k1_v0_4_0_ge *a, const rustsecp256k1_v0_4_0_gej *b) {
-    rustsecp256k1_v0_4_0_fe z2s;
-    rustsecp256k1_v0_4_0_fe u1, u2, s1, s2;
+void ge_equals_gej(const rustsecp256k1_v0_4_1_ge *a, const rustsecp256k1_v0_4_1_gej *b) {
+    rustsecp256k1_v0_4_1_fe z2s;
+    rustsecp256k1_v0_4_1_fe u1, u2, s1, s2;
     CHECK(a->infinity == b->infinity);
     if (a->infinity) {
         return;
     }
     /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses.
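 * (A Jacobian point (X, Y, Z) represents the affine point
 * (X / Z^2, Y / Z^3), so scaling a's affine coordinates by b's Z^2 and
 * Z^3 turns the comparison into a few field multiplications instead of
 * a costly modular inversion.)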
*/ - rustsecp256k1_v0_4_0_fe_sqr(&z2s, &b->z); - rustsecp256k1_v0_4_0_fe_mul(&u1, &a->x, &z2s); - u2 = b->x; rustsecp256k1_v0_4_0_fe_normalize_weak(&u2); - rustsecp256k1_v0_4_0_fe_mul(&s1, &a->y, &z2s); rustsecp256k1_v0_4_0_fe_mul(&s1, &s1, &b->z); - s2 = b->y; rustsecp256k1_v0_4_0_fe_normalize_weak(&s2); - CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&u1, &u2)); - CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&s1, &s2)); + rustsecp256k1_v0_4_1_fe_sqr(&z2s, &b->z); + rustsecp256k1_v0_4_1_fe_mul(&u1, &a->x, &z2s); + u2 = b->x; rustsecp256k1_v0_4_1_fe_normalize_weak(&u2); + rustsecp256k1_v0_4_1_fe_mul(&s1, &a->y, &z2s); rustsecp256k1_v0_4_1_fe_mul(&s1, &s1, &b->z); + s2 = b->y; rustsecp256k1_v0_4_1_fe_normalize_weak(&s2); + CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&u1, &u2)); + CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&s1, &s2)); } -void random_fe(rustsecp256k1_v0_4_0_fe *x) { +void random_fe(rustsecp256k1_v0_4_1_fe *x) { unsigned char bin[32]; do { - rustsecp256k1_v0_4_0_testrand256(bin); - if (rustsecp256k1_v0_4_0_fe_set_b32(x, bin)) { + rustsecp256k1_v0_4_1_testrand256(bin); + if (rustsecp256k1_v0_4_1_fe_set_b32(x, bin)) { return; } } while(1); @@ -75,10 +74,10 @@ SECP256K1_INLINE static int skip_section(uint64_t* iter) { return ((((uint32_t)*iter ^ (*iter >> 32)) * num_cores) >> 32) != this_core; } -int rustsecp256k1_v0_4_0_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32, +int rustsecp256k1_v0_4_1_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int attempt) { - rustsecp256k1_v0_4_0_scalar s; + rustsecp256k1_v0_4_1_scalar s; int *idata = data; (void)msg32; (void)key32; @@ -90,97 +89,97 @@ int rustsecp256k1_v0_4_0_nonce_function_smallint(unsigned char *nonce32, const u if (attempt > 0) { *idata = (*idata + 1) % EXHAUSTIVE_TEST_ORDER; } - rustsecp256k1_v0_4_0_scalar_set_int(&s, *idata); - rustsecp256k1_v0_4_0_scalar_get_b32(nonce32, &s); + rustsecp256k1_v0_4_1_scalar_set_int(&s, *idata); + rustsecp256k1_v0_4_1_scalar_get_b32(nonce32, &s); return 1; } -void test_exhaustive_endomorphism(const rustsecp256k1_v0_4_0_ge *group) { +void test_exhaustive_endomorphism(const rustsecp256k1_v0_4_1_ge *group) { int i; for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_4_0_ge res; - rustsecp256k1_v0_4_0_ge_mul_lambda(&res, &group[i]); + rustsecp256k1_v0_4_1_ge res; + rustsecp256k1_v0_4_1_ge_mul_lambda(&res, &group[i]); ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res); } } -void test_exhaustive_addition(const rustsecp256k1_v0_4_0_ge *group, const rustsecp256k1_v0_4_0_gej *groupj) { +void test_exhaustive_addition(const rustsecp256k1_v0_4_1_ge *group, const rustsecp256k1_v0_4_1_gej *groupj) { int i, j; uint64_t iter = 0; /* Sanity-check (and check infinity functions) */ - CHECK(rustsecp256k1_v0_4_0_ge_is_infinity(&group[0])); - CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&groupj[0])); + CHECK(rustsecp256k1_v0_4_1_ge_is_infinity(&group[0])); + CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&groupj[0])); for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { - CHECK(!rustsecp256k1_v0_4_0_ge_is_infinity(&group[i])); - CHECK(!rustsecp256k1_v0_4_0_gej_is_infinity(&groupj[i])); + CHECK(!rustsecp256k1_v0_4_1_ge_is_infinity(&group[i])); + CHECK(!rustsecp256k1_v0_4_1_gej_is_infinity(&groupj[i])); } /* Check all addition formulae */ for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) { - rustsecp256k1_v0_4_0_fe fe_inv; + rustsecp256k1_v0_4_1_fe fe_inv; if (skip_section(&iter)) 
continue; - rustsecp256k1_v0_4_0_fe_inv(&fe_inv, &groupj[j].z); + rustsecp256k1_v0_4_1_fe_inv(&fe_inv, &groupj[j].z); for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_4_0_ge zless_gej; - rustsecp256k1_v0_4_0_gej tmp; + rustsecp256k1_v0_4_1_ge zless_gej; + rustsecp256k1_v0_4_1_gej tmp; /* add_var */ - rustsecp256k1_v0_4_0_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL); + rustsecp256k1_v0_4_1_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL); ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); /* add_ge */ if (j > 0) { - rustsecp256k1_v0_4_0_gej_add_ge(&tmp, &groupj[i], &group[j]); + rustsecp256k1_v0_4_1_gej_add_ge(&tmp, &groupj[i], &group[j]); ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); } /* add_ge_var */ - rustsecp256k1_v0_4_0_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL); + rustsecp256k1_v0_4_1_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL); ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); /* add_zinv_var */ zless_gej.infinity = groupj[j].infinity; zless_gej.x = groupj[j].x; zless_gej.y = groupj[j].y; - rustsecp256k1_v0_4_0_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv); + rustsecp256k1_v0_4_1_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv); ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); } } /* Check doubling */ for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_4_0_gej tmp; - rustsecp256k1_v0_4_0_gej_double(&tmp, &groupj[i]); + rustsecp256k1_v0_4_1_gej tmp; + rustsecp256k1_v0_4_1_gej_double(&tmp, &groupj[i]); ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp); - rustsecp256k1_v0_4_0_gej_double_var(&tmp, &groupj[i], NULL); + rustsecp256k1_v0_4_1_gej_double_var(&tmp, &groupj[i], NULL); ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp); } /* Check negation */ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_4_0_ge tmp; - rustsecp256k1_v0_4_0_gej tmpj; - rustsecp256k1_v0_4_0_ge_neg(&tmp, &group[i]); + rustsecp256k1_v0_4_1_ge tmp; + rustsecp256k1_v0_4_1_gej tmpj; + rustsecp256k1_v0_4_1_ge_neg(&tmp, &group[i]); ge_equals_ge(&group[EXHAUSTIVE_TEST_ORDER - i], &tmp); - rustsecp256k1_v0_4_0_gej_neg(&tmpj, &groupj[i]); + rustsecp256k1_v0_4_1_gej_neg(&tmpj, &groupj[i]); ge_equals_gej(&group[EXHAUSTIVE_TEST_ORDER - i], &tmpj); } } -void test_exhaustive_ecmult(const rustsecp256k1_v0_4_0_context *ctx, const rustsecp256k1_v0_4_0_ge *group, const rustsecp256k1_v0_4_0_gej *groupj) { +void test_exhaustive_ecmult(const rustsecp256k1_v0_4_1_context *ctx, const rustsecp256k1_v0_4_1_ge *group, const rustsecp256k1_v0_4_1_gej *groupj) { int i, j, r_log; uint64_t iter = 0; for (r_log = 1; r_log < EXHAUSTIVE_TEST_ORDER; r_log++) { for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) { if (skip_section(&iter)) continue; for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_4_0_gej tmp; - rustsecp256k1_v0_4_0_scalar na, ng; - rustsecp256k1_v0_4_0_scalar_set_int(&na, i); - rustsecp256k1_v0_4_0_scalar_set_int(&ng, j); + rustsecp256k1_v0_4_1_gej tmp; + rustsecp256k1_v0_4_1_scalar na, ng; + rustsecp256k1_v0_4_1_scalar_set_int(&na, i); + rustsecp256k1_v0_4_1_scalar_set_int(&ng, j); - rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &tmp, &groupj[r_log], &na, &ng); + rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &tmp, &groupj[r_log], &na, &ng); ge_equals_gej(&group[(i * r_log + j) % EXHAUSTIVE_TEST_ORDER], &tmp); if (i > 0) { - rustsecp256k1_v0_4_0_ecmult_const(&tmp, &group[i], &ng, 256); + rustsecp256k1_v0_4_1_ecmult_const(&tmp, &group[i], &ng, 256); ge_equals_gej(&group[(i * 
j) % EXHAUSTIVE_TEST_ORDER], &tmp); } } @@ -189,111 +188,111 @@ void test_exhaustive_ecmult(const rustsecp256k1_v0_4_0_context *ctx, const rusts } typedef struct { - rustsecp256k1_v0_4_0_scalar sc[2]; - rustsecp256k1_v0_4_0_ge pt[2]; + rustsecp256k1_v0_4_1_scalar sc[2]; + rustsecp256k1_v0_4_1_ge pt[2]; } ecmult_multi_data; -static int ecmult_multi_callback(rustsecp256k1_v0_4_0_scalar *sc, rustsecp256k1_v0_4_0_ge *pt, size_t idx, void *cbdata) { +static int ecmult_multi_callback(rustsecp256k1_v0_4_1_scalar *sc, rustsecp256k1_v0_4_1_ge *pt, size_t idx, void *cbdata) { ecmult_multi_data *data = (ecmult_multi_data*) cbdata; *sc = data->sc[idx]; *pt = data->pt[idx]; return 1; } -void test_exhaustive_ecmult_multi(const rustsecp256k1_v0_4_0_context *ctx, const rustsecp256k1_v0_4_0_ge *group) { +void test_exhaustive_ecmult_multi(const rustsecp256k1_v0_4_1_context *ctx, const rustsecp256k1_v0_4_1_ge *group) { int i, j, k, x, y; uint64_t iter = 0; - rustsecp256k1_v0_4_0_scratch *scratch = rustsecp256k1_v0_4_0_scratch_create(&ctx->error_callback, 4096); + rustsecp256k1_v0_4_1_scratch *scratch = rustsecp256k1_v0_4_1_scratch_create(&ctx->error_callback, 4096); for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) { for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) { for (x = 0; x < EXHAUSTIVE_TEST_ORDER; x++) { if (skip_section(&iter)) continue; for (y = 0; y < EXHAUSTIVE_TEST_ORDER; y++) { - rustsecp256k1_v0_4_0_gej tmp; - rustsecp256k1_v0_4_0_scalar g_sc; + rustsecp256k1_v0_4_1_gej tmp; + rustsecp256k1_v0_4_1_scalar g_sc; ecmult_multi_data data; - rustsecp256k1_v0_4_0_scalar_set_int(&data.sc[0], i); - rustsecp256k1_v0_4_0_scalar_set_int(&data.sc[1], j); - rustsecp256k1_v0_4_0_scalar_set_int(&g_sc, k); + rustsecp256k1_v0_4_1_scalar_set_int(&data.sc[0], i); + rustsecp256k1_v0_4_1_scalar_set_int(&data.sc[1], j); + rustsecp256k1_v0_4_1_scalar_set_int(&g_sc, k); data.pt[0] = group[x]; data.pt[1] = group[y]; - rustsecp256k1_v0_4_0_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2); + rustsecp256k1_v0_4_1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2); ge_equals_gej(&group[(i * x + j * y + k) % EXHAUSTIVE_TEST_ORDER], &tmp); } } } } } - rustsecp256k1_v0_4_0_scratch_destroy(&ctx->error_callback, scratch); + rustsecp256k1_v0_4_1_scratch_destroy(&ctx->error_callback, scratch); } -void r_from_k(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_ge *group, int k, int* overflow) { - rustsecp256k1_v0_4_0_fe x; +void r_from_k(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_ge *group, int k, int* overflow) { + rustsecp256k1_v0_4_1_fe x; unsigned char x_bin[32]; k %= EXHAUSTIVE_TEST_ORDER; x = group[k].x; - rustsecp256k1_v0_4_0_fe_normalize(&x); - rustsecp256k1_v0_4_0_fe_get_b32(x_bin, &x); - rustsecp256k1_v0_4_0_scalar_set_b32(r, x_bin, overflow); + rustsecp256k1_v0_4_1_fe_normalize(&x); + rustsecp256k1_v0_4_1_fe_get_b32(x_bin, &x); + rustsecp256k1_v0_4_1_scalar_set_b32(r, x_bin, overflow); } -void test_exhaustive_verify(const rustsecp256k1_v0_4_0_context *ctx, const rustsecp256k1_v0_4_0_ge *group) { +void test_exhaustive_verify(const rustsecp256k1_v0_4_1_context *ctx, const rustsecp256k1_v0_4_1_ge *group) { int s, r, msg, key; uint64_t iter = 0; for (s = 1; s < EXHAUSTIVE_TEST_ORDER; s++) { for (r = 1; r < EXHAUSTIVE_TEST_ORDER; r++) { for (msg = 1; msg < EXHAUSTIVE_TEST_ORDER; msg++) { for (key = 1; key < EXHAUSTIVE_TEST_ORDER; key++) { - 
rustsecp256k1_v0_4_0_ge nonconst_ge; - rustsecp256k1_v0_4_0_ecdsa_signature sig; - rustsecp256k1_v0_4_0_pubkey pk; - rustsecp256k1_v0_4_0_scalar sk_s, msg_s, r_s, s_s; - rustsecp256k1_v0_4_0_scalar s_times_k_s, msg_plus_r_times_sk_s; + rustsecp256k1_v0_4_1_ge nonconst_ge; + rustsecp256k1_v0_4_1_ecdsa_signature sig; + rustsecp256k1_v0_4_1_pubkey pk; + rustsecp256k1_v0_4_1_scalar sk_s, msg_s, r_s, s_s; + rustsecp256k1_v0_4_1_scalar s_times_k_s, msg_plus_r_times_sk_s; int k, should_verify; unsigned char msg32[32]; if (skip_section(&iter)) continue; - rustsecp256k1_v0_4_0_scalar_set_int(&s_s, s); - rustsecp256k1_v0_4_0_scalar_set_int(&r_s, r); - rustsecp256k1_v0_4_0_scalar_set_int(&msg_s, msg); - rustsecp256k1_v0_4_0_scalar_set_int(&sk_s, key); + rustsecp256k1_v0_4_1_scalar_set_int(&s_s, s); + rustsecp256k1_v0_4_1_scalar_set_int(&r_s, r); + rustsecp256k1_v0_4_1_scalar_set_int(&msg_s, msg); + rustsecp256k1_v0_4_1_scalar_set_int(&sk_s, key); /* Verify by hand */ /* Run through every k value that gives us this r and check that *one* works. * Note there could be none, there could be multiple, ECDSA is weird. */ should_verify = 0; for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) { - rustsecp256k1_v0_4_0_scalar check_x_s; + rustsecp256k1_v0_4_1_scalar check_x_s; r_from_k(&check_x_s, group, k, NULL); if (r_s == check_x_s) { - rustsecp256k1_v0_4_0_scalar_set_int(&s_times_k_s, k); - rustsecp256k1_v0_4_0_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); - rustsecp256k1_v0_4_0_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); - rustsecp256k1_v0_4_0_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); - should_verify |= rustsecp256k1_v0_4_0_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); + rustsecp256k1_v0_4_1_scalar_set_int(&s_times_k_s, k); + rustsecp256k1_v0_4_1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); + rustsecp256k1_v0_4_1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); + rustsecp256k1_v0_4_1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); + should_verify |= rustsecp256k1_v0_4_1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); } } /* nb we have a "high s" rule */ - should_verify &= !rustsecp256k1_v0_4_0_scalar_is_high(&s_s); + should_verify &= !rustsecp256k1_v0_4_1_scalar_is_high(&s_s); /* Verify by calling verify */ - rustsecp256k1_v0_4_0_ecdsa_signature_save(&sig, &r_s, &s_s); + rustsecp256k1_v0_4_1_ecdsa_signature_save(&sig, &r_s, &s_s); memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge)); - rustsecp256k1_v0_4_0_pubkey_save(&pk, &nonconst_ge); - rustsecp256k1_v0_4_0_scalar_get_b32(msg32, &msg_s); + rustsecp256k1_v0_4_1_pubkey_save(&pk, &nonconst_ge); + rustsecp256k1_v0_4_1_scalar_get_b32(msg32, &msg_s); CHECK(should_verify == - rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pk)); + rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pk)); } } } } } -void test_exhaustive_sign(const rustsecp256k1_v0_4_0_context *ctx, const rustsecp256k1_v0_4_0_ge *group) { +void test_exhaustive_sign(const rustsecp256k1_v0_4_1_context *ctx, const rustsecp256k1_v0_4_1_ge *group) { int i, j, k; uint64_t iter = 0; @@ -303,17 +302,17 @@ void test_exhaustive_sign(const rustsecp256k1_v0_4_0_context *ctx, const rustsec if (skip_section(&iter)) continue; for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) { /* nonce */ const int starting_k = k; - rustsecp256k1_v0_4_0_ecdsa_signature sig; - rustsecp256k1_v0_4_0_scalar sk, msg, r, s, expected_r; + rustsecp256k1_v0_4_1_ecdsa_signature sig; + rustsecp256k1_v0_4_1_scalar sk, msg, r, s, expected_r; unsigned char sk32[32], msg32[32]; - 
rustsecp256k1_v0_4_0_scalar_set_int(&msg, i); - rustsecp256k1_v0_4_0_scalar_set_int(&sk, j); - rustsecp256k1_v0_4_0_scalar_get_b32(sk32, &sk); - rustsecp256k1_v0_4_0_scalar_get_b32(msg32, &msg); + rustsecp256k1_v0_4_1_scalar_set_int(&msg, i); + rustsecp256k1_v0_4_1_scalar_set_int(&sk, j); + rustsecp256k1_v0_4_1_scalar_get_b32(sk32, &sk); + rustsecp256k1_v0_4_1_scalar_get_b32(msg32, &msg); - rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &sig, msg32, sk32, rustsecp256k1_v0_4_0_nonce_function_smallint, &k); + rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &sig, msg32, sk32, rustsecp256k1_v0_4_1_nonce_function_smallint, &k); - rustsecp256k1_v0_4_0_ecdsa_signature_load(ctx, &r, &s, &sig); + rustsecp256k1_v0_4_1_ecdsa_signature_load(ctx, &r, &s, &sig); /* Note that we compute expected_r *after* signing -- this is important * because our nonce-computing function might change k during * signing. */ @@ -354,10 +353,10 @@ void test_exhaustive_sign(const rustsecp256k1_v0_4_0_context *ctx, const rustsec int main(int argc, char** argv) { int i; - rustsecp256k1_v0_4_0_gej groupj[EXHAUSTIVE_TEST_ORDER]; - rustsecp256k1_v0_4_0_ge group[EXHAUSTIVE_TEST_ORDER]; + rustsecp256k1_v0_4_1_gej groupj[EXHAUSTIVE_TEST_ORDER]; + rustsecp256k1_v0_4_1_ge group[EXHAUSTIVE_TEST_ORDER]; unsigned char rand32[32]; - rustsecp256k1_v0_4_0_context *ctx; + rustsecp256k1_v0_4_1_context *ctx; /* Disable buffering for stdout to improve reliability of getting * diagnostic information. Happens right at the start of main because @@ -376,7 +375,7 @@ int main(int argc, char** argv) { printf("test count = %i\n", count); /* find random seed */ - rustsecp256k1_v0_4_0_testrand_init(argc > 2 ? argv[2] : NULL); + rustsecp256k1_v0_4_1_testrand_init(argc > 2 ? argv[2] : NULL); /* set up split processing */ if (argc > 4) { @@ -391,38 +390,38 @@ int main(int argc, char** argv) { while (count--) { /* Build context */ - ctx = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - rustsecp256k1_v0_4_0_testrand256(rand32); - CHECK(rustsecp256k1_v0_4_0_context_randomize(ctx, rand32)); + ctx = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + rustsecp256k1_v0_4_1_testrand256(rand32); + CHECK(rustsecp256k1_v0_4_1_context_randomize(ctx, rand32)); /* Generate the entire group */ - rustsecp256k1_v0_4_0_gej_set_infinity(&groupj[0]); - rustsecp256k1_v0_4_0_ge_set_gej(&group[0], &groupj[0]); + rustsecp256k1_v0_4_1_gej_set_infinity(&groupj[0]); + rustsecp256k1_v0_4_1_ge_set_gej(&group[0], &groupj[0]); for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_4_0_gej_add_ge(&groupj[i], &groupj[i - 1], &rustsecp256k1_v0_4_0_ge_const_g); - rustsecp256k1_v0_4_0_ge_set_gej(&group[i], &groupj[i]); + rustsecp256k1_v0_4_1_gej_add_ge(&groupj[i], &groupj[i - 1], &rustsecp256k1_v0_4_1_ge_const_g); + rustsecp256k1_v0_4_1_ge_set_gej(&group[i], &groupj[i]); if (count != 0) { /* Set a different random z-value for each Jacobian point, except z=1 is used in the last iteration.
*/ - rustsecp256k1_v0_4_0_fe z; + rustsecp256k1_v0_4_1_fe z; random_fe(&z); - rustsecp256k1_v0_4_0_gej_rescale(&groupj[i], &z); + rustsecp256k1_v0_4_1_gej_rescale(&groupj[i], &z); } /* Verify against ecmult_gen */ { - rustsecp256k1_v0_4_0_scalar scalar_i; - rustsecp256k1_v0_4_0_gej generatedj; - rustsecp256k1_v0_4_0_ge generated; + rustsecp256k1_v0_4_1_scalar scalar_i; + rustsecp256k1_v0_4_1_gej generatedj; + rustsecp256k1_v0_4_1_ge generated; - rustsecp256k1_v0_4_0_scalar_set_int(&scalar_i, i); - rustsecp256k1_v0_4_0_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i); - rustsecp256k1_v0_4_0_ge_set_gej(&generated, &generatedj); + rustsecp256k1_v0_4_1_scalar_set_int(&scalar_i, i); + rustsecp256k1_v0_4_1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i); + rustsecp256k1_v0_4_1_ge_set_gej(&generated, &generatedj); CHECK(group[i].infinity == 0); CHECK(generated.infinity == 0); - CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&generated.x, &group[i].x)); - CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&generated.y, &group[i].y)); + CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&generated.x, &group[i].x)); + CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&generated.y, &group[i].y)); } } @@ -444,10 +443,10 @@ int main(int argc, char** argv) { test_exhaustive_schnorrsig(ctx); #endif - rustsecp256k1_v0_4_0_context_destroy(ctx); + rustsecp256k1_v0_4_1_context_destroy(ctx); } - rustsecp256k1_v0_4_0_testrand_finish(); + rustsecp256k1_v0_4_1_testrand_finish(); printf("no problems found\n"); return 0; diff --git a/secp256k1-sys/depend/secp256k1/src/util.h b/secp256k1-sys/depend/secp256k1/src/util.h index 05c6df7..c21b3d2 100644 --- a/secp256k1-sys/depend/secp256k1/src/util.h +++ b/secp256k1-sys/depend/secp256k1/src/util.h @@ -19,9 +19,9 @@ typedef struct { void (*fn)(const char *text, void* data); const void* data; -} rustsecp256k1_v0_4_0_callback; +} rustsecp256k1_v0_4_1_callback; -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_callback_call(const rustsecp256k1_v0_4_0_callback * const cb, const char * const text) { +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_callback_call(const rustsecp256k1_v0_4_1_callback * const cb, const char * const text) { cb->fn(text, (void*)cb->data); } @@ -97,7 +97,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_callback_call(const rustsecp25 #define ALIGNMENT 16 #endif -#define ROUND_TO_ALIGN(size) (((size + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT) +#define ROUND_TO_ALIGN(size) ((((size) + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT) /* Assume there is a contiguous memory object with bounds [base, base + max_size) * of which the memory range [base, *prealloc_ptr) is already allocated for usage, @@ -125,7 +125,7 @@ static SECP256K1_INLINE void *manual_alloc(void** prealloc_ptr, size_t alloc_siz VERIFY_CHECK(((unsigned char*)*prealloc_ptr - (unsigned char*)base) % ALIGNMENT == 0); VERIFY_CHECK((unsigned char*)*prealloc_ptr - (unsigned char*)base + aligned_alloc_size <= max_size); ret = *prealloc_ptr; - *((unsigned char**)prealloc_ptr) += aligned_alloc_size; + *prealloc_ptr = (unsigned char*)*prealloc_ptr + aligned_alloc_size; return ret; } @@ -186,7 +186,7 @@ static SECP256K1_INLINE void *manual_alloc(void** prealloc_ptr, size_t alloc_siz #endif /* Zero memory if flag == 1. Flag must be 0 or 1. Constant time. 
*/ -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_memczero(void *s, size_t len, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_memczero(void *s, size_t len, int flag) { unsigned char *p = (unsigned char *)s; /* Access flag with a volatile-qualified lvalue. This prevents clang from figuring out (after inlining) that flag can @@ -205,7 +205,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_memczero(void *s, size_t len, * We use this to avoid possible compiler bugs with memcmp, e.g. * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95189 */ -static SECP256K1_INLINE int rustsecp256k1_v0_4_0_memcmp_var(const void *s1, const void *s2, size_t n) { +static SECP256K1_INLINE int rustsecp256k1_v0_4_1_memcmp_var(const void *s1, const void *s2, size_t n) { const unsigned char *p1 = s1, *p2 = s2; size_t i; @@ -219,7 +219,7 @@ static SECP256K1_INLINE int rustsecp256k1_v0_4_0_memcmp_var(const void *s1, cons } /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized and non-negative.*/ -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_int_cmov(int *r, const int *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_int_cmov(int *r, const int *a, int flag) { unsigned int mask0, mask1, r_masked, a_masked; /* Access flag with a volatile-qualified lvalue. This prevents clang from figuring out (after inlining) that flag can @@ -260,4 +260,69 @@ SECP256K1_GNUC_EXT typedef __int128 int128_t; # endif #endif +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif + +/* Determine the number of trailing zero bits in a (non-zero) 32-bit x. + * This function is only intended to be used as fallback for + * rustsecp256k1_v0_4_1_ctz32_var, but permits it to be tested separately. */ +static SECP256K1_INLINE int rustsecp256k1_v0_4_1_ctz32_var_debruijn(uint32_t x) { + static const uint8_t debruijn[32] = { + 0x00, 0x01, 0x02, 0x18, 0x03, 0x13, 0x06, 0x19, 0x16, 0x04, 0x14, 0x0A, + 0x10, 0x07, 0x0C, 0x1A, 0x1F, 0x17, 0x12, 0x05, 0x15, 0x09, 0x0F, 0x0B, + 0x1E, 0x11, 0x08, 0x0E, 0x1D, 0x0D, 0x1C, 0x1B + }; + return debruijn[((x & -x) * 0x04D7651F) >> 27]; +} + +/* Determine the number of trailing zero bits in a (non-zero) 64-bit x. + * This function is only intended to be used as fallback for + * rustsecp256k1_v0_4_1_ctz64_var, but permits it to be tested separately. */ +static SECP256K1_INLINE int rustsecp256k1_v0_4_1_ctz64_var_debruijn(uint64_t x) { + static const uint8_t debruijn[64] = { + 0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28, + 62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11, + 63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10, + 51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12 + }; + return debruijn[((x & -x) * 0x022FDD63CC95386D) >> 58]; +} + +/* Determine the number of trailing zero bits in a (non-zero) 32-bit x. */ +static SECP256K1_INLINE int rustsecp256k1_v0_4_1_ctz32_var(uint32_t x) { + VERIFY_CHECK(x != 0); +#if (__has_builtin(__builtin_ctz) || SECP256K1_GNUC_PREREQ(3,4)) + /* If the unsigned type is sufficient to represent the largest uint32_t, consider __builtin_ctz. */ + if (((unsigned)UINT32_MAX) == UINT32_MAX) { + return __builtin_ctz(x); + } +#endif +#if (__has_builtin(__builtin_ctzl) || SECP256K1_GNUC_PREREQ(3,4)) + /* Otherwise consider __builtin_ctzl (the unsigned long type is always at least 32 bits). */ + return __builtin_ctzl(x); +#else + /* If no suitable CTZ builtin is available, use a (variable time) software emulation. 
*/ + return rustsecp256k1_v0_4_1_ctz32_var_debruijn(x); +#endif +} + +/* Determine the number of trailing zero bits in a (non-zero) 64-bit x. */ +static SECP256K1_INLINE int rustsecp256k1_v0_4_1_ctz64_var(uint64_t x) { + VERIFY_CHECK(x != 0); +#if (__has_builtin(__builtin_ctzl) || SECP256K1_GNUC_PREREQ(3,4)) + /* If the unsigned long type is sufficient to represent the largest uint64_t, consider __builtin_ctzl. */ + if (((unsigned long)UINT64_MAX) == UINT64_MAX) { + return __builtin_ctzl(x); + } +#endif +#if (__has_builtin(__builtin_ctzll) || SECP256K1_GNUC_PREREQ(3,4)) + /* Otherwise consider __builtin_ctzll (the unsigned long long type is always at least 64 bits). */ + return __builtin_ctzll(x); +#else + /* If no suitable CTZ builtin is available, use a (variable time) software emulation. */ + return rustsecp256k1_v0_4_1_ctz64_var_debruijn(x); +#endif +} + #endif /* SECP256K1_UTIL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/util.h.orig b/secp256k1-sys/depend/secp256k1/src/util.h.orig index dc16cf7..57f2184 100644 --- a/secp256k1-sys/depend/secp256k1/src/util.h.orig +++ b/secp256k1-sys/depend/secp256k1/src/util.h.orig @@ -19,9 +19,9 @@ typedef struct { void (*fn)(const char *text, void* data); const void* data; -} rustsecp256k1_v0_4_0_callback; +} rustsecp256k1_v0_4_1_callback; -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_callback_call(const rustsecp256k1_v0_4_0_callback * const cb, const char * const text) { +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_callback_call(const rustsecp256k1_v0_4_1_callback * const cb, const char * const text) { cb->fn(text, (void*)cb->data); } @@ -88,18 +88,18 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_callback_call(const rustsecp25 #define VG_CHECK_VERIFY(x,y) #endif -static SECP256K1_INLINE void *checked_malloc(const rustsecp256k1_v0_4_0_callback* cb, size_t size) { +static SECP256K1_INLINE void *checked_malloc(const rustsecp256k1_v0_4_1_callback* cb, size_t size) { void *ret = malloc(size); if (ret == NULL) { - rustsecp256k1_v0_4_0_callback_call(cb, "Out of memory"); + rustsecp256k1_v0_4_1_callback_call(cb, "Out of memory"); } return ret; } -static SECP256K1_INLINE void *checked_realloc(const rustsecp256k1_v0_4_0_callback* cb, void *ptr, size_t size) { +static SECP256K1_INLINE void *checked_realloc(const rustsecp256k1_v0_4_1_callback* cb, void *ptr, size_t size) { void *ret = realloc(ptr, size); if (ret == NULL) { - rustsecp256k1_v0_4_0_callback_call(cb, "Out of memory"); + rustsecp256k1_v0_4_1_callback_call(cb, "Out of memory"); } return ret; } @@ -113,7 +113,7 @@ static SECP256K1_INLINE void *checked_realloc(const rustsecp256k1_v0_4_0_callbac #define ALIGNMENT 16 #endif -#define ROUND_TO_ALIGN(size) (((size + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT) +#define ROUND_TO_ALIGN(size) ((((size) + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT) /* Assume there is a contiguous memory object with bounds [base, base + max_size) * of which the memory range [base, *prealloc_ptr) is already allocated for usage, @@ -141,7 +141,7 @@ static SECP256K1_INLINE void *manual_alloc(void** prealloc_ptr, size_t alloc_siz VERIFY_CHECK(((unsigned char*)*prealloc_ptr - (unsigned char*)base) % ALIGNMENT == 0); VERIFY_CHECK((unsigned char*)*prealloc_ptr - (unsigned char*)base + aligned_alloc_size <= max_size); ret = *prealloc_ptr; - *((unsigned char**)prealloc_ptr) += aligned_alloc_size; + *prealloc_ptr = (unsigned char*)*prealloc_ptr + aligned_alloc_size; return ret; } @@ -202,7 +202,7 @@ static SECP256K1_INLINE void *manual_alloc(void** prealloc_ptr, size_t 
alloc_siz #endif /* Zero memory if flag == 1. Flag must be 0 or 1. Constant time. */ -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_memczero(void *s, size_t len, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_memczero(void *s, size_t len, int flag) { unsigned char *p = (unsigned char *)s; /* Access flag with a volatile-qualified lvalue. This prevents clang from figuring out (after inlining) that flag can @@ -221,7 +221,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_memczero(void *s, size_t len, * We use this to avoid possible compiler bugs with memcmp, e.g. * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95189 */ -static SECP256K1_INLINE int rustsecp256k1_v0_4_0_memcmp_var(const void *s1, const void *s2, size_t n) { +static SECP256K1_INLINE int rustsecp256k1_v0_4_1_memcmp_var(const void *s1, const void *s2, size_t n) { const unsigned char *p1 = s1, *p2 = s2; size_t i; @@ -235,7 +235,7 @@ static SECP256K1_INLINE int rustsecp256k1_v0_4_0_memcmp_var(const void *s1, cons } /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized and non-negative.*/ -static SECP256K1_INLINE void rustsecp256k1_v0_4_0_int_cmov(int *r, const int *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_4_1_int_cmov(int *r, const int *a, int flag) { unsigned int mask0, mask1, r_masked, a_masked; /* Access flag with a volatile-qualified lvalue. This prevents clang from figuring out (after inlining) that flag can @@ -276,4 +276,69 @@ SECP256K1_GNUC_EXT typedef __int128 int128_t; # endif #endif +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif + +/* Determine the number of trailing zero bits in a (non-zero) 32-bit x. + * This function is only intended to be used as fallback for + * rustsecp256k1_v0_4_1_ctz32_var, but permits it to be tested separately. */ +static SECP256K1_INLINE int rustsecp256k1_v0_4_1_ctz32_var_debruijn(uint32_t x) { + static const uint8_t debruijn[32] = { + 0x00, 0x01, 0x02, 0x18, 0x03, 0x13, 0x06, 0x19, 0x16, 0x04, 0x14, 0x0A, + 0x10, 0x07, 0x0C, 0x1A, 0x1F, 0x17, 0x12, 0x05, 0x15, 0x09, 0x0F, 0x0B, + 0x1E, 0x11, 0x08, 0x0E, 0x1D, 0x0D, 0x1C, 0x1B + }; + return debruijn[((x & -x) * 0x04D7651F) >> 27]; +} + +/* Determine the number of trailing zero bits in a (non-zero) 64-bit x. + * This function is only intended to be used as fallback for + * rustsecp256k1_v0_4_1_ctz64_var, but permits it to be tested separately. */ +static SECP256K1_INLINE int rustsecp256k1_v0_4_1_ctz64_var_debruijn(uint64_t x) { + static const uint8_t debruijn[64] = { + 0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28, + 62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11, + 63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10, + 51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12 + }; + return debruijn[((x & -x) * 0x022FDD63CC95386D) >> 58]; +} + +/* Determine the number of trailing zero bits in a (non-zero) 32-bit x. */ +static SECP256K1_INLINE int rustsecp256k1_v0_4_1_ctz32_var(uint32_t x) { + VERIFY_CHECK(x != 0); +#if (__has_builtin(__builtin_ctz) || SECP256K1_GNUC_PREREQ(3,4)) + /* If the unsigned type is sufficient to represent the largest uint32_t, consider __builtin_ctz. */ + if (((unsigned)UINT32_MAX) == UINT32_MAX) { + return __builtin_ctz(x); + } +#endif +#if (__has_builtin(__builtin_ctzl) || SECP256K1_GNUC_PREREQ(3,4)) + /* Otherwise consider __builtin_ctzl (the unsigned long type is always at least 32 bits). 
*/ + return __builtin_ctzl(x); +#else + /* If no suitable CTZ builtin is available, use a (variable time) software emulation. */ + return rustsecp256k1_v0_4_1_ctz32_var_debruijn(x); +#endif +} + +/* Determine the number of trailing zero bits in a (non-zero) 64-bit x. */ +static SECP256K1_INLINE int rustsecp256k1_v0_4_1_ctz64_var(uint64_t x) { + VERIFY_CHECK(x != 0); +#if (__has_builtin(__builtin_ctzl) || SECP256K1_GNUC_PREREQ(3,4)) + /* If the unsigned long type is sufficient to represent the largest uint64_t, consider __builtin_ctzl. */ + if (((unsigned long)UINT64_MAX) == UINT64_MAX) { + return __builtin_ctzl(x); + } +#endif +#if (__has_builtin(__builtin_ctzll) || SECP256K1_GNUC_PREREQ(3,4)) + /* Otherwise consider __builtin_ctzll (the unsigned long long type is always at least 64 bits). */ + return __builtin_ctzll(x); +#else + /* If no suitable CTZ builtin is available, use a (variable time) software emulation. */ + return rustsecp256k1_v0_4_1_ctz64_var_debruijn(x); +#endif +} + #endif /* SECP256K1_UTIL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/valgrind_ctime_test.c b/secp256k1-sys/depend/secp256k1/src/valgrind_ctime_test.c index 394c5f5..93fe70f 100644 --- a/secp256k1-sys/depend/secp256k1/src/valgrind_ctime_test.c +++ b/secp256k1-sys/depend/secp256k1/src/valgrind_ctime_test.c @@ -5,86 +5,105 @@ ***********************************************************************/ #include -#include "include/secp256k1.h" +#include + +#include "../include/secp256k1.h" #include "assumptions.h" #include "util.h" #ifdef ENABLE_MODULE_ECDH -# include "include/rustsecp256k1_v0_4_0_ecdh.h" +# include "../include/rustsecp256k1_v0_4_1_ecdh.h" #endif #ifdef ENABLE_MODULE_RECOVERY -# include "include/rustsecp256k1_v0_4_0_recovery.h" +# include "../include/rustsecp256k1_v0_4_1_recovery.h" #endif #ifdef ENABLE_MODULE_EXTRAKEYS -# include "include/rustsecp256k1_v0_4_0_extrakeys.h" +# include "../include/rustsecp256k1_v0_4_1_extrakeys.h" #endif #ifdef ENABLE_MODULE_SCHNORRSIG -#include "include/secp256k1_schnorrsig.h" +#include "../include/secp256k1_schnorrsig.h" #endif +void run_tests(rustsecp256k1_v0_4_1_context *ctx, unsigned char *key); + int main(void) { - rustsecp256k1_v0_4_0_context* ctx; - rustsecp256k1_v0_4_0_ecdsa_signature signature; - rustsecp256k1_v0_4_0_pubkey pubkey; - size_t siglen = 74; - size_t outputlen = 33; - int i; - int ret; - unsigned char msg[32]; + rustsecp256k1_v0_4_1_context* ctx; unsigned char key[32]; - unsigned char sig[74]; - unsigned char spubkey[33]; -#ifdef ENABLE_MODULE_RECOVERY - rustsecp256k1_v0_4_0_ecdsa_recoverable_signature recoverable_signature; - int recid; -#endif -#ifdef ENABLE_MODULE_EXTRAKEYS - rustsecp256k1_v0_4_0_keypair keypair; -#endif + int ret, i; if (!RUNNING_ON_VALGRIND) { fprintf(stderr, "This test can only usefully be run inside valgrind.\n"); fprintf(stderr, "Usage: libtool --mode=execute valgrind ./valgrind_ctime_test\n"); - exit(1); + return 1; } - + ctx = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN + | SECP256K1_CONTEXT_VERIFY + | SECP256K1_CONTEXT_DECLASSIFY); /** In theory, testing with a single secret input should be sufficient: * If control flow depended on secrets the tool would generate an error. */ for (i = 0; i < 32; i++) { key[i] = i + 65; } + + run_tests(ctx, key); + + /* Test context randomisation. Do this last because it leaves the context + * tainted. 
*/ + VALGRIND_MAKE_MEM_UNDEFINED(key, 32); + ret = rustsecp256k1_v0_4_1_context_randomize(ctx, key); + VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); + CHECK(ret); + + rustsecp256k1_v0_4_1_context_destroy(ctx); + return 0; +} + +void run_tests(rustsecp256k1_v0_4_1_context *ctx, unsigned char *key) { + rustsecp256k1_v0_4_1_ecdsa_signature signature; + rustsecp256k1_v0_4_1_pubkey pubkey; + size_t siglen = 74; + size_t outputlen = 33; + int i; + int ret; + unsigned char msg[32]; + unsigned char sig[74]; + unsigned char spubkey[33]; +#ifdef ENABLE_MODULE_RECOVERY + rustsecp256k1_v0_4_1_ecdsa_recoverable_signature recoverable_signature; + int recid; +#endif +#ifdef ENABLE_MODULE_EXTRAKEYS + rustsecp256k1_v0_4_1_keypair keypair; +#endif + for (i = 0; i < 32; i++) { msg[i] = i + 1; } - ctx = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN - | SECP256K1_CONTEXT_VERIFY - | SECP256K1_CONTEXT_DECLASSIFY); - /* Test keygen. */ VALGRIND_MAKE_MEM_UNDEFINED(key, 32); - ret = rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey, key); - VALGRIND_MAKE_MEM_DEFINED(&pubkey, sizeof(rustsecp256k1_v0_4_0_pubkey)); + ret = rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey, key); + VALGRIND_MAKE_MEM_DEFINED(&pubkey, sizeof(rustsecp256k1_v0_4_1_pubkey)); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret); - CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, spubkey, &outputlen, &pubkey, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, spubkey, &outputlen, &pubkey, SECP256K1_EC_COMPRESSED) == 1); /* Test signing. */ VALGRIND_MAKE_MEM_UNDEFINED(key, 32); - ret = rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &signature, msg, key, NULL, NULL); - VALGRIND_MAKE_MEM_DEFINED(&signature, sizeof(rustsecp256k1_v0_4_0_ecdsa_signature)); + ret = rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &signature, msg, key, NULL, NULL); + VALGRIND_MAKE_MEM_DEFINED(&signature, sizeof(rustsecp256k1_v0_4_1_ecdsa_signature)); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret); - CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature)); #ifdef ENABLE_MODULE_ECDH /* Test ECDH. */ VALGRIND_MAKE_MEM_UNDEFINED(key, 32); - ret = rustsecp256k1_v0_4_0_ecdh(ctx, msg, &pubkey, key, NULL, NULL); + ret = rustsecp256k1_v0_4_1_ecdh(ctx, msg, &pubkey, key, NULL, NULL); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); #endif @@ -92,66 +111,63 @@ int main(void) { #ifdef ENABLE_MODULE_RECOVERY /* Test signing a recoverable signature. 
*/ VALGRIND_MAKE_MEM_UNDEFINED(key, 32); - ret = rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(ctx, &recoverable_signature, msg, key, NULL, NULL); + ret = rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(ctx, &recoverable_signature, msg, key, NULL, NULL); VALGRIND_MAKE_MEM_DEFINED(&recoverable_signature, sizeof(recoverable_signature)); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret); - CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &recoverable_signature)); + CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &recoverable_signature)); CHECK(recid >= 0 && recid <= 3); #endif VALGRIND_MAKE_MEM_UNDEFINED(key, 32); - ret = rustsecp256k1_v0_4_0_ec_seckey_verify(ctx, key); + ret = rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, key); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); VALGRIND_MAKE_MEM_UNDEFINED(key, 32); - ret = rustsecp256k1_v0_4_0_ec_seckey_negate(ctx, key); + ret = rustsecp256k1_v0_4_1_ec_seckey_negate(ctx, key); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); VALGRIND_MAKE_MEM_UNDEFINED(key, 32); VALGRIND_MAKE_MEM_UNDEFINED(msg, 32); - ret = rustsecp256k1_v0_4_0_ec_seckey_tweak_add(ctx, key, msg); + ret = rustsecp256k1_v0_4_1_ec_seckey_tweak_add(ctx, key, msg); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); VALGRIND_MAKE_MEM_UNDEFINED(key, 32); VALGRIND_MAKE_MEM_UNDEFINED(msg, 32); - ret = rustsecp256k1_v0_4_0_ec_seckey_tweak_mul(ctx, key, msg); + ret = rustsecp256k1_v0_4_1_ec_seckey_tweak_mul(ctx, key, msg); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); - /* Test context randomisation. Do this last because it leaves the context tainted. */ - VALGRIND_MAKE_MEM_UNDEFINED(key, 32); - ret = rustsecp256k1_v0_4_0_context_randomize(ctx, key); - VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); - CHECK(ret); - /* Test keypair_create and keypair_xonly_tweak_add. 
*/ #ifdef ENABLE_MODULE_EXTRAKEYS VALGRIND_MAKE_MEM_UNDEFINED(key, 32); - ret = rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, key); + ret = rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, key); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); /* The tweak is not treated as a secret in keypair_tweak_add */ VALGRIND_MAKE_MEM_DEFINED(msg, 32); - ret = rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(ctx, &keypair, msg); + ret = rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(ctx, &keypair, msg); + VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); + CHECK(ret == 1); + + VALGRIND_MAKE_MEM_UNDEFINED(key, 32); + VALGRIND_MAKE_MEM_UNDEFINED(&keypair, sizeof(keypair)); + ret = rustsecp256k1_v0_4_1_keypair_sec(ctx, key, &keypair); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); #endif #ifdef ENABLE_MODULE_SCHNORRSIG VALGRIND_MAKE_MEM_UNDEFINED(key, 32); - ret = rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, key); + ret = rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, key); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); - ret = rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL); + ret = rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); #endif - - rustsecp256k1_v0_4_0_context_destroy(ctx); - return 0; } diff --git a/secp256k1-sys/src/lib.rs b/secp256k1-sys/src/lib.rs index 55aeac2..330ca5b 100644 --- a/secp256k1-sys/src/lib.rs +++ b/secp256k1-sys/src/lib.rs @@ -259,91 +259,91 @@ impl hash::Hash for KeyPair { extern "C" { /// Default ECDH hash function - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ecdh_hash_function_default")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ecdh_hash_function_default")] pub static secp256k1_ecdh_hash_function_default: EcdhHashFn; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_nonce_function_rfc6979")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_nonce_function_rfc6979")] pub static secp256k1_nonce_function_rfc6979: NonceFn; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_nonce_function_default")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_nonce_function_default")] pub static secp256k1_nonce_function_default: NonceFn; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_nonce_function_bip340")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_nonce_function_bip340")] pub static secp256k1_nonce_function_bip340: SchnorrNonceFn; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_context_no_precomp")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_context_no_precomp")] pub static secp256k1_context_no_precomp: *const Context; // Contexts - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_context_preallocated_destroy")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_context_preallocated_destroy")] pub fn secp256k1_context_preallocated_destroy(cx: *mut Context); // Signatures - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ecdsa_signature_parse_der")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ecdsa_signature_parse_der")] pub fn 
secp256k1_ecdsa_signature_parse_der(cx: *const Context, sig: *mut Signature, input: *const c_uchar, in_len: size_t) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact")] pub fn secp256k1_ecdsa_signature_parse_compact(cx: *const Context, sig: *mut Signature, input64: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ecdsa_signature_parse_der_lax")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ecdsa_signature_parse_der_lax")] pub fn ecdsa_signature_parse_der_lax(cx: *const Context, sig: *mut Signature, input: *const c_uchar, in_len: size_t) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der")] pub fn secp256k1_ecdsa_signature_serialize_der(cx: *const Context, output: *mut c_uchar, out_len: *mut size_t, sig: *const Signature) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ecdsa_signature_serialize_compact")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ecdsa_signature_serialize_compact")] pub fn secp256k1_ecdsa_signature_serialize_compact(cx: *const Context, output64: *mut c_uchar, sig: *const Signature) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ecdsa_signature_normalize")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ecdsa_signature_normalize")] pub fn secp256k1_ecdsa_signature_normalize(cx: *const Context, out_sig: *mut Signature, in_sig: *const Signature) -> c_int; // Secret Keys - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ec_seckey_verify")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ec_seckey_verify")] pub fn secp256k1_ec_seckey_verify(cx: *const Context, sk: *const c_uchar) -> c_int; #[deprecated(since = "0.2.0",note = "Please use the secp256k1_ec_seckey_tweak_add function instead")] - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ec_privkey_negate")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ec_privkey_negate")] pub fn secp256k1_ec_privkey_negate(cx: *const Context, sk: *mut c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ec_seckey_negate")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ec_seckey_negate")] pub fn secp256k1_ec_seckey_negate(cx: *const Context, sk: *mut c_uchar) -> c_int; #[deprecated(since = "0.2.0",note = "Please use the secp256k1_ec_seckey_tweak_add function instead")] - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ec_privkey_tweak_add")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ec_privkey_tweak_add")] pub fn secp256k1_ec_privkey_tweak_add(cx: *const Context, sk: *mut c_uchar, tweak: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ec_seckey_tweak_add")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ec_seckey_tweak_add")] pub fn 
secp256k1_ec_seckey_tweak_add(cx: *const Context, sk: *mut c_uchar, tweak: *const c_uchar) -> c_int; #[deprecated(since = "0.2.0",note = "Please use the secp256k1_ec_seckey_tweak_mul function instead")] - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ec_privkey_tweak_mul")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ec_privkey_tweak_mul")] pub fn secp256k1_ec_privkey_tweak_mul(cx: *const Context, sk: *mut c_uchar, tweak: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ec_seckey_tweak_mul")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ec_seckey_tweak_mul")] pub fn secp256k1_ec_seckey_tweak_mul(cx: *const Context, sk: *mut c_uchar, tweak: *const c_uchar) @@ -353,65 +353,65 @@ extern "C" { #[cfg(not(fuzzing))] extern "C" { // Contexts - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_context_preallocated_size")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_context_preallocated_size")] pub fn secp256k1_context_preallocated_size(flags: c_uint) -> size_t; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_context_preallocated_create")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_context_preallocated_create")] pub fn secp256k1_context_preallocated_create(prealloc: *mut c_void, flags: c_uint) -> *mut Context; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_context_preallocated_clone_size")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_context_preallocated_clone_size")] pub fn secp256k1_context_preallocated_clone_size(cx: *const Context) -> size_t; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_context_preallocated_clone")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_context_preallocated_clone")] pub fn secp256k1_context_preallocated_clone(cx: *const Context, prealloc: *mut c_void) -> *mut Context; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_context_randomize")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_context_randomize")] pub fn secp256k1_context_randomize(cx: *mut Context, seed32: *const c_uchar) -> c_int; // Pubkeys - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ec_pubkey_parse")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ec_pubkey_parse")] pub fn secp256k1_ec_pubkey_parse(cx: *const Context, pk: *mut PublicKey, input: *const c_uchar, in_len: size_t) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ec_pubkey_serialize")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ec_pubkey_serialize")] pub fn secp256k1_ec_pubkey_serialize(cx: *const Context, output: *mut c_uchar, out_len: *mut size_t, pk: *const PublicKey, compressed: c_uint) -> c_int; // EC - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ec_pubkey_create")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ec_pubkey_create")] pub fn secp256k1_ec_pubkey_create(cx: *const Context, pk: *mut PublicKey, sk: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = 
"rustsecp256k1_v0_4_0_ec_pubkey_negate")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ec_pubkey_negate")] pub fn secp256k1_ec_pubkey_negate(cx: *const Context, pk: *mut PublicKey) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ec_pubkey_tweak_add")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ec_pubkey_tweak_add")] pub fn secp256k1_ec_pubkey_tweak_add(cx: *const Context, pk: *mut PublicKey, tweak: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ec_pubkey_tweak_mul")] pub fn secp256k1_ec_pubkey_tweak_mul(cx: *const Context, pk: *mut PublicKey, tweak: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ec_pubkey_combine")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ec_pubkey_combine")] pub fn secp256k1_ec_pubkey_combine(cx: *const Context, out: *mut PublicKey, ins: *const *const PublicKey, n: c_int) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ecdh")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ecdh")] pub fn secp256k1_ecdh( cx: *const Context, output: *mut c_uchar, @@ -422,14 +422,14 @@ extern "C" { ) -> c_int; // ECDSA - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ecdsa_verify")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ecdsa_verify")] pub fn secp256k1_ecdsa_verify(cx: *const Context, sig: *const Signature, msg32: *const c_uchar, pk: *const PublicKey) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ecdsa_sign")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ecdsa_sign")] pub fn secp256k1_ecdsa_sign(cx: *const Context, sig: *mut Signature, msg32: *const c_uchar, @@ -439,7 +439,7 @@ extern "C" { -> c_int; // Schnorr Signatures - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_schnorrsig_sign")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_schnorrsig_sign")] pub fn secp256k1_schnorrsig_sign( cx: *const Context, sig: *mut c_uchar, @@ -449,7 +449,7 @@ extern "C" { noncedata: *const c_void ) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_schnorrsig_verify")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_schnorrsig_verify")] pub fn secp256k1_schnorrsig_verify( cx: *const Context, sig64: *const c_uchar, @@ -458,28 +458,28 @@ extern "C" { ) -> c_int; // Extra keys - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_keypair_create")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_keypair_create")] pub fn secp256k1_keypair_create( cx: *const Context, keypair: *mut KeyPair, seckey: *const c_uchar, ) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_xonly_pubkey_parse")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_xonly_pubkey_parse")] pub fn secp256k1_xonly_pubkey_parse( cx: *const Context, pubkey: *mut XOnlyPublicKey, input32: *const c_uchar, ) -> c_int; - 
#[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_xonly_pubkey_serialize")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_xonly_pubkey_serialize")] pub fn secp256k1_xonly_pubkey_serialize( cx: *const Context, output32: *mut c_uchar, pubkey: *const XOnlyPublicKey, ) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey")] pub fn secp256k1_xonly_pubkey_from_pubkey( cx: *const Context, xonly_pubkey: *mut XOnlyPublicKey, @@ -487,7 +487,7 @@ extern "C" { pubkey: *const PublicKey, ) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add")] pub fn secp256k1_xonly_pubkey_tweak_add( cx: *const Context, output_pubkey: *mut PublicKey, @@ -495,7 +495,7 @@ extern "C" { tweak32: *const c_uchar, ) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_keypair_xonly_pub")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_keypair_xonly_pub")] pub fn secp256k1_keypair_xonly_pub( cx: *const Context, pubkey: *mut XOnlyPublicKey, @@ -503,14 +503,14 @@ extern "C" { keypair: *const KeyPair ) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_keypair_xonly_tweak_add")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_keypair_xonly_tweak_add")] pub fn secp256k1_keypair_xonly_tweak_add( cx: *const Context, keypair: *mut KeyPair, tweak32: *const c_uchar, ) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check")] pub fn secp256k1_xonly_pubkey_tweak_add_check( cx: *const Context, tweaked_pubkey32: *const c_uchar, @@ -530,7 +530,7 @@ extern "C" { // In: flags: which parts of the context to initialize. #[no_mangle] #[cfg(all(feature = "std", not(rust_secp_no_symbol_renaming)))] -pub unsafe extern "C" fn rustsecp256k1_v0_4_0_context_create(flags: c_uint) -> *mut Context { +pub unsafe extern "C" fn rustsecp256k1_v0_4_1_context_create(flags: c_uint) -> *mut Context { use core::mem; use std::alloc; assert!(ALIGN_TO >= mem::align_of::()); @@ -550,7 +550,7 @@ pub unsafe extern "C" fn rustsecp256k1_v0_4_0_context_create(flags: c_uint) -> * #[cfg(all(feature = "std", not(rust_secp_no_symbol_renaming)))] pub unsafe fn secp256k1_context_create(flags: c_uint) -> *mut Context { - rustsecp256k1_v0_4_0_context_create(flags) + rustsecp256k1_v0_4_1_context_create(flags) } /// A reimplementation of the C function `secp256k1_context_destroy` in rust. 
@@ -561,7 +561,7 @@ pub unsafe fn secp256k1_context_create(flags: c_uint) -> *mut Context { /// #[no_mangle] #[cfg(all(feature = "std", not(rust_secp_no_symbol_renaming)))] -pub unsafe extern "C" fn rustsecp256k1_v0_4_0_context_destroy(ctx: *mut Context) { +pub unsafe extern "C" fn rustsecp256k1_v0_4_1_context_destroy(ctx: *mut Context) { use std::alloc; secp256k1_context_preallocated_destroy(ctx); let ptr = (ctx as *mut u8).sub(ALIGN_TO); @@ -572,7 +572,7 @@ pub unsafe extern "C" fn rustsecp256k1_v0_4_0_context_destroy(ctx: *mut Context) #[cfg(all(feature = "std", not(rust_secp_no_symbol_renaming)))] pub unsafe fn secp256k1_context_destroy(ctx: *mut Context) { - rustsecp256k1_v0_4_0_context_destroy(ctx) + rustsecp256k1_v0_4_1_context_destroy(ctx) } @@ -596,7 +596,7 @@ pub unsafe fn secp256k1_context_destroy(ctx: *mut Context) { /// #[no_mangle] #[cfg(not(rust_secp_no_symbol_renaming))] -pub unsafe extern "C" fn rustsecp256k1_v0_4_0_default_illegal_callback_fn(message: *const c_char, _data: *mut c_void) { +pub unsafe extern "C" fn rustsecp256k1_v0_4_1_default_illegal_callback_fn(message: *const c_char, _data: *mut c_void) { use core::str; let msg_slice = slice::from_raw_parts(message as *const u8, strlen(message)); let msg = str::from_utf8_unchecked(msg_slice); @@ -619,7 +619,7 @@ pub unsafe extern "C" fn rustsecp256k1_v0_4_0_default_illegal_callback_fn(messag /// #[no_mangle] #[cfg(not(rust_secp_no_symbol_renaming))] -pub unsafe extern "C" fn rustsecp256k1_v0_4_0_default_error_callback_fn(message: *const c_char, _data: *mut c_void) { +pub unsafe extern "C" fn rustsecp256k1_v0_4_1_default_error_callback_fn(message: *const c_char, _data: *mut c_void) { use core::str; let msg_slice = slice::from_raw_parts(message as *const u8, strlen(message)); let msg = str::from_utf8_unchecked(msg_slice); @@ -674,9 +674,9 @@ mod fuzz_dummy { #[cfg(rust_secp_no_symbol_renaming)] compile_error!("We do not support fuzzing with rust_secp_no_symbol_renaming"); extern "C" { - fn rustsecp256k1_v0_4_0_context_preallocated_size(flags: c_uint) -> size_t; - fn rustsecp256k1_v0_4_0_context_preallocated_create(prealloc: *mut c_void, flags: c_uint) -> *mut Context; - fn rustsecp256k1_v0_4_0_context_preallocated_clone(cx: *const Context, prealloc: *mut c_void) -> *mut Context; + fn rustsecp256k1_v0_4_1_context_preallocated_size(flags: c_uint) -> size_t; + fn rustsecp256k1_v0_4_1_context_preallocated_create(prealloc: *mut c_void, flags: c_uint) -> *mut Context; + fn rustsecp256k1_v0_4_1_context_preallocated_clone(cx: *const Context, prealloc: *mut c_void) -> *mut Context; } #[cfg(feature = "lowmemory")] @@ -685,7 +685,7 @@ mod fuzz_dummy { const CTX_SIZE: usize = 1024 * (1024 + 128); // Contexts pub unsafe fn secp256k1_context_preallocated_size(flags: c_uint) -> size_t { - assert!(rustsecp256k1_v0_4_0_context_preallocated_size(flags) + std::mem::size_of::() <= CTX_SIZE); + assert!(rustsecp256k1_v0_4_1_context_preallocated_size(flags) + std::mem::size_of::() <= CTX_SIZE); CTX_SIZE } @@ -705,8 +705,8 @@ mod fuzz_dummy { if have_ctx == HAVE_CONTEXT_NONE { have_ctx = HAVE_PREALLOCATED_CONTEXT.swap(HAVE_CONTEXT_WORKING, Ordering::AcqRel); if have_ctx == HAVE_CONTEXT_NONE { - assert!(rustsecp256k1_v0_4_0_context_preallocated_size(SECP256K1_START_SIGN | SECP256K1_START_VERIFY) + std::mem::size_of::() <= CTX_SIZE); - assert_eq!(rustsecp256k1_v0_4_0_context_preallocated_create( + assert!(rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_START_SIGN | SECP256K1_START_VERIFY) + std::mem::size_of::() <= CTX_SIZE); + 
+                assert_eq!(rustsecp256k1_v0_4_1_context_preallocated_create(
                     PREALLOCATED_CONTEXT[..].as_ptr() as *mut c_void,
                     SECP256K1_START_SIGN | SECP256K1_START_VERIFY),
                     PREALLOCATED_CONTEXT[..].as_ptr() as *mut Context);
@@ -735,7 +735,7 @@ mod fuzz_dummy {
         let new_ptr = (prealloc as *mut u8).add(CTX_SIZE).sub(std::mem::size_of::<c_uint>());
         let flags = (orig_ptr as *mut c_uint).read();
         (new_ptr as *mut c_uint).write(flags);
-        rustsecp256k1_v0_4_0_context_preallocated_clone(cx, prealloc)
+        rustsecp256k1_v0_4_1_context_preallocated_clone(cx, prealloc)
     }
 
     pub unsafe fn secp256k1_context_randomize(cx: *mut Context,
diff --git a/secp256k1-sys/src/recovery.rs b/secp256k1-sys/src/recovery.rs
index a5a6384..db4f5b3 100644
--- a/secp256k1-sys/src/recovery.rs
+++ b/secp256k1-sys/src/recovery.rs
@@ -36,17 +36,17 @@ impl Default for RecoverableSignature {
 }
 
 extern "C" {
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact")]
     pub fn secp256k1_ecdsa_recoverable_signature_parse_compact(cx: *const Context, sig: *mut RecoverableSignature,
                                                                input64: *const c_uchar, recid: c_int)
                                                                -> c_int;
 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_compact")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact")]
     pub fn secp256k1_ecdsa_recoverable_signature_serialize_compact(cx: *const Context, output64: *mut c_uchar,
                                                                    recid: *mut c_int, sig: *const RecoverableSignature)
                                                                    -> c_int;
 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert")]
     pub fn secp256k1_ecdsa_recoverable_signature_convert(cx: *const Context, sig: *mut Signature,
                                                          input: *const RecoverableSignature)
                                                          -> c_int;
@@ -54,7 +54,7 @@ extern "C" {
 
 #[cfg(not(fuzzing))]
 extern "C" {
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ecdsa_sign_recoverable")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ecdsa_sign_recoverable")]
     pub fn secp256k1_ecdsa_sign_recoverable(cx: *const Context,
                                             sig: *mut RecoverableSignature,
                                             msg32: *const c_uchar,
@@ -63,7 +63,7 @@ extern "C" {
                                             noncedata: *const c_void)
                                             -> c_int;
 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_0_ecdsa_recover")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_ecdsa_recover")]
     pub fn secp256k1_ecdsa_recover(cx: *const Context,
                                    pk: *mut PublicKey,
                                    sig: *const RecoverableSignature,

From aa6bdaff3c7554ba018cceccf3ffcae8b40bf831 Mon Sep 17 00:00:00 2001
From: Dr Maxim Orlovsky
Date: Wed, 16 Jun 2021 10:56:59 +0200
Subject: [PATCH 2/3] Creating SecretKey and PublicKey from BIP-340 KeyPair

---
 secp256k1-sys/src/lib.rs | 14 +++++++++++
 src/key.rs               |  4 ++--
 src/schnorrsig.rs        | 51 ++++++++++++++++++++++++++++++++++++++--
 3 files changed, 65 insertions(+), 4 deletions(-)

diff --git a/secp256k1-sys/src/lib.rs b/secp256k1-sys/src/lib.rs
index 330ca5b..974a60f 100644
--- a/secp256k1-sys/src/lib.rs
+++ b/secp256k1-sys/src/lib.rs
@@ -518,6 +518,20 @@ extern "C" {
         internal_pubkey: *const XOnlyPublicKey,
         tweak32: *const c_uchar,
     ) -> c_int;
+
link_name = "rustsecp256k1_v0_4_1_keypair_sec")] + pub fn secp256k1_keypair_sec( + cx: *const Context, + output_seckey: *mut c_uchar, + keypair: *const KeyPair + ) -> c_int; + + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_keypair_pub")] + pub fn secp256k1_keypair_pub( + cx: *const Context, + output_pubkey: *mut PublicKey, + keypair: *const KeyPair + ) -> c_int; } /// A reimplementation of the C function `secp256k1_context_create` in rust. diff --git a/src/key.rs b/src/key.rs index 0491b8e..834137c 100644 --- a/src/key.rs +++ b/src/key.rs @@ -27,7 +27,7 @@ use constants; use ffi::{self, CPtr}; /// Secret 256-bit key used as `x` in an ECDSA signature -pub struct SecretKey([u8; constants::SECRET_KEY_SIZE]); +pub struct SecretKey(pub(crate) [u8; constants::SECRET_KEY_SIZE]); impl_array_newtype!(SecretKey, u8, constants::SECRET_KEY_SIZE); impl_pretty_debug!(SecretKey); @@ -66,7 +66,7 @@ pub const ONE_KEY: SecretKey = SecretKey([0, 0, 0, 0, 0, 0, 0, 0, /// A Secp256k1 public key, used for verification of signatures #[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] #[repr(transparent)] -pub struct PublicKey(ffi::PublicKey); +pub struct PublicKey(pub(crate) ffi::PublicKey); impl fmt::LowerHex for PublicKey { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { diff --git a/src/schnorrsig.rs b/src/schnorrsig.rs index ab3b6f4..d8c48a0 100644 --- a/src/schnorrsig.rs +++ b/src/schnorrsig.rs @@ -13,6 +13,7 @@ use core::{fmt, ptr, str}; use ffi::{self, CPtr}; use {constants, Secp256k1}; use {Message, Signing, Verification}; +use SecretKey; /// Represents a Schnorr signature. pub struct Signature([u8; constants::SCHNORRSIG_SIGNATURE_SIZE]); @@ -449,6 +450,38 @@ impl<'de> ::serde::Deserialize<'de> for PublicKey { } } +impl SecretKey { + /// Creates a new secret key using data from BIP-340 [`KeyPair`] + pub fn from_keypair(secp: &Secp256k1, keypair: &KeyPair) -> Self { + let mut sk = [0; constants::SECRET_KEY_SIZE]; + unsafe { + let ret = ffi::secp256k1_keypair_sec( + secp.ctx, + sk.as_mut_c_ptr(), + keypair.as_ptr() + ); + debug_assert_eq!(ret, 1); + } + SecretKey(sk) + } +} + +impl ::key::PublicKey { + /// Creates a new compressed public key key using data from BIP-340 [`KeyPair`] + pub fn from_keypair(secp: &Secp256k1, keypair: &KeyPair) -> Self { + unsafe { + let mut pk = ffi::PublicKey::new(); + let ret = ffi::secp256k1_keypair_pub( + secp.ctx, + &mut pk, + keypair.as_ptr() + ); + debug_assert_eq!(ret, 1); + ::key::PublicKey(pk) + } + } +} + impl Secp256k1 { fn schnorrsig_sign_helper( &self, @@ -573,6 +606,7 @@ mod tests { #[cfg(target_arch = "wasm32")] use wasm_bindgen_test::wasm_bindgen_test as test; + use SecretKey; macro_rules! 
     macro_rules! hex_32 {
         ($hex:expr) => {{
@@ -669,7 +703,7 @@ mod tests {
     }
 
     #[test]
-    fn pubkey_from_slice() {
+    fn test_pubkey_from_slice() {
         assert_eq!(PublicKey::from_slice(&[]), Err(InvalidPublicKey));
         assert_eq!(PublicKey::from_slice(&[1, 2, 3]), Err(InvalidPublicKey));
         let pk = PublicKey::from_slice(&[
@@ -681,7 +715,7 @@ mod tests {
     }
 
     #[test]
-    fn pubkey_serialize_roundtrip() {
+    fn test_pubkey_serialize_roundtrip() {
         let secp = Secp256k1::new();
         let (_, pubkey) = secp.generate_schnorrsig_keypair(&mut thread_rng());
         let ser = pubkey.serialize();
@@ -689,6 +723,19 @@ mod tests {
         assert_eq!(pubkey, pubkey2);
     }
 
+    #[test]
+    fn test_xonly_key_extraction() {
+        let secp = Secp256k1::new();
+        let sk_str = "688C77BC2D5AAFF5491CF309D4753B732135470D05B7B2CD21ADD0744FE97BEF";
+        let keypair = KeyPair::from_seckey_str(&secp, sk_str).unwrap();
+        let sk = SecretKey::from_keypair(&secp, &keypair);
+        assert_eq!(SecretKey::from_str(sk_str).unwrap(), sk);
+        let pk = ::key::PublicKey::from_keypair(&secp, &keypair);
+        assert_eq!(::key::PublicKey::from_secret_key(&secp, &sk), pk);
+        let xpk = PublicKey::from_keypair(&secp, &keypair);
+        assert_eq!(PublicKey::from(pk), xpk);
+    }
+
     #[test]
     fn test_pubkey_from_bad_slice() {
         // Bad sizes

From 455ee57ba4051bb2cfea5f5f675378170fb42c7f Mon Sep 17 00:00:00 2001
From: Dr Maxim Orlovsky
Date: Fri, 18 Jun 2021 23:33:37 +0200
Subject: [PATCH 3/3] Bump sys version to 0.4.1

---
 Cargo.toml               |  2 +-
 secp256k1-sys/Cargo.toml |  2 +-
 secp256k1-sys/src/lib.rs | 26 ++++++++++++--------------
 3 files changed, 14 insertions(+), 16 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index f1b580f..986c6d4 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -29,7 +29,7 @@ global-context = ["std", "rand-std", "global-context-less-secure"]
 global-context-less-secure = []
 
 [dependencies]
-secp256k1-sys = { version = "0.4.0", default-features = false, path = "./secp256k1-sys" }
+secp256k1-sys = { version = "0.4.1", default-features = false, path = "./secp256k1-sys" }
 bitcoin_hashes = { version = "0.9", optional = true }
 rand = { version = "0.6", default-features = false, optional = true }
 serde = { version = "1.0", default-features = false, optional = true }
diff --git a/secp256k1-sys/Cargo.toml b/secp256k1-sys/Cargo.toml
index 94b4348..9d5f4fb 100644
--- a/secp256k1-sys/Cargo.toml
+++ b/secp256k1-sys/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "secp256k1-sys"
-version = "0.4.0"
+version = "0.4.1"
 authors = [ "Dawid Ciężarkiewicz <dpc@ucore.info>",
             "Andrew Poelstra <apoelstra@wpsoftware.net>",
             "Steven Roose <steven@stevenroose.org>" ]
diff --git a/secp256k1-sys/src/lib.rs b/secp256k1-sys/src/lib.rs
index 974a60f..7577eca 100644
--- a/secp256k1-sys/src/lib.rs
+++ b/secp256k1-sys/src/lib.rs
@@ -348,6 +348,18 @@ extern "C" {
                            sk: *mut c_uchar,
                            tweak: *const c_uchar)
                            -> c_int;
+
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_keypair_sec")]
+    pub fn secp256k1_keypair_sec(cx: *const Context,
+                                 output_seckey: *mut c_uchar,
+                                 keypair: *const KeyPair)
+                                 -> c_int;
+
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_keypair_pub")]
+    pub fn secp256k1_keypair_pub(cx: *const Context,
+                                 output_pubkey: *mut PublicKey,
+                                 keypair: *const KeyPair)
+                                 -> c_int;
 }
 
 #[cfg(not(fuzzing))]
@@ -518,20 +530,6 @@ extern "C" {
         internal_pubkey: *const XOnlyPublicKey,
         tweak32: *const c_uchar,
     ) -> c_int;
-
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_keypair_sec")]
-    pub fn secp256k1_keypair_sec(
-        cx: *const Context,
-        output_seckey: *mut c_uchar,
-        keypair: *const KeyPair
-    ) -> c_int;
-
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_4_1_keypair_pub")]
-    pub fn secp256k1_keypair_pub(
-        cx: *const Context,
-        output_pubkey: *mut PublicKey,
-        keypair: *const KeyPair
-    ) -> c_int;
 }
 
 /// A reimplementation of the C function `secp256k1_context_create` in rust.
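Taken together, the `secp256k1_keypair_sec` / `secp256k1_keypair_pub` bindings added in this series let callers recover both halves of a BIP-340 key pair, as exercised by `test_xonly_key_extraction` above. A usage sketch, assuming the `secp256k1` crate as patched here (0.20-era API with the `schnorrsig` module):

    use std::str::FromStr;
    use secp256k1::{key, schnorrsig, Secp256k1, SecretKey};

    fn main() {
        let secp = Secp256k1::new();
        let sk_str = "688C77BC2D5AAFF5491CF309D4753B732135470D05B7B2CD21ADD0744FE97BEF";
        // The BIP-340 key pair is the single source of truth.
        let keypair = schnorrsig::KeyPair::from_seckey_str(&secp, sk_str).unwrap();
        // Recover the plain secret key backing the key pair.
        let sk = SecretKey::from_keypair(&secp, &keypair);
        assert_eq!(sk, SecretKey::from_str(sk_str).unwrap());
        // Recover the full compressed public key; it matches derivation from `sk`.
        let pk = key::PublicKey::from_keypair(&secp, &keypair);
        assert_eq!(pk, key::PublicKey::from_secret_key(&secp, &sk));
        // The x-only (BIP-340) key is the same point minus the parity byte.
        let xonly = schnorrsig::PublicKey::from_keypair(&secp, &keypair);
        assert_eq!(xonly, schnorrsig::PublicKey::from(pk));
    }

The assertions mirror the invariants the new test checks: secret key, compressed public key, and x-only public key all describe the same underlying key material.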