Merge pull request #306 from LNP-BP/extrakeys/update-1

Extracting SecretKey and uncompressed PublicKey from BIP-350 KeyPair
Andrew Poelstra 2021-06-18 22:13:47 +00:00 committed by GitHub
commit 05f4278499
108 changed files with 10090 additions and 7178 deletions

@ -29,7 +29,7 @@ global-context = ["std", "rand-std", "global-context-less-secure"]
global-context-less-secure = []
[dependencies]
secp256k1-sys = { version = "0.4.0", default-features = false, path = "./secp256k1-sys" }
secp256k1-sys = { version = "0.4.1", default-features = false, path = "./secp256k1-sys" }
bitcoin_hashes = { version = "0.9", optional = true }
rand = { version = "0.6", default-features = false, optional = true }
serde = { version = "1.0", default-features = false, optional = true }

@ -1,6 +1,6 @@
[package]
name = "secp256k1-sys"
version = "0.4.0"
version = "0.4.1"
authors = [ "Dawid Ciężarkiewicz <dpc@ucore.info>",
"Andrew Poelstra <apoelstra@wpsoftware.net>",
"Steven Roose <steven@stevenroose.org>" ]
@ -12,7 +12,7 @@ description = "FFI for Pieter Wuille's `libsecp256k1` library."
keywords = [ "secp256k1", "libsecp256k1", "ffi" ]
readme = "README.md"
build = "build.rs"
links = "rustsecp256k1_v0_4_0"
links = "rustsecp256k1_v0_4_1"
# Should make docs.rs show all functions, even those behind non-default features
[package.metadata.docs.rs]

@ -1,2 +1,2 @@
# This file was automatically created by ./vendor-libsecp.sh
98dac87839838b86094f1bccc71cc20e67b146cc
1758a92ffd896af533b142707e9892ea6e15e5db

@ -0,0 +1,315 @@
env:
WIDEMUL: auto
STATICPRECOMPUTATION: yes
ECMULTGENPRECISION: auto
ASM: no
BUILD: check
WITH_VALGRIND: yes
EXTRAFLAGS:
HOST:
ECDH: no
RECOVERY: no
SCHNORRSIG: no
EXPERIMENTAL: no
CTIMETEST: yes
BENCH: yes
TEST_ITERS:
BENCH_ITERS: 2
MAKEFLAGS: -j2
cat_logs_snippet: &CAT_LOGS
always:
cat_tests_log_script:
- cat tests.log || true
cat_exhaustive_tests_log_script:
- cat exhaustive_tests.log || true
cat_valgrind_ctime_test_log_script:
- cat valgrind_ctime_test.log || true
cat_bench_log_script:
- cat bench.log || true
on_failure:
cat_config_log_script:
- cat config.log || true
cat_test_env_script:
- cat test_env.log || true
cat_ci_env_script:
- env
merge_base_script_snippet: &MERGE_BASE
merge_base_script:
- if [ "$CIRRUS_PR" = "" ]; then exit 0; fi
- git fetch $CIRRUS_REPO_CLONE_URL $CIRRUS_BASE_BRANCH
- git config --global user.email "ci@ci.ci"
- git config --global user.name "ci"
- git merge FETCH_HEAD # Merge base to detect silent merge conflicts
task:
name: "x86_64: Linux (Debian stable)"
container:
dockerfile: ci/linux-debian.Dockerfile
# Reduce number of CPUs to be able to do more builds in parallel.
cpu: 1
# More than enough for our scripts.
memory: 1G
matrix: &ENV_MATRIX
- env: {WIDEMUL: int64, RECOVERY: yes}
- env: {WIDEMUL: int64, ECDH: yes, EXPERIMENTAL: yes, SCHNORRSIG: yes}
- env: {WIDEMUL: int128}
- env: {WIDEMUL: int128, RECOVERY: yes, EXPERIMENTAL: yes, SCHNORRSIG: yes}
- env: {WIDEMUL: int128, ECDH: yes, EXPERIMENTAL: yes, SCHNORRSIG: yes}
- env: {WIDEMUL: int128, ASM: x86_64}
- env: { RECOVERY: yes, EXPERIMENTAL: yes, SCHNORRSIG: yes}
- env: { STATICPRECOMPUTATION: no}
- env: {BUILD: distcheck, WITH_VALGRIND: no, CTIMETEST: no, BENCH: no}
- env: {CPPFLAGS: -DDETERMINISTIC}
- env: {CFLAGS: -O0, CTIMETEST: no}
- env: { ECMULTGENPRECISION: 2 }
- env: { ECMULTGENPRECISION: 8 }
matrix:
- env:
CC: gcc
- env:
CC: clang
<< : *MERGE_BASE
test_script:
- ./ci/cirrus.sh
<< : *CAT_LOGS
task:
name: "i686: Linux (Debian stable)"
container:
dockerfile: ci/linux-debian.Dockerfile
cpu: 1
memory: 1G
env:
HOST: i686-linux-gnu
ECDH: yes
RECOVERY: yes
EXPERIMENTAL: yes
SCHNORRSIG: yes
matrix:
- env:
CC: i686-linux-gnu-gcc
- env:
CC: clang --target=i686-pc-linux-gnu -isystem /usr/i686-linux-gnu/include
<< : *MERGE_BASE
test_script:
- ./ci/cirrus.sh
<< : *CAT_LOGS
task:
name: "x86_64: macOS Catalina"
macos_instance:
image: catalina-base
env:
HOMEBREW_NO_AUTO_UPDATE: 1
HOMEBREW_NO_INSTALL_CLEANUP: 1
# Cirrus gives us a fixed number of 12 virtual CPUs. Not that we even have that many jobs at the moment...
MAKEFLAGS: -j13
matrix:
<< : *ENV_MATRIX
matrix:
- env:
CC: gcc-9
- env:
CC: clang
# Update Command Line Tools
# Uncomment this if the Command Line Tools on the CirrusCI macOS image are too old to brew valgrind.
# See https://apple.stackexchange.com/a/195963 for the implementation.
## update_clt_script:
## - system_profiler SPSoftwareDataType
## - touch /tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress
## - |-
## PROD=$(softwareupdate -l | grep "*.*Command Line" | tail -n 1 | awk -F"*" '{print $2}' | sed -e 's/^ *//' | sed 's/Label: //g' | tr -d '\n')
## # For debugging
## - softwareupdate -l && echo "PROD: $PROD"
## - softwareupdate -i "$PROD" --verbose
## - rm /tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress
##
brew_valgrind_pre_script:
- brew config
- brew tap --shallow LouisBrunner/valgrind
# Fetch valgrind source but don't build it yet.
- brew fetch --HEAD LouisBrunner/valgrind/valgrind
brew_valgrind_cache:
# This is $(brew --cellar valgrind) but command substitution does not work here.
folder: /usr/local/Cellar/valgrind
# Rebuild cache if ...
fingerprint_script:
# ... macOS version changes:
- sw_vers
# ... brew changes:
- brew config
# ... valgrind changes:
- git -C "$(brew --cache)/valgrind--git" rev-parse HEAD
populate_script:
# If there's no hit in the cache, build and install valgrind.
- brew install --HEAD LouisBrunner/valgrind/valgrind
brew_valgrind_post_script:
# If we have restored valgrind from the cache, tell brew to create the symlinks in the PATH.
# If we haven't restored from the cache (and have just run brew install), this is a no-op.
- brew link valgrind
brew_script:
- brew install automake libtool gcc@9
<< : *MERGE_BASE
test_script:
- ./ci/cirrus.sh
<< : *CAT_LOGS
task:
name: "s390x (big-endian): Linux (Debian stable, QEMU)"
container:
dockerfile: ci/linux-debian.Dockerfile
cpu: 1
memory: 1G
env:
WRAPPER_CMD: qemu-s390x
TEST_ITERS: 16
HOST: s390x-linux-gnu
WITH_VALGRIND: no
ECDH: yes
RECOVERY: yes
EXPERIMENTAL: yes
SCHNORRSIG: yes
CTIMETEST: no
<< : *MERGE_BASE
test_script:
# https://sourceware.org/bugzilla/show_bug.cgi?id=27008
- rm /etc/ld.so.cache
- ./ci/cirrus.sh
<< : *CAT_LOGS
task:
name: "ARM32: Linux (Debian stable, QEMU)"
container:
dockerfile: ci/linux-debian.Dockerfile
cpu: 1
memory: 1G
env:
WRAPPER_CMD: qemu-arm
TEST_ITERS: 16
HOST: arm-linux-gnueabihf
WITH_VALGRIND: no
ECDH: yes
RECOVERY: yes
EXPERIMENTAL: yes
SCHNORRSIG: yes
CTIMETEST: no
matrix:
- env: {}
- env: {ASM: arm}
<< : *MERGE_BASE
test_script:
- ./ci/cirrus.sh
<< : *CAT_LOGS
task:
name: "ARM64: Linux (Debian stable, QEMU)"
container:
dockerfile: ci/linux-debian.Dockerfile
cpu: 1
memory: 1G
env:
WRAPPER_CMD: qemu-aarch64
TEST_ITERS: 16
HOST: aarch64-linux-gnu
WITH_VALGRIND: no
ECDH: yes
RECOVERY: yes
EXPERIMENTAL: yes
SCHNORRSIG: yes
CTIMETEST: no
<< : *MERGE_BASE
test_script:
- ./ci/cirrus.sh
<< : *CAT_LOGS
task:
name: "ppc64le: Linux (Debian stable, QEMU)"
container:
dockerfile: ci/linux-debian.Dockerfile
cpu: 1
memory: 1G
env:
WRAPPER_CMD: qemu-ppc64le
TEST_ITERS: 16
HOST: powerpc64le-linux-gnu
WITH_VALGRIND: no
ECDH: yes
RECOVERY: yes
EXPERIMENTAL: yes
SCHNORRSIG: yes
CTIMETEST: no
<< : *MERGE_BASE
test_script:
- ./ci/cirrus.sh
<< : *CAT_LOGS
task:
name: "x86_64 (mingw32-w64): Windows (Debian stable, Wine)"
container:
dockerfile: ci/linux-debian.Dockerfile
cpu: 1
memory: 1G
env:
WRAPPER_CMD: wine64-stable
TEST_ITERS: 16
HOST: x86_64-w64-mingw32
WITH_VALGRIND: no
ECDH: yes
RECOVERY: yes
EXPERIMENTAL: yes
SCHNORRSIG: yes
CTIMETEST: no
<< : *MERGE_BASE
test_script:
- ./ci/cirrus.sh
<< : *CAT_LOGS
# Sanitizers
task:
container:
dockerfile: ci/linux-debian.Dockerfile
cpu: 1
memory: 1G
env:
ECDH: yes
RECOVERY: yes
EXPERIMENTAL: yes
SCHNORRSIG: yes
CTIMETEST: no
EXTRAFLAGS: "--disable-openssl-tests"
matrix:
- name: "Valgrind (memcheck)"
env:
# The `--error-exitcode` is required to make the test fail if valgrind found errors, otherwise it'll return 0 (https://www.valgrind.org/docs/manual/manual-core.html)
WRAPPER_CMD: "valgrind --error-exitcode=42"
TEST_ITERS: 16
- name: "UBSan, ASan, LSan"
env:
CFLAGS: "-fsanitize=undefined,address"
CFLAGS_FOR_BUILD: "-fsanitize=undefined,address"
UBSAN_OPTIONS: "print_stacktrace=1:halt_on_error=1"
ASAN_OPTIONS: "strict_string_checks=1:detect_stack_use_after_return=1:detect_leaks=1"
LSAN_OPTIONS: "use_unaligned=1"
TEST_ITERS: 32
# Try to cover many configurations with just a tiny matrix.
matrix:
- env:
ASM: auto
STATICPRECOMPUTATION: yes
- env:
ASM: no
STATICPRECOMPUTATION: no
ECMULTGENPRECISION: 2
matrix:
- env:
CC: clang
- env:
HOST: i686-linux-gnu
CC: i686-linux-gnu-gcc
<< : *MERGE_BASE
test_script:
- ./ci/cirrus.sh
<< : *CAT_LOGS

@ -33,6 +33,14 @@ libtool
*~
*.log
*.trs
coverage/
coverage.html
coverage.*.html
*.gcda
*.gcno
*.gcov
src/libsecp256k1-config.h
src/libsecp256k1-config.h.in
src/ecmult_static_context.h

@ -1,109 +0,0 @@
language: c
os:
- linux
- osx
dist: bionic
# Valgrind currently supports up to macOS 10.13; the latest Xcode for that version is 10.1
osx_image: xcode10.1
addons:
apt:
packages:
- libgmp-dev
- valgrind
- libtool-bin
compiler:
- clang
- gcc
env:
global:
- WIDEMUL=auto BIGNUM=auto STATICPRECOMPUTATION=yes ECMULTGENPRECISION=auto ASM=no BUILD=check WITH_VALGRIND=yes RUN_VALGRIND=no EXTRAFLAGS= HOST= ECDH=no RECOVERY=no SCHNORRSIG=no EXPERIMENTAL=no CTIMETEST=yes BENCH=yes ITERS=2
matrix:
- WIDEMUL=int64 RECOVERY=yes
- WIDEMUL=int64 ECDH=yes EXPERIMENTAL=yes SCHNORRSIG=yes
- WIDEMUL=int128
- WIDEMUL=int128 RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes
- WIDEMUL=int128 ECDH=yes EXPERIMENTAL=yes SCHNORRSIG=yes
- WIDEMUL=int128 ASM=x86_64
- BIGNUM=no
- BIGNUM=no RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes
- BIGNUM=no STATICPRECOMPUTATION=no
- BUILD=distcheck WITH_VALGRIND=no CTIMETEST=no BENCH=no
- CPPFLAGS=-DDETERMINISTIC
- CFLAGS=-O0 CTIMETEST=no
- CFLAGS="-fsanitize=undefined -fno-omit-frame-pointer" LDFLAGS="-fsanitize=undefined -fno-omit-frame-pointer" UBSAN_OPTIONS="print_stacktrace=1:halt_on_error=1" BIGNUM=no ASM=x86_64 ECDH=yes RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes CTIMETEST=no
- ECMULTGENPRECISION=2
- ECMULTGENPRECISION=8
- RUN_VALGRIND=yes BIGNUM=no ASM=x86_64 ECDH=yes RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes EXTRAFLAGS="--disable-openssl-tests" BUILD=
matrix:
fast_finish: true
include:
- compiler: clang
os: linux
env: HOST=i686-linux-gnu
addons:
apt:
packages:
- gcc-multilib
- libgmp-dev:i386
- valgrind
- libtool-bin
- libc6-dbg:i386
- compiler: clang
env: HOST=i686-linux-gnu
os: linux
addons:
apt:
packages:
- gcc-multilib
- valgrind
- libtool-bin
- libc6-dbg:i386
- compiler: gcc
env: HOST=i686-linux-gnu
os: linux
addons:
apt:
packages:
- gcc-multilib
- valgrind
- libtool-bin
- libc6-dbg:i386
- compiler: gcc
os: linux
env: HOST=i686-linux-gnu
addons:
apt:
packages:
- gcc-multilib
- libgmp-dev:i386
- valgrind
- libtool-bin
- libc6-dbg:i386
# S390x build (big endian system)
- compiler: gcc
env: HOST=s390x-unknown-linux-gnu ECDH=yes RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes CTIMETEST=
arch: s390x
# We use this to install macOS dependencies instead of the built-in `homebrew` plugin,
# because Xcode versions earlier than 11 have a bug that requires updating the system, which overall takes ~8 minutes.
# https://travis-ci.community/t/macos-build-fails-because-of-homebrew-bundle-unknown-command/7296
before_install:
- if [ "${TRAVIS_OS_NAME}" = "osx" ]; then HOMEBREW_NO_AUTO_UPDATE=1 brew install gmp valgrind gcc@9; fi
before_script: ./autogen.sh
# travis auto terminates jobs that go for 10 minutes without printing to stdout, but travis_wait doesn't work well with forking programs like valgrind (https://docs.travis-ci.com/user/common-build-problems/#build-times-out-because-no-output-was-received https://github.com/bitcoin-core/secp256k1/pull/750#issuecomment-623476860)
script:
- function keep_alive() { while true; do echo -en "\a"; sleep 60; done }
- keep_alive &
- ./contrib/travis.sh
- kill %keep_alive
after_script:
- cat ./tests.log
- cat ./exhaustive_tests.log
- cat ./valgrind_ctime_test.log
- cat ./bench.log
- $CC --version
- valgrind --version

@ -2,7 +2,7 @@ ACLOCAL_AMFLAGS = -I build-aux/m4
lib_LTLIBRARIES = libsecp256k1.la
include_HEADERS = include/secp256k1.h
include_HEADERS += include/rustsecp256k1_v0_4_0_preallocated.h
include_HEADERS += include/rustsecp256k1_v0_4_1_preallocated.h
noinst_HEADERS =
noinst_HEADERS += src/scalar.h
noinst_HEADERS += src/scalar_4x64.h
@ -14,8 +14,6 @@ noinst_HEADERS += src/scalar_8x32_impl.h
noinst_HEADERS += src/scalar_low_impl.h
noinst_HEADERS += src/group.h
noinst_HEADERS += src/group_impl.h
noinst_HEADERS += src/num_gmp.h
noinst_HEADERS += src/num_gmp_impl.h
noinst_HEADERS += src/ecdsa.h
noinst_HEADERS += src/ecdsa_impl.h
noinst_HEADERS += src/eckey.h
@ -26,14 +24,16 @@ noinst_HEADERS += src/ecmult_const.h
noinst_HEADERS += src/ecmult_const_impl.h
noinst_HEADERS += src/ecmult_gen.h
noinst_HEADERS += src/ecmult_gen_impl.h
noinst_HEADERS += src/num.h
noinst_HEADERS += src/num_impl.h
noinst_HEADERS += src/field_10x26.h
noinst_HEADERS += src/field_10x26_impl.h
noinst_HEADERS += src/field_5x52.h
noinst_HEADERS += src/field_5x52_impl.h
noinst_HEADERS += src/field_5x52_int128_impl.h
noinst_HEADERS += src/field_5x52_asm_impl.h
noinst_HEADERS += src/modinv32.h
noinst_HEADERS += src/modinv32_impl.h
noinst_HEADERS += src/modinv64.h
noinst_HEADERS += src/modinv64_impl.h
noinst_HEADERS += src/assumptions.h
noinst_HEADERS += src/util.h
noinst_HEADERS += src/scratch.h
@ -52,7 +52,7 @@ noinst_HEADERS += contrib/lax_der_privatekey_parsing.h
noinst_HEADERS += contrib/lax_der_privatekey_parsing.c
if USE_EXTERNAL_ASM
COMMON_LIB = librustsecp256k1_v0_4_0_common.la
COMMON_LIB = librustsecp256k1_v0_4_1_common.la
noinst_LTLIBRARIES = $(COMMON_LIB)
else
COMMON_LIB =
@ -63,16 +63,16 @@ pkgconfig_DATA = libsecp256k1.pc
if USE_EXTERNAL_ASM
if USE_ASM_ARM
librustsecp256k1_v0_4_0_common_la_SOURCES = src/asm/field_10x26_arm.s
librustsecp256k1_v0_4_1_common_la_SOURCES = src/asm/field_10x26_arm.s
endif
endif
librustsecp256k1_v0_4_0_la_SOURCES = src/secp256k1.c
librustsecp256k1_v0_4_0_la_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/include -I$(top_srcdir)/src $(SECP_INCLUDES)
librustsecp256k1_v0_4_0_la_LIBADD = $(SECP_LIBS) $(COMMON_LIB)
librustsecp256k1_v0_4_1_la_SOURCES = src/secp256k1.c
librustsecp256k1_v0_4_1_la_CPPFLAGS = -I$(top_srcdir)/include -I$(top_srcdir)/src $(SECP_INCLUDES)
librustsecp256k1_v0_4_1_la_LIBADD = $(SECP_LIBS) $(COMMON_LIB)
if VALGRIND_ENABLED
librustsecp256k1_v0_4_0_la_CPPFLAGS += -DVALGRIND
librustsecp256k1_v0_4_1_la_CPPFLAGS += -DVALGRIND
endif
noinst_PROGRAMS =
@ -81,27 +81,27 @@ noinst_PROGRAMS += bench_verify bench_sign bench_internal bench_ecmult
bench_verify_SOURCES = src/bench_verify.c
bench_verify_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB)
# SECP_TEST_INCLUDES are only used here for CRYPTO_CPPFLAGS
bench_verify_CPPFLAGS = -DSECP256K1_BUILD $(SECP_TEST_INCLUDES)
bench_verify_CPPFLAGS = $(SECP_TEST_INCLUDES)
bench_sign_SOURCES = src/bench_sign.c
bench_sign_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB)
bench_internal_SOURCES = src/bench_internal.c
bench_internal_LDADD = $(SECP_LIBS) $(COMMON_LIB)
bench_internal_CPPFLAGS = -DSECP256K1_BUILD $(SECP_INCLUDES)
bench_internal_CPPFLAGS = $(SECP_INCLUDES)
bench_ecmult_SOURCES = src/bench_ecmult.c
bench_ecmult_LDADD = $(SECP_LIBS) $(COMMON_LIB)
bench_ecmult_CPPFLAGS = -DSECP256K1_BUILD $(SECP_INCLUDES)
bench_ecmult_CPPFLAGS = $(SECP_INCLUDES)
endif
TESTS =
if USE_TESTS
noinst_PROGRAMS += tests
tests_SOURCES = src/tests.c
tests_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/src -I$(top_srcdir)/include $(SECP_INCLUDES) $(SECP_TEST_INCLUDES)
tests_CPPFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/include $(SECP_INCLUDES) $(SECP_TEST_INCLUDES)
if VALGRIND_ENABLED
tests_CPPFLAGS += -DVALGRIND
noinst_PROGRAMS += valgrind_ctime_test
valgrind_ctime_test_SOURCES = src/valgrind_ctime_test.c
valgrind_ctime_test_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_LIBS) $(COMMON_LIB)
valgrind_ctime_test_LDADD = libsecp256k1.la $(SECP_LIBS) $(COMMON_LIB)
endif
if !ENABLE_COVERAGE
tests_CPPFLAGS += -DVERIFY
@ -114,7 +114,7 @@ endif
if USE_EXHAUSTIVE_TESTS
noinst_PROGRAMS += exhaustive_tests
exhaustive_tests_SOURCES = src/tests_exhaustive.c
exhaustive_tests_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/src $(SECP_INCLUDES)
exhaustive_tests_CPPFLAGS = -I$(top_srcdir)/src $(SECP_INCLUDES)
if !ENABLE_COVERAGE
exhaustive_tests_CPPFLAGS += -DVERIFY
endif
@ -129,12 +129,12 @@ CPPFLAGS_FOR_BUILD +=-I$(top_srcdir) -I$(builddir)/src
gen_context_OBJECTS = gen_context.o
gen_context_BIN = gen_context$(BUILD_EXEEXT)
gen_%.o: src/gen_%.c src/libsecp256k1-config.h
$(CC_FOR_BUILD) $(CPPFLAGS_FOR_BUILD) $(CFLAGS_FOR_BUILD) -c $< -o $@
$(CC_FOR_BUILD) $(DEFS) $(CPPFLAGS_FOR_BUILD) $(CFLAGS_FOR_BUILD) -c $< -o $@
$(gen_context_BIN): $(gen_context_OBJECTS)
$(CC_FOR_BUILD) $(CFLAGS_FOR_BUILD) $(LDFLAGS_FOR_BUILD) $^ -o $@
$(librustsecp256k1_v0_4_0_la_OBJECTS): src/ecmult_static_context.h
$(librustsecp256k1_v0_4_1_la_OBJECTS): src/ecmult_static_context.h
$(tests_OBJECTS): src/ecmult_static_context.h
$(bench_internal_OBJECTS): src/ecmult_static_context.h
$(bench_ecmult_OBJECTS): src/ecmult_static_context.h

@ -1,7 +1,7 @@
libsecp256k1
============
[![Build Status](https://travis-ci.org/bitcoin-core/secp256k1.svg?branch=master)](https://travis-ci.org/bitcoin-core/secp256k1)
[![Build Status](https://api.cirrus-ci.com/github/bitcoin-core/secp256k1.svg?branch=master)](https://cirrus-ci.com/github/bitcoin-core/secp256k1)
Optimized C library for ECDSA signatures and secret/public key operations on curve secp256k1.
@ -34,11 +34,11 @@ Implementation details
* Optimized implementation of arithmetic modulo the curve's field size (2^256 - 0x1000003D1).
* Using 5 52-bit limbs (including hand-optimized assembly for x86_64, by Diederik Huys).
* Using 10 26-bit limbs (including hand-optimized assembly for 32-bit ARM, by Wladimir J. van der Laan).
* Field inverses and square roots using a sliding window over blocks of 1s (by Peter Dettman).
* Scalar operations
* Optimized implementation without data-dependent branches of arithmetic modulo the curve's order.
* Using 4 64-bit limbs (relying on __int128 support in the compiler).
* Using 8 32-bit limbs.
* Modular inverses (both field elements and scalars) based on [safegcd](https://gcd.cr.yp.to/index.html) with some modifications, and a variable-time variant (by Peter Dettman).
* Group operations
* Point addition formula specifically simplified for the curve equation (y^2 = x^3 + 7).
* Use addition between points in Jacobian and affine coordinates where possible.
@ -96,7 +96,8 @@ To create a report, `gcovr` is recommended, as it includes branch coverage repor
To create a HTML report with coloured and annotated source code:
$ gcovr --exclude 'src/bench*' --html --html-details -o coverage.html
$ mkdir -p coverage
$ gcovr --exclude 'src/bench*' --html --html-details -o coverage/coverage.html
Reporting a vulnerability
------------

@ -75,15 +75,10 @@ if test x"$has_libcrypto" = x"yes" && test x"$has_openssl_ec" = x; then
fi
])
dnl
AC_DEFUN([SECP_GMP_CHECK],[
if test x"$has_gmp" != x"yes"; then
AC_DEFUN([SECP_VALGRIND_CHECK],[
if test x"$has_valgrind" != x"yes"; then
CPPFLAGS_TEMP="$CPPFLAGS"
CPPFLAGS="$GMP_CPPFLAGS $CPPFLAGS"
LIBS_TEMP="$LIBS"
LIBS="$GMP_LIBS $LIBS"
AC_CHECK_HEADER(gmp.h,[AC_CHECK_LIB(gmp, __gmpz_init,[has_gmp=yes; GMP_LIBS="$GMP_LIBS -lgmp"; AC_DEFINE(HAVE_LIBGMP,1,[Define this symbol if libgmp is installed])])])
CPPFLAGS="$CPPFLAGS_TEMP"
LIBS="$LIBS_TEMP"
CPPFLAGS="$VALGRIND_CPPFLAGS $CPPFLAGS"
AC_CHECK_HEADER([valgrind/memcheck.h], [has_valgrind=yes; AC_DEFINE(HAVE_VALGRIND,1,[Define this symbol if valgrind is installed])])
fi
])

@ -3,46 +3,49 @@
set -e
set -x
if [ "$HOST" = "i686-linux-gnu" ]
then
export CC="$CC -m32"
fi
if [ "$TRAVIS_OS_NAME" = "osx" ] && [ "$TRAVIS_COMPILER" = "gcc" ]
then
export CC="gcc-9"
fi
export LC_ALL=C
env >> test_env.log
$CC -v || true
valgrind --version || true
./autogen.sh
./configure \
--enable-experimental="$EXPERIMENTAL" \
--with-test-override-wide-multiply="$WIDEMUL" --with-bignum="$BIGNUM" --with-asm="$ASM" \
--with-test-override-wide-multiply="$WIDEMUL" --with-asm="$ASM" \
--enable-ecmult-static-precomputation="$STATICPRECOMPUTATION" --with-ecmult-gen-precision="$ECMULTGENPRECISION" \
--enable-module-ecdh="$ECDH" --enable-module-recovery="$RECOVERY" \
--enable-module-schnorrsig="$SCHNORRSIG" \
--with-valgrind="$WITH_VALGRIND" \
--host="$HOST" $EXTRAFLAGS
if [ -n "$BUILD" ]
then
make -j2 "$BUILD"
fi
if [ "$RUN_VALGRIND" = "yes" ]
then
make -j2
# the `--error-exitcode` is required to make the test fail if valgrind found errors, otherwise it'll return 0 (https://www.valgrind.org/docs/manual/manual-core.html)
valgrind --error-exitcode=42 ./tests 16
valgrind --error-exitcode=42 ./exhaustive_tests
fi
# We have set "-j<n>" in MAKEFLAGS.
make
# Print information about binaries so that we can see that the architecture is correct
file *tests* || true
file bench_* || true
file .libs/* || true
# This tells `make check` to wrap test invocations.
export LOG_COMPILER="$WRAPPER_CMD"
# This limits the iterations in the tests and benchmarks.
export SECP256K1_TEST_ITERS="$TEST_ITERS"
export SECP256K1_BENCH_ITERS="$BENCH_ITERS"
make "$BUILD"
if [ "$BENCH" = "yes" ]
then
if [ "$RUN_VALGRIND" = "yes" ]
# Using the local `libtool` because on macOS the system's libtool has nothing to do with GNU libtool
EXEC='./libtool --mode=execute'
if [ -n "$WRAPPER_CMD" ]
then
# Using the local `libtool` because on macOS the system's libtool has nothing to do with GNU libtool
EXEC='./libtool --mode=execute valgrind --error-exitcode=42'
else
EXEC=
EXEC="$EXEC $WRAPPER_CMD"
fi
# This limits the iterations in the benchmarks below to ITER(set in .travis.yml) iterations.
export SECP256K1_BENCH_ITERS="$ITERS"
{
$EXEC ./bench_ecmult
$EXEC ./bench_internal

@ -0,0 +1,24 @@
FROM debian:stable
RUN dpkg --add-architecture i386
RUN dpkg --add-architecture s390x
RUN dpkg --add-architecture armhf
RUN dpkg --add-architecture arm64
RUN dpkg --add-architecture ppc64el
RUN apt-get update
# dpkg-dev: to make pkg-config work in cross-builds
# llvm: for llvm-symbolizer, which is used by clang's UBSan for symbolized stack traces
RUN apt-get install --no-install-recommends --no-upgrade -y \
git ca-certificates \
make automake libtool pkg-config dpkg-dev valgrind qemu-user \
gcc clang llvm libc6-dbg \
gcc-i686-linux-gnu libc6-dev-i386-cross libc6-dbg:i386 libubsan1:i386 libasan5:i386 \
gcc-s390x-linux-gnu libc6-dev-s390x-cross libc6-dbg:s390x \
gcc-arm-linux-gnueabihf libc6-dev-armhf-cross libc6-dbg:armhf \
gcc-aarch64-linux-gnu libc6-dev-arm64-cross libc6-dbg:arm64 \
gcc-powerpc64le-linux-gnu libc6-dev-ppc64el-cross libc6-dbg:ppc64el \
wine gcc-mingw-w64-x86-64
# Run a dummy command in wine to make it set up configuration
RUN wine64-stable xcopy || true

@ -14,7 +14,7 @@ AM_INIT_AUTOMAKE([foreign subdir-objects])
: ${CFLAGS="-g"}
LT_INIT
dnl make the compilation flags quiet unless V=1 is used
# Make the compilation flags quiet unless V=1 is used.
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
PKG_PROG_PKG_CONFIG
@ -22,9 +22,16 @@ PKG_PROG_PKG_CONFIG
AC_PATH_TOOL(AR, ar)
AC_PATH_TOOL(RANLIB, ranlib)
AC_PATH_TOOL(STRIP, strip)
AX_PROG_CC_FOR_BUILD
# Save definition of AC_PROG_CC because AM_PROG_CC_C_O in automake<=1.13 will
# redefine AC_PROG_CC to exit with an error, which avoids the user calling it
# accidentally and screwing up the effect of AM_PROG_CC_C_O. However, we'll need
# AC_PROG_CC later on in AX_PROG_CC_FOR_BUILD, where its usage is fine, and
# we'll carefully make sure not to call AC_PROG_CC anywhere else.
m4_copy([AC_PROG_CC], [saved_AC_PROG_CC])
AM_PROG_CC_C_O
# Restore AC_PROG_CC
m4_rename_force([saved_AC_PROG_CC], [AC_PROG_CC])
AC_PROG_CC_C89
if test x"$ac_cv_prog_cc_c89" = x"no"; then
@ -37,25 +44,23 @@ case $host_os in
if test x$cross_compiling != xyes; then
AC_PATH_PROG([BREW],brew,)
if test x$BREW != x; then
dnl These Homebrew packages may be keg-only, meaning that they won't be found
dnl in expected paths because they may conflict with system files. Ask
dnl Homebrew where each one is located, then adjust paths accordingly.
# These Homebrew packages may be keg-only, meaning that they won't be found
# in expected paths because they may conflict with system files. Ask
# Homebrew where each one is located, then adjust paths accordingly.
openssl_prefix=`$BREW --prefix openssl 2>/dev/null`
gmp_prefix=`$BREW --prefix gmp 2>/dev/null`
valgrind_prefix=`$BREW --prefix valgrind 2>/dev/null`
if test x$openssl_prefix != x; then
PKG_CONFIG_PATH="$openssl_prefix/lib/pkgconfig:$PKG_CONFIG_PATH"
export PKG_CONFIG_PATH
CRYPTO_CPPFLAGS="-I$openssl_prefix/include"
fi
if test x$gmp_prefix != x; then
GMP_CPPFLAGS="-I$gmp_prefix/include"
GMP_LIBS="-L$gmp_prefix/lib"
if test x$valgrind_prefix != x; then
VALGRIND_CPPFLAGS="-I$valgrind_prefix/include"
fi
else
AC_PATH_PROG([PORT],port,)
dnl if homebrew isn't installed and macports is, add the macports default paths
dnl as a last resort.
# If homebrew isn't installed and macports is, add the macports default paths
# as a last resort.
if test x$PORT != x; then
CPPFLAGS="$CPPFLAGS -isystem /opt/local/include"
LDFLAGS="$LDFLAGS -L/opt/local/lib"
@ -77,6 +82,15 @@ AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])],
CFLAGS="$saved_CFLAGS"
])
saved_CFLAGS="$CFLAGS"
CFLAGS="-Wconditional-uninitialized $CFLAGS"
AC_MSG_CHECKING([if ${CC} supports -Wconditional-uninitialized])
AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])],
[ AC_MSG_RESULT([yes]) ],
[ AC_MSG_RESULT([no])
CFLAGS="$saved_CFLAGS"
])
saved_CFLAGS="$CFLAGS"
CFLAGS="-fvisibility=hidden $CFLAGS"
AC_MSG_CHECKING([if ${CC} supports -fvisibility=hidden])
@ -86,6 +100,10 @@ AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])],
CFLAGS="$saved_CFLAGS"
])
###
### Define config arguments
###
AC_ARG_ENABLE(benchmark,
AS_HELP_STRING([--enable-benchmark],[compile benchmark [default=yes]]),
[use_benchmark=$enableval],
@ -146,13 +164,10 @@ AC_ARG_ENABLE(external_default_callbacks,
[use_external_default_callbacks=$enableval],
[use_external_default_callbacks=no])
dnl Test-only override of the (autodetected by the C code) "widemul" setting.
dnl Legal values are int64 (for [u]int64_t), int128 (for [unsigned] __int128), and auto (the default).
# Test-only override of the (autodetected by the C code) "widemul" setting.
# Legal values are int64 (for [u]int64_t), int128 (for [unsigned] __int128), and auto (the default).
AC_ARG_WITH([test-override-wide-multiply], [] ,[set_widemul=$withval], [set_widemul=auto])
AC_ARG_WITH([bignum], [AS_HELP_STRING([--with-bignum=gmp|no|auto],
[bignum implementation to use [default=auto]])],[req_bignum=$withval], [req_bignum=auto])
AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto],
[assembly optimizations to use (experimental: arm) [default=auto]])],[req_asm=$withval], [req_asm=auto])
@ -177,15 +192,22 @@ AC_ARG_WITH([valgrind], [AS_HELP_STRING([--with-valgrind=yes|no|auto],
)],
[req_valgrind=$withval], [req_valgrind=auto])
###
### Handle config options (except for modules)
###
if test x"$req_valgrind" = x"no"; then
enable_valgrind=no
else
AC_CHECK_HEADER([valgrind/memcheck.h], [enable_valgrind=yes], [
SECP_VALGRIND_CHECK
if test x"$has_valgrind" != x"yes"; then
if test x"$req_valgrind" = x"yes"; then
AC_MSG_ERROR([Valgrind support explicitly requested but valgrind/memcheck.h header not available])
fi
enable_valgrind=no
], [])
else
enable_valgrind=yes
fi
fi
AM_CONDITIONAL([VALGRIND_ENABLED],[test "$enable_valgrind" = "yes"])
@ -197,61 +219,6 @@ else
CFLAGS="-O2 $CFLAGS"
fi
if test x"$use_ecmult_static_precomputation" != x"no"; then
# Temporarily switch to an environment for the native compiler
save_cross_compiling=$cross_compiling
cross_compiling=no
SAVE_CC="$CC"
CC="$CC_FOR_BUILD"
SAVE_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS_FOR_BUILD"
SAVE_CPPFLAGS="$CPPFLAGS"
CPPFLAGS="$CPPFLAGS_FOR_BUILD"
SAVE_LDFLAGS="$LDFLAGS"
LDFLAGS="$LDFLAGS_FOR_BUILD"
warn_CFLAGS_FOR_BUILD="-Wall -Wextra -Wno-unused-function"
saved_CFLAGS="$CFLAGS"
CFLAGS="$warn_CFLAGS_FOR_BUILD $CFLAGS"
AC_MSG_CHECKING([if native ${CC_FOR_BUILD} supports ${warn_CFLAGS_FOR_BUILD}])
AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])],
[ AC_MSG_RESULT([yes]) ],
[ AC_MSG_RESULT([no])
CFLAGS="$saved_CFLAGS"
])
AC_MSG_CHECKING([for working native compiler: ${CC_FOR_BUILD}])
AC_RUN_IFELSE(
[AC_LANG_PROGRAM([], [])],
[working_native_cc=yes],
[working_native_cc=no],[:])
CFLAGS_FOR_BUILD="$CFLAGS"
# Restore the environment
cross_compiling=$save_cross_compiling
CC="$SAVE_CC"
CFLAGS="$SAVE_CFLAGS"
CPPFLAGS="$SAVE_CPPFLAGS"
LDFLAGS="$SAVE_LDFLAGS"
if test x"$working_native_cc" = x"no"; then
AC_MSG_RESULT([no])
set_precomp=no
m4_define([please_set_for_build], [Please set CC_FOR_BUILD, CFLAGS_FOR_BUILD, CPPFLAGS_FOR_BUILD, and/or LDFLAGS_FOR_BUILD.])
if test x"$use_ecmult_static_precomputation" = x"yes"; then
AC_MSG_ERROR([native compiler ${CC_FOR_BUILD} does not produce working binaries. please_set_for_build])
else
AC_MSG_WARN([Disabling statically generated ecmult table because the native compiler ${CC_FOR_BUILD} does not produce working binaries. please_set_for_build])
fi
else
AC_MSG_RESULT([yes])
set_precomp=yes
fi
else
set_precomp=no
fi
if test x"$req_asm" = x"auto"; then
SECP_64BIT_ASM_CHECK
if test x"$has_64bit_asm" = x"yes"; then
@ -279,33 +246,7 @@ else
esac
fi
if test x"$req_bignum" = x"auto"; then
SECP_GMP_CHECK
if test x"$has_gmp" = x"yes"; then
set_bignum=gmp
fi
if test x"$set_bignum" = x; then
set_bignum=no
fi
else
set_bignum=$req_bignum
case $set_bignum in
gmp)
SECP_GMP_CHECK
if test x"$has_gmp" != x"yes"; then
AC_MSG_ERROR([gmp bignum explicitly requested but libgmp not available])
fi
;;
no)
;;
*)
AC_MSG_ERROR([invalid bignum implementation selection])
;;
esac
fi
# select assembly optimization
# Select assembly optimization
use_external_asm=no
case $set_asm in
@ -322,7 +263,12 @@ no)
;;
esac
# select wide multiplication implementation
if test x"$use_external_asm" = x"yes"; then
AC_DEFINE(USE_EXTERNAL_ASM, 1, [Define this symbol if an external (non-inline) assembly implementation is used])
fi
# Select wide multiplication implementation
case $set_widemul in
int128)
AC_DEFINE(USE_FORCE_WIDEMUL_INT128, 1, [Define this symbol to force the use of the (unsigned) __int128 based wide multiplication implementation])
@ -337,25 +283,7 @@ auto)
;;
esac
# select bignum implementation
case $set_bignum in
gmp)
AC_DEFINE(HAVE_LIBGMP, 1, [Define this symbol if libgmp is installed])
AC_DEFINE(USE_NUM_GMP, 1, [Define this symbol to use the gmp implementation for num])
AC_DEFINE(USE_FIELD_INV_NUM, 1, [Define this symbol to use the num-based field inverse implementation])
AC_DEFINE(USE_SCALAR_INV_NUM, 1, [Define this symbol to use the num-based scalar inverse implementation])
;;
no)
AC_DEFINE(USE_NUM_NONE, 1, [Define this symbol to use no num implementation])
AC_DEFINE(USE_FIELD_INV_BUILTIN, 1, [Define this symbol to use the native field inverse implementation])
AC_DEFINE(USE_SCALAR_INV_BUILTIN, 1, [Define this symbol to use the native scalar inverse implementation])
;;
*)
AC_MSG_ERROR([invalid bignum implementation])
;;
esac
#set ecmult window size
# Set ecmult window size
if test x"$req_ecmult_window" = x"auto"; then
set_ecmult_window=15
else
@ -377,7 +305,7 @@ case $set_ecmult_window in
;;
esac
#set ecmult gen precision
# Set ecmult gen precision
if test x"$req_ecmult_gen_precision" = x"auto"; then
set_ecmult_gen_precision=4
else
@ -419,15 +347,93 @@ else
enable_openssl_tests=no
fi
if test x"$set_bignum" = x"gmp"; then
SECP_LIBS="$SECP_LIBS $GMP_LIBS"
SECP_INCLUDES="$SECP_INCLUDES $GMP_CPPFLAGS"
if test x"$enable_valgrind" = x"yes"; then
SECP_INCLUDES="$SECP_INCLUDES $VALGRIND_CPPFLAGS"
fi
# Handle static precomputation (after everything which modifies CFLAGS and friends)
if test x"$use_ecmult_static_precomputation" != x"no"; then
if test x"$cross_compiling" = x"no"; then
set_precomp=yes
if test x"${CC_FOR_BUILD+x}${CFLAGS_FOR_BUILD+x}${CPPFLAGS_FOR_BUILD+x}${LDFLAGS_FOR_BUILD+x}" != x; then
AC_MSG_WARN([CC_FOR_BUILD, CFLAGS_FOR_BUILD, CPPFLAGS_FOR_BUILD, and/or LDFLAGS_FOR_BUILD is set but ignored because we are not cross-compiling.])
fi
# If we're not cross-compiling, simply use the same compiler for building the static precomputation code.
CC_FOR_BUILD="$CC"
CFLAGS_FOR_BUILD="$CFLAGS"
CPPFLAGS_FOR_BUILD="$CPPFLAGS"
LDFLAGS_FOR_BUILD="$LDFLAGS"
else
AX_PROG_CC_FOR_BUILD
# Temporarily switch to an environment for the native compiler
save_cross_compiling=$cross_compiling
cross_compiling=no
SAVE_CC="$CC"
CC="$CC_FOR_BUILD"
SAVE_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS_FOR_BUILD"
SAVE_CPPFLAGS="$CPPFLAGS"
CPPFLAGS="$CPPFLAGS_FOR_BUILD"
SAVE_LDFLAGS="$LDFLAGS"
LDFLAGS="$LDFLAGS_FOR_BUILD"
warn_CFLAGS_FOR_BUILD="-Wall -Wextra -Wno-unused-function"
saved_CFLAGS="$CFLAGS"
CFLAGS="$warn_CFLAGS_FOR_BUILD $CFLAGS"
AC_MSG_CHECKING([if native ${CC_FOR_BUILD} supports ${warn_CFLAGS_FOR_BUILD}])
AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])],
[ AC_MSG_RESULT([yes]) ],
[ AC_MSG_RESULT([no])
CFLAGS="$saved_CFLAGS"
])
AC_MSG_CHECKING([for working native compiler: ${CC_FOR_BUILD}])
AC_RUN_IFELSE(
[AC_LANG_PROGRAM([], [])],
[working_native_cc=yes],
[working_native_cc=no],[:])
CFLAGS_FOR_BUILD="$CFLAGS"
# Restore the environment
cross_compiling=$save_cross_compiling
CC="$SAVE_CC"
CFLAGS="$SAVE_CFLAGS"
CPPFLAGS="$SAVE_CPPFLAGS"
LDFLAGS="$SAVE_LDFLAGS"
if test x"$working_native_cc" = x"no"; then
AC_MSG_RESULT([no])
set_precomp=no
m4_define([please_set_for_build], [Please set CC_FOR_BUILD, CFLAGS_FOR_BUILD, CPPFLAGS_FOR_BUILD, and/or LDFLAGS_FOR_BUILD.])
if test x"$use_ecmult_static_precomputation" = x"yes"; then
AC_MSG_ERROR([native compiler ${CC_FOR_BUILD} does not produce working binaries. please_set_for_build])
else
AC_MSG_WARN([Disabling statically generated ecmult table because the native compiler ${CC_FOR_BUILD} does not produce working binaries. please_set_for_build])
fi
else
AC_MSG_RESULT([yes])
set_precomp=yes
fi
fi
AC_SUBST(CC_FOR_BUILD)
AC_SUBST(CFLAGS_FOR_BUILD)
AC_SUBST(CPPFLAGS_FOR_BUILD)
AC_SUBST(LDFLAGS_FOR_BUILD)
else
set_precomp=no
fi
if test x"$set_precomp" = x"yes"; then
AC_DEFINE(USE_ECMULT_STATIC_PRECOMPUTATION, 1, [Define this symbol to use a statically generated ecmult table])
fi
###
### Handle module options
###
if test x"$enable_module_ecdh" = x"yes"; then
AC_DEFINE(ENABLE_MODULE_ECDH, 1, [Define this symbol to enable the ECDH module])
fi
@ -447,14 +453,14 @@ if test x"$enable_module_extrakeys" = x"yes"; then
AC_DEFINE(ENABLE_MODULE_EXTRAKEYS, 1, [Define this symbol to enable the extrakeys module])
fi
if test x"$use_external_asm" = x"yes"; then
AC_DEFINE(USE_EXTERNAL_ASM, 1, [Define this symbol if an external (non-inline) assembly implementation is used])
fi
if test x"$use_external_default_callbacks" = x"yes"; then
AC_DEFINE(USE_EXTERNAL_DEFAULT_CALLBACKS, 1, [Define this symbol if an external implementation of the default callbacks is used])
fi
###
### Check for --enable-experimental if necessary
###
if test x"$enable_experimental" = x"yes"; then
AC_MSG_NOTICE([******])
AC_MSG_NOTICE([WARNING: experimental build])
@ -474,6 +480,10 @@ else
fi
fi
###
### Generate output
###
AC_CONFIG_HEADERS([src/libsecp256k1-config.h])
AC_CONFIG_FILES([Makefile libsecp256k1.pc])
AC_SUBST(SECP_INCLUDES)
@ -492,7 +502,7 @@ AM_CONDITIONAL([ENABLE_MODULE_SCHNORRSIG], [test x"$enable_module_schnorrsig" =
AM_CONDITIONAL([USE_EXTERNAL_ASM], [test x"$use_external_asm" = x"yes"])
AM_CONDITIONAL([USE_ASM_ARM], [test x"$set_asm" = x"arm"])
dnl make sure nothing new is exported so that we don't break the cache
# Make sure nothing new is exported so that we don't break the cache.
PKGCONFIG_PATH_TEMP="$PKG_CONFIG_PATH"
unset PKG_CONFIG_PATH
PKG_CONFIG_PATH="$PKGCONFIG_PATH_TEMP"
@ -513,10 +523,9 @@ echo " module extrakeys = $enable_module_extrakeys"
echo " module schnorrsig = $enable_module_schnorrsig"
echo
echo " asm = $set_asm"
echo " bignum = $set_bignum"
echo " ecmult window size = $set_ecmult_window"
echo " ecmult gen prec. bits = $set_ecmult_gen_precision"
dnl Hide test-only options unless they're used.
# Hide test-only options unless they're used.
if test x"$set_widemul" != xauto; then
echo " wide multiplication = $set_widemul"
fi
@ -527,3 +536,9 @@ echo " CFLAGS = $CFLAGS"
echo " CPPFLAGS = $CPPFLAGS"
echo " LDFLAGS = $LDFLAGS"
echo
if test x"$set_precomp" = x"yes"; then
echo " CC_FOR_BUILD = $CC_FOR_BUILD"
echo " CFLAGS_FOR_BUILD = $CFLAGS_FOR_BUILD"
echo " CPPFLAGS_FOR_BUILD = $CPPFLAGS_FOR_BUILD"
echo " LDFLAGS_FOR_BUILD = $LDFLAGS_FOR_BUILD"
fi

@ -5,11 +5,10 @@
***********************************************************************/
#include <string.h>
#include <secp256k1.h>
#include "lax_der_parsing.h"
int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) {
int rustsecp256k1_v0_4_1_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) {
size_t rpos, rlen, spos, slen;
size_t pos = 0;
size_t lenbyte;
@ -17,7 +16,7 @@ int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_4_
int overflow = 0;
/* Hack to initialize sig with a correctly-parsed but invalid signature. */
rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
/* Sequence tag byte */
if (pos == inputlen || input[pos] != 0x30) {
@ -138,11 +137,11 @@ int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_4_
}
if (!overflow) {
overflow = !rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
overflow = !rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
}
if (overflow) {
memset(tmpsig, 0, 64);
rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
}
return 1;
}

@ -26,8 +26,8 @@
* certain violations are easily supported. You may need to adapt it.
*
* Do not use this for new systems. Use well-defined DER or compact signatures
* instead if you have the choice (see rustsecp256k1_v0_4_0_ecdsa_signature_parse_der and
* rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact).
* instead if you have the choice (see rustsecp256k1_v0_4_1_ecdsa_signature_parse_der and
* rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact).
*
* The supported violations are:
* - All numbers are parsed as nonnegative integers, even though X.609-0207
@ -51,7 +51,13 @@
#ifndef SECP256K1_CONTRIB_LAX_DER_PARSING_H
#define SECP256K1_CONTRIB_LAX_DER_PARSING_H
/* #include secp256k1.h only when it hasn't been included yet.
This enables this file to be #included directly in other project
files (such as tests.c) without the need to set an explicit -I flag,
which would be necessary to locate secp256k1.h. */
#ifndef SECP256K1_H
#include <secp256k1.h>
#endif
#ifdef __cplusplus
extern "C" {
@ -77,9 +83,9 @@ extern "C" {
* encoded numbers are out of range, signature validation with it is
* guaranteed to fail for every message and public key.
*/
int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der_lax(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_ecdsa_signature* sig,
int rustsecp256k1_v0_4_1_ecdsa_signature_parse_der_lax(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_ecdsa_signature* sig,
const unsigned char *input,
size_t inputlen
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);

@ -5,11 +5,10 @@
***********************************************************************/
#include <string.h>
#include <secp256k1.h>
#include "lax_der_privatekey_parsing.h"
int ec_privkey_import_der(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *out32, const unsigned char *privkey, size_t privkeylen) {
int ec_privkey_import_der(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *out32, const unsigned char *privkey, size_t privkeylen) {
const unsigned char *end = privkey + privkeylen;
int lenb = 0;
int len = 0;
@ -46,17 +45,17 @@ int ec_privkey_import_der(const rustsecp256k1_v0_4_0_context* ctx, unsigned char
return 0;
}
memcpy(out32 + 32 - privkey[1], privkey + 2, privkey[1]);
if (!rustsecp256k1_v0_4_0_ec_seckey_verify(ctx, out32)) {
if (!rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, out32)) {
memset(out32, 0, 32);
return 0;
}
return 1;
}
int ec_privkey_export_der(const rustsecp256k1_v0_4_0_context *ctx, unsigned char *privkey, size_t *privkeylen, const unsigned char *key32, int compressed) {
rustsecp256k1_v0_4_0_pubkey pubkey;
int ec_privkey_export_der(const rustsecp256k1_v0_4_1_context *ctx, unsigned char *privkey, size_t *privkeylen, const unsigned char *key32, int compressed) {
rustsecp256k1_v0_4_1_pubkey pubkey;
size_t pubkeylen = 0;
if (!rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey, key32)) {
if (!rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey, key32)) {
*privkeylen = 0;
return 0;
}
@ -80,7 +79,7 @@ int ec_privkey_export_der(const rustsecp256k1_v0_4_0_context *ctx, unsigned char
memcpy(ptr, key32, 32); ptr += 32;
memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle);
pubkeylen = 33;
rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED);
rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED);
ptr += pubkeylen;
*privkeylen = ptr - privkey;
} else {
@ -105,7 +104,7 @@ int ec_privkey_export_der(const rustsecp256k1_v0_4_0_context *ctx, unsigned char
memcpy(ptr, key32, 32); ptr += 32;
memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle);
pubkeylen = 65;
rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_UNCOMPRESSED);
rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_UNCOMPRESSED);
ptr += pubkeylen;
*privkeylen = ptr - privkey;
}

@ -28,7 +28,13 @@
#ifndef SECP256K1_CONTRIB_BER_PRIVATEKEY_H
#define SECP256K1_CONTRIB_BER_PRIVATEKEY_H
/* #include secp256k1.h only when it hasn't been included yet.
This enables this file to be #included directly in other project
files (such as tests.c) without the need to set an explicit -I flag,
which would be necessary to locate secp256k1.h. */
#ifndef SECP256K1_H
#include <secp256k1.h>
#endif
#ifdef __cplusplus
extern "C" {
@ -52,10 +58,10 @@ extern "C" {
* simple 32-byte private keys are sufficient.
*
* Note that this function does not guarantee correct DER output. It is
* guaranteed to be parsable by rustsecp256k1_v0_4_0_ec_privkey_import_der
* guaranteed to be parsable by rustsecp256k1_v0_4_1_ec_privkey_import_der
*/
SECP256K1_WARN_UNUSED_RESULT int ec_privkey_export_der(
const rustsecp256k1_v0_4_0_context* ctx,
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *privkey,
size_t *privkeylen,
const unsigned char *seckey,
@ -77,7 +83,7 @@ SECP256K1_WARN_UNUSED_RESULT int ec_privkey_export_der(
* key.
*/
SECP256K1_WARN_UNUSED_RESULT int ec_privkey_import_der(
const rustsecp256k1_v0_4_0_context* ctx,
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *seckey,
const unsigned char *privkey,
size_t privkeylen

@ -0,0 +1,765 @@
# The safegcd implementation in libsecp256k1 explained
This document explains the modular inverse implementation in the `src/modinv*.h` files. It is based
on the paper
["Fast constant-time gcd computation and modular inversion"](https://gcd.cr.yp.to/papers.html#safegcd)
by Daniel J. Bernstein and Bo-Yin Yang. The references below are for the Date: 2019.04.13 version.
The actual implementation is in C of course, but for demonstration purposes Python3 is used here.
Most implementation aspects and optimizations are explained, except those that depend on the specific
number representation used in the C code.
## 1. Computing the Greatest Common Divisor (GCD) using divsteps
The algorithm from the paper (section 11), at a very high level, is this:
```python
def gcd(f, g):
    """Compute the GCD of an odd integer f and another integer g."""
    assert f & 1  # require f to be odd
    delta = 1     # additional state variable
    while g != 0:
        assert f & 1  # f will be odd in every iteration
        if delta > 0 and g & 1:
            delta, f, g = 1 - delta, g, (g - f) // 2
        elif g & 1:
            delta, f, g = 1 + delta, f, (g + f) // 2
        else:
            delta, f, g = 1 + delta, f, (g    ) // 2
    return abs(f)
```
It computes the greatest common divisor of an odd integer *f* and any integer *g*. Its inner loop
keeps rewriting the variables *f* and *g* alongside a state variable *&delta;* that starts at *1*, until
*g=0* is reached. At that point, *|f|* gives the GCD. Each of the transitions in the loop is called a
"division step" (referred to as divstep in what follows).
For example, *gcd(21, 14)* would be computed as:
- Start with *&delta;=1 f=21 g=14*
- Take the third branch: *&delta;=2 f=21 g=7*
- Take the first branch: *&delta;=-1 f=7 g=-7*
- Take the second branch: *&delta;=0 f=7 g=0*
- The answer *|f| = 7*.
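As a quick sanity check (an illustrative addition, not part of the paper), the `gcd` function above reproduces this result:
```python
assert gcd(21, 14) == 7   # matches the trace above
assert gcd(7, 0) == 7     # once g reaches 0, |f| is the answer
```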
Why it works:
- Divsteps can be decomposed into two steps (see paragraph 8.2 in the paper):
- (a) If *g* is odd, replace *(f,g)* with *(g,g-f)* or *(f,g+f)*, resulting in an even *g*.
- (b) Replace *(f,g)* with *(f,g/2)* (where *g* is guaranteed to be even).
- Neither of those two operations change the GCD:
- For (a), assume *gcd(f,g)=c*, then it must be the case that *f=a&thinsp;c* and *g=b&thinsp;c* for some integers *a*
and *b*. As *(g,g-f)=(b&thinsp;c,(b-a)c)* and *(f,f+g)=(a&thinsp;c,(a+b)c)*, the result clearly still has
common factor *c*. Reasoning in the other direction shows that no common factor can be added by
doing so either.
- For (b), we know that *f* is odd, so *gcd(f,g)* clearly has no factor *2*, and we can remove
it from *g*.
- The algorithm will eventually converge to *g=0*. This is proven in the paper (see theorem G.3).
- It follows that eventually we find a final value *f'* for which *gcd(f,g) = gcd(f',0)*. As the
gcd of *f'* and *0* is *|f'|* by definition, that is our answer.
Compared to more [traditional GCD algorithms](https://en.wikipedia.org/wiki/Euclidean_algorithm), this one has the property of only ever looking at
the low-order bits of the variables to decide the next steps, and being easy to make
constant-time (in more low-level languages than Python). The *&delta;* parameter is necessary to
guide the algorithm towards shrinking the numbers' magnitudes without explicitly needing to look
at high order bits.
Properties that will become important later:
- Performing more divsteps than needed is not a problem, as *f* does not change anymore after *g=0*.
- Only even numbers are divided by *2*. This means that when reasoning about it algebraically we
do not need to worry about rounding.
- At every point during the algorithm's execution the next *N* steps only depend on the bottom *N*
bits of *f* and *g*, and on *&delta;*.
## 2. From GCDs to modular inverses
We want an algorithm to compute the inverse *a* of *x* modulo *M*, i.e. the number a such that *a&thinsp;x=1
mod M*. This inverse only exists if the GCD of *x* and *M* is *1*, but that is always the case if *M* is
prime and *0 < x < M*. In what follows, assume that the modular inverse exists.
It turns out this inverse can be computed as a side effect of computing the GCD by keeping track
of how the internal variables can be written as linear combinations of the inputs at every step
(see the [extended Euclidean algorithm](https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm)).
Since the GCD is *1*, such an algorithm will compute numbers *a* and *b* such that *a&thinsp;x + b&thinsp;M = 1*.
Taking that expression *mod M* gives *a&thinsp;x mod M = 1*, and we see that *a* is the modular inverse of *x
mod M*.
A similar approach can be used to calculate modular inverses using the divsteps-based GCD
algorithm shown above, if the modulus *M* is odd. To do so, compute *gcd(f=M,g=x)*, while keeping
track of extra variables *d* and *e*, for which at every step *d = f/x (mod M)* and *e = g/x (mod M)*.
*f/x* here means the number which multiplied with *x* gives *f mod M*. As *f* and *g* are initialized to *M*
and *x* respectively, *d* and *e* just start off being *0* (*M/x mod M = 0/x mod M = 0*) and *1* (*x/x mod M
= 1*).
```python
def div2(M, x):
    """Helper routine to compute x/2 mod M (where M is odd)."""
    assert M & 1
    if x & 1:  # If x is odd, make it even by adding M.
        x += M
    # x must be even now, so a clean division by 2 is possible.
    return x // 2

def modinv(M, x):
    """Compute the inverse of x mod M (given that it exists, and M is odd)."""
    assert M & 1
    delta, f, g, d, e = 1, M, x, 0, 1
    while g != 0:
        # Note that while division by two for f and g is only ever done on even inputs, this is
        # not true for d and e, so we need the div2 helper function.
        if delta > 0 and g & 1:
            delta, f, g, d, e = 1 - delta, g, (g - f) // 2, e, div2(M, e - d)
        elif g & 1:
            delta, f, g, d, e = 1 + delta, f, (g + f) // 2, d, div2(M, e + d)
        else:
            delta, f, g, d, e = 1 + delta, f, (g    ) // 2, d, div2(M, e    )
        # Verify that the invariants d=f/x mod M, e=g/x mod M are maintained.
        assert f % M == (d * x) % M
        assert g % M == (e * x) % M
    assert f == 1 or f == -1  # |f| is the GCD, it must be 1
    # Because of invariant d = f/x (mod M), 1/x = d/f (mod M). As |f|=1, d/f = d*f.
    return (d * f) % M
```
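As an illustrative usage example (the concrete numbers below are arbitrary choices, not from the paper), any odd modulus with *gcd(x,M)=1* works:
```python
M = 101            # example odd modulus
x = 13
a = modinv(M, x)   # the section-2 version of modinv above
assert (a * x) % M == 1
```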
Also note that this approach to track *d* and *e* throughout the computation to determine the inverse
is different from the paper. There (see paragraph 12.1 in the paper) a transition matrix for the
entire computation is determined (see section 3 below) and the inverse is computed from that.
The approach here avoids the need for 2x2 matrix multiplications of various sizes, and appears to
be faster at the level of optimization we're able to do in C.
## 3. Batching multiple divsteps
Every divstep can be expressed as a matrix multiplication, applying a transition matrix *(1/2 t)*
to both vectors *[f, g]* and *[d, e]* (see paragraph 8.1 in the paper):
```
t = [ u,  v ]
    [ q,  r ]

[ out_f ] = (1/2 * t) * [ in_f ]
[ out_g ]               [ in_g ]

[ out_d ] = (1/2 * t) * [ in_d ]  (mod M)
[ out_e ]               [ in_e ]
```
where *(u, v, q, r)* is *(0, 2, -1, 1)*, *(2, 0, 1, 1)*, or *(2, 0, 0, 1)*, depending on which branch is
taken. As above, the resulting *f* and *g* are always integers.
Performing multiple divsteps corresponds to a multiplication with the product of all the
individual divsteps' transition matrices. As each transition matrix consists of integers
divided by *2*, the product of these matrices will consist of integers divided by *2<sup>N</sup>* (see also
theorem 9.2 in the paper). These divisions are expensive when updating *d* and *e*, so we delay
them: we compute the integer coefficients of the combined transition matrix scaled by *2<sup>N</sup>*, and
do one division by *2<sup>N</sup>* as a final step:
```python
def divsteps_n_matrix(delta, f, g):
    """Compute delta and transition matrix t after N divsteps (multiplied by 2^N)."""
    u, v, q, r = 1, 0, 0, 1  # start with identity matrix
    for _ in range(N):
        if delta > 0 and g & 1:
            delta, f, g, u, v, q, r = 1 - delta, g, (g - f) // 2, 2*q, 2*r, q-u, r-v
        elif g & 1:
            delta, f, g, u, v, q, r = 1 + delta, f, (g + f) // 2, 2*u, 2*v, q+u, r+v
        else:
            delta, f, g, u, v, q, r = 1 + delta, f, (g    ) // 2, 2*u, 2*v, q  , r
    return delta, (u, v, q, r)
```
As the branches in the divsteps are completely determined by the bottom *N* bits of *f* and *g*, this
function to compute the transition matrix only needs to see those bottom bits. Furthermore all
intermediate results and outputs fit in *(N+1)*-bit numbers (unsigned for *f* and *g*; signed for *u*, *v*,
*q*, and *r*) (see also paragraph 8.3 in the paper). This means that an implementation using 64-bit
integers could set *N=62* and compute the full transition matrix for 62 steps at once without any
big integer arithmetic at all. This is the reason why this algorithm is efficient: it only needs
to update the full-size *f*, *g*, *d*, and *e* numbers once every *N* steps.
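To illustrate this (an added cross-check, not from the paper; the tiny *N* is chosen purely for demonstration), the matrix computed from only the bottom *N* bits, applied to the full *f* and *g*, agrees with running *N* individual divsteps on the full values:
```python
N = 4                                  # small batch size, for demonstration only
delta, f, g = 1, 21, 14
delta2, (u, v, q, r) = divsteps_n_matrix(delta, f % 2**N, g % 2**N)
# Run the same N divsteps directly on the full-size f and g.
for _ in range(N):
    if delta > 0 and g & 1:
        delta, f, g = 1 - delta, g, (g - f) // 2
    elif g & 1:
        delta, f, g = 1 + delta, f, (g + f) // 2
    else:
        delta, f, g = 1 + delta, f, g // 2
assert (delta2, (u*21 + v*14) >> N, (q*21 + r*14) >> N) == (delta, f, g)
```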
We still need functions to compute:
```
[ out_f ] = (1/2^N * [ u, v ]) * [ in_f ]
[ out_g ]   (        [ q, r ])   [ in_g ]

[ out_d ] = (1/2^N * [ u, v ]) * [ in_d ]  (mod M)
[ out_e ]   (        [ q, r ])   [ in_e ]
```
Because the divsteps transformation only ever divides even numbers by two, the result of *t&thinsp;[f,g]* is always even. When *t* is a composition of *N* divsteps, it follows that the resulting *f*
and *g* will be multiples of *2<sup>N</sup>*, and division by *2<sup>N</sup>* is simply shifting them down:
```python
def update_fg(f, g, t):
    """Multiply matrix t/2^N with [f, g]."""
    u, v, q, r = t
    cf, cg = u*f + v*g, q*f + r*g
    # (t / 2^N) should cleanly apply to [f,g] so the result of t*[f,g] should have N zero
    # bottom bits.
    assert cf % 2**N == 0
    assert cg % 2**N == 0
    return cf >> N, cg >> N
```
The same is not true for *d* and *e*, and we need an equivalent of the `div2` function for division by *2<sup>N</sup> mod M*.
This is easy if we have precomputed *1/M mod 2<sup>N</sup>* (which always exists for odd *M*):
```python
def div2n(M, Mi, x):
    """Compute x/2^N mod M, given Mi = 1/M mod 2^N."""
    assert (M * Mi) % 2**N == 1
    # Find a factor m such that m*M has the same bottom N bits as x. We want:
    #     (m * M) mod 2^N = x mod 2^N
    # <=> m mod 2^N = (x / M) mod 2^N
    # <=> m mod 2^N = (x * Mi) mod 2^N
    m = (Mi * x) % 2**N
    # Subtract that multiple from x, cancelling its bottom N bits.
    x -= m * M
    # Now a clean division by 2^N is possible.
    assert x % 2**N == 0
    return (x >> N) % M

def update_de(d, e, t, M, Mi):
    """Multiply matrix t/2^N with [d, e], modulo M."""
    u, v, q, r = t
    cd, ce = u*d + v*e, q*d + r*e
    return div2n(M, Mi, cd), div2n(M, Mi, ce)
```
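A small property check (illustrative; the modulus and `N` below are example values assumed for demonstration, and `pow(M, -1, 2**N)` needs Python 3.8+): dividing by *2<sup>N</sup>* mod *M* and then multiplying back by *2<sup>N</sup>* must give the original value mod *M*:
```python
N = 62
M = 2**61 - 1              # example odd modulus
Mi = pow(M, -1, 2**N)      # 1/M mod 2^N
x = 123456789
assert (div2n(M, Mi, x) << N) % M == x % M
```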
With all of those, we can write a version of `modinv` that performs *N* divsteps at once:
```python3
def modinv(M, Mi, x):
    """Compute the modular inverse of x mod M, given Mi=1/M mod 2^N."""
    assert M & 1
    delta, f, g, d, e = 1, M, x, 0, 1
    while g != 0:
        # Compute the delta and transition matrix t for the next N divsteps (this only needs
        # (N+1)-bit signed integer arithmetic).
        delta, t = divsteps_n_matrix(delta, f % 2**N, g % 2**N)
        # Apply the transition matrix t to [f, g]:
        f, g = update_fg(f, g, t)
        # Apply the transition matrix t to [d, e]:
        d, e = update_de(d, e, t, M, Mi)
    return (d * f) % M
```
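For example (an illustrative check added here; the secp256k1 field prime is just a convenient odd modulus, and `pow(M, -1, 2**N)` needs Python 3.8+):
```python
N = 62
M = 2**256 - 2**32 - 977   # the secp256k1 field prime, used as an example odd modulus
Mi = pow(M, -1, 2**N)      # precomputed 1/M mod 2^N
x = 0x123456789abcdef
assert (modinv(M, Mi, x) * x) % M == 1
```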
This means that in practice we'll always perform a multiple of *N* divsteps. This is not a problem
because once *g=0*, further divsteps do not affect *f*, *g*, *d*, or *e* anymore (only *&delta;* keeps
increasing). For variable time code such excess iterations will be mostly optimized away in later
sections.
## 4. Avoiding modulus operations
So far, there are two places where we compute a remainder of big numbers modulo *M*: at the end of
`div2n` in every `update_de`, and at the very end of `modinv` after potentially negating *d* due to the
sign of *f*. These are relatively expensive operations when done generically.
To deal with the modulus operation in `div2n`, we simply stop requiring *d* and *e* to be in range
*[0,M)* all the time. Let's start by inlining `div2n` into `update_de`, and dropping the modulus
operation at the end:
```python
def update_de(d, e, t, M, Mi):
    """Multiply matrix t/2^N with [d, e] mod M, given Mi=1/M mod 2^N."""
    u, v, q, r = t
    cd, ce = u*d + v*e, q*d + r*e
    # Cancel out bottom N bits of cd and ce.
    md = -((Mi * cd) % 2**N)
    me = -((Mi * ce) % 2**N)
    cd += md * M
    ce += me * M
    # And cleanly divide by 2**N.
    return cd >> N, ce >> N
```
Let's look at bounds on the ranges of these numbers. It can be shown that *|u|+|v|* and *|q|+|r|*
never exceed *2<sup>N</sup>* (see paragraph 8.3 in the paper), and thus a multiplication with *t* will have
outputs whose absolute values are at most *2<sup>N</sup>* times the maximum absolute input value. In case the
inputs *d* and *e* are in *(-M,M)*, which is certainly true for the initial values *d=0* and *e=1* assuming
*M > 1*, the multiplication results in numbers in range *(-2<sup>N</sup>M,2<sup>N</sup>M)*. Subtracting less than *2<sup>N</sup>*
times *M* to cancel out *N* bits brings that up to *(-2<sup>N+1</sup>M,2<sup>N</sup>M)*, and
dividing by *2<sup>N</sup>* at the end takes it to *(-2M,M)*. Another application of `update_de` would take that
to *(-3M,2M)*, and so forth. This progressive expansion of the variables' ranges can be
counteracted by incrementing *d* and *e* by *M* whenever they're negative:
```python
    ...
    if d < 0:
        d += M
    if e < 0:
        e += M
    cd, ce = u*d + v*e, q*d + r*e
    # Cancel out bottom N bits of cd and ce.
    ...
```
With inputs in *(-2M,M)*, they will first be shifted into range *(-M,M)*, which means that the
output will again be in *(-2M,M)*, and this remains the case regardless of how many `update_de`
invocations there are. In what follows, we will try to make this more efficient.
Note that increasing *d* by *M* is equal to incrementing *cd* by *u&thinsp;M* and *ce* by *q&thinsp;M*. Similarly,
increasing *e* by *M* is equal to incrementing *cd* by *v&thinsp;M* and *ce* by *r&thinsp;M*. So we could instead write:
```python
    ...
    cd, ce = u*d + v*e, q*d + r*e
    # Perform the equivalent of incrementing d, e by M when they're negative.
    if d < 0:
        cd += u*M
        ce += q*M
    if e < 0:
        cd += v*M
        ce += r*M
    # Cancel out bottom N bits of cd and ce.
    md = -((Mi * cd) % 2**N)
    me = -((Mi * ce) % 2**N)
    cd += md * M
    ce += me * M
    ...
```
Now note that we have two steps of corrections to *cd* and *ce* that add multiples of *M*: this
increment, and the decrement that cancels out bottom bits. The second one depends on the first
one, but they can still be efficiently combined by only computing the bottom bits of *cd* and *ce*
at first, and using that to compute the final *md*, *me* values:
```python
def update_de(d, e, t, M, Mi):
    """Multiply matrix t/2^N with [d, e], modulo M."""
    u, v, q, r = t
    md, me = 0, 0
    # Compute what multiples of M to add to cd and ce.
    if d < 0:
        md += u
        me += q
    if e < 0:
        md += v
        me += r
    # Compute bottom N bits of t*[d,e] + M*[md,me].
    cd, ce = (u*d + v*e + md*M) % 2**N, (q*d + r*e + me*M) % 2**N
    # Correct md and me such that the bottom N bits of t*[d,e] + M*[md,me] are zero.
    md -= (Mi * cd) % 2**N
    me -= (Mi * ce) % 2**N
    # Do the full computation.
    cd, ce = u*d + v*e + md*M, q*d + r*e + me*M
    # And cleanly divide by 2**N.
    return cd >> N, ce >> N
```
One last optimization: we can avoid the *md&thinsp;M* and *me&thinsp;M* multiplications in the bottom bits of *cd*
and *ce* by moving them to the *md* and *me* correction:
```python
    ...
    # Compute bottom N bits of t*[d,e].
    cd, ce = (u*d + v*e) % 2**N, (q*d + r*e) % 2**N
    # Correct md and me such that the bottom N bits of t*[d,e]+M*[md,me] are zero.
    # Note that this is not the same as {md = (-Mi * cd) % 2**N} etc. That would also result in N
    # zero bottom bits, but isn't guaranteed to be a reduction of [0,2^N) compared to the
    # previous md and me values, and thus would violate our bounds analysis.
    md -= (Mi*cd + md) % 2**N
    me -= (Mi*ce + me) % 2**N
    ...
```
The resulting function takes *d* and *e* in range *(-2M,M)* as inputs, and outputs values in the same
range. That also means that the *d* value at the end of `modinv` will be in that range, while we want
a result in *[0,M)*. To do that, we need a normalization function. It's easy to integrate the
conditional negation of *d* (based on the sign of *f*) into it as well:
```python
def normalize(sign, v, M):
    """Compute sign*v mod M, where v is in range (-2*M,M); output in [0,M)."""
    assert sign == 1 or sign == -1
    # v in (-2*M,M)
    if v < 0:
        v += M
    # v in (-M,M). Now multiply v with sign (which can only be 1 or -1).
    if sign == -1:
        v = -v
    # v in (-M,M)
    if v < 0:
        v += M
    # v in [0,M)
    return v
```
And calling it in `modinv` is simply:
```python
    ...
    return normalize(f, d, M)
```
## 5. Constant-time operation
The primary selling point of the algorithm is fast constant-time operation. What code flow still
depends on the input data so far?
- the number of iterations of the while *g &ne; 0* loop in `modinv`
- the branches inside `divsteps_n_matrix`
- the sign checks in `update_de`
- the sign checks in `normalize`
To make the while loop in `modinv` constant time, it can be replaced with a constant number of
iterations. The paper proves (Theorem 11.2) that *741* divsteps are sufficient for any *256*-bit
inputs, and [safegcd-bounds](https://github.com/sipa/safegcd-bounds) shows that the slightly better
bound of *724* is sufficient. Given that every loop iteration performs *N* divsteps, the loop will run a total of
*&lceil;724/N&rceil;* times.
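As a concrete illustration (the group sizes below are just the common choices for 64-bit and 32-bit implementations, not requirements of the algorithm):
```python
# Worked example (illustrative): iteration counts for two common group sizes.
assert (724 + 62 - 1) // 62 == 12   # ceil(724/62), e.g. 62 divsteps per group on 64-bit platforms
assert (724 + 30 - 1) // 30 == 25   # ceil(724/30), e.g. 30 divsteps per group on 32-bit platforms
```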
To deal with the branches in `divsteps_n_matrix` we will replace them with constant-time bitwise
operations (and hope the C compiler isn't smart enough to turn them back into branches; see
`valgrind_ctime_test.c` for automated tests that this isn't the case). To do so, observe that a
divstep can be written instead as (compare to the inner loop of `gcd` in section 1):
```python
x = -f if delta > 0 else f # set x equal to (input) -f or f
if g & 1:
    g += x # set g to (input) g-f or g+f
    if delta > 0:
        delta = -delta
        f += g # set f to (input) g (note that g was set to g-f before)
delta += 1
g >>= 1
```
To convert the above to bitwise operations, we rely on a trick to negate conditionally: per the
definition of negative numbers in two's complement, (*-v == ~v + 1*) holds for every number *v*. As
*-1* in two's complement is all *1* bits, bitflipping can be expressed as xor with *-1*. It follows
that *-v == (v ^ -1) - (-1)*. Thus, if we have a variable *c* that takes on values *0* or *-1*, then
*(v ^ c) - c* is *v* if *c=0* and *-v* if *c=-1*.
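A quick check of this identity (purely illustrative):
```python
# Sanity check of the conditional-negation identity (illustrative).
for v in (-7, 0, 1, 13):
    assert (v ^ 0) - 0 == v        # c = 0 leaves v unchanged
    assert (v ^ -1) - (-1) == -v   # c = -1 negates v, since v ^ -1 == ~v == -v - 1
```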
Using this we can write:
```python
x = -f if delta > 0 else f
```
in constant-time form as:
```python
c1 = (-delta) >> 63
# Conditionally negate f based on c1:
x = (f ^ c1) - c1
```
To use that trick, we need a helper mask variable *c1* that resolves the condition *&delta;>0* to *-1*
(if true) or *0* (if false). We compute *c1* using right shifting, which is equivalent to dividing by
the specified power of *2* and rounding down (in Python, and also in C under the assumption of a typical two's complement system; see
`assumptions.h` for tests that this is the case). Right shifting by *63* thus maps all
numbers in range *[-2<sup>63</sup>,0)* to *-1*, and numbers in range *[0,2<sup>63</sup>)* to *0*.
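In Python this behaviour can be checked directly, since its integers have arbitrary precision and shift arithmetically:
```python
# Right shift by 63 as a sign mask (illustrative).
assert (-1) >> 63 == -1 and (-2**63) >> 63 == -1   # [-2^63, 0) maps to -1
assert 0 >> 63 == 0 and (2**63 - 1) >> 63 == 0     # [0, 2^63) maps to 0
```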
Using the facts that *x&0=0* and *x&(-1)=x* (on two's complement systems again), we can write:
```python
if g & 1:
    g += x
```
as:
```python
# Compute c2=0 if g is even and c2=-1 if g is odd.
c2 = -(g & 1)
# This masks out x if g is even, and leaves x be if g is odd.
g += x & c2
```
Using the conditional negation trick again we can write:
```python
if g & 1:
    if delta > 0:
        delta = -delta
```
as:
```python
# Compute c3=-1 if g is odd and delta>0, and 0 otherwise.
c3 = c1 & c2
# Conditionally negate delta based on c3:
delta = (delta ^ c3) - c3
```
Finally:
```python
if g & 1:
    if delta > 0:
        f += g
```
becomes:
```python
f += g & c3
```
It turns out that this can be implemented more efficiently by applying the substitution
*&eta;=-&delta;*. In this representation, negating *&delta;* corresponds to negating *&eta;*, and incrementing
*&delta;* corresponds to decrementing *&eta;*. This allows us to remove the negation in the *c1*
computation:
```python
# Compute a mask c1 for eta < 0, and compute the conditional negation x of f:
c1 = eta >> 63
x = (f ^ c1) - c1
# Compute a mask c2 for odd g, and conditionally add x to g:
c2 = -(g & 1)
g += x & c2
# Compute a mask c for (eta < 0) and odd (input) g, and use it to conditionally negate eta,
# and add g to f:
c3 = c1 & c2
eta = (eta ^ c3) - c3
f += g & c3
# Incrementing delta corresponds to decrementing eta.
eta -= 1
g >>= 1
```
A variant of divsteps with better worst-case performance can be used instead: starting *&delta;* at
*1/2* instead of *1*. This reduces the worst case number of iterations to *590* for *256*-bit inputs
(which can be shown using convex hull analysis). In this case, the substitution *&zeta;=-(&delta;+1/2)*
is used instead to keep the variable integral. Incrementing *&delta;* by *1* still translates to
decrementing *&zeta;* by *1*, but negating *&delta;* now corresponds to going from *&zeta;* to *-(&zeta;+1)*, or
*~&zeta;*. Doing that conditionally based on *c3* is simply:
```python
...
c3 = c1 & c2
zeta ^= c3
...
```
By replacing the loop in `divsteps_n_matrix` with a variant of the divstep code above (extended to
also apply all *f* operations to *u*, *v* and all *g* operations to *q*, *r*), a constant-time version of
`divsteps_n_matrix` is obtained. The full code will be in section 7.
These bit fiddling tricks can also be used to make the conditional negations and additions in
`update_de` and `normalize` constant-time.
## 6. Variable-time optimizations
In section 5, we modified the `divsteps_n_matrix` function (and a few others) to be constant time.
Constant time operations are only necessary when computing modular inverses of secret data. In
other cases, it slows down calculations unnecessarily. In this section, we will construct a
faster non-constant time `divsteps_n_matrix` function.
To do so, first consider yet another way of writing the inner loop of divstep operations in
`gcd` from section 1. This decomposition is also explained in the paper in section 8.2. We use
the original version with initial *&delta;=1* and *&eta;=-&delta;* here.
```python
for _ in range(N):
    if g & 1 and eta < 0:
        eta, f, g = -eta, g, -f
    if g & 1:
        g += f
    eta -= 1
    g >>= 1
```
Whenever *g* is even, the loop only shifts *g* down and decreases *&eta;*. When *g* ends in multiple zero
bits, these iterations can be consolidated into one step. This requires counting the bottom zero
bits efficiently, which is possible on most platforms; it is abstracted here as the function
`count_trailing_zeros`.
```python
def count_trailing_zeros(v):
    """For a non-zero value v, find z such that v=(d<<z) for some odd d."""
    return (v & -v).bit_length() - 1

i = N # divsteps left to do
while True:
    # Get rid of all bottom zeros at once. In the first iteration, g may be odd and the following
    # lines have no effect (until "if eta < 0").
    zeros = min(i, count_trailing_zeros(g))
    eta -= zeros
    g >>= zeros
    i -= zeros
    if i == 0:
        break
    # We know g is odd now
    if eta < 0:
        eta, f, g = -eta, g, -f
    g += f
    # g is even now, and the eta decrement and g shift will happen in the next loop.
```
We can now remove multiple bottom *0* bits from *g* at once, but still need a full iteration whenever
there is a bottom *1* bit. In what follows, we will get rid of multiple *1* bits simultaneously as
well.
Observe that as long as *&eta; &geq; 0*, the loop does not modify *f*. Instead, it cancels out bottom
bits of *g* and shifts them out, and decreases *&eta;* and *i* accordingly - interrupting only when *&eta;*
becomes negative, or when *i* reaches *0*. Combined, this is equivalent to adding a multiple of *f* to
*g* to cancel out multiple bottom bits, and then shifting them out.
It is easy to find what that multiple is: we want a number *w* such that *g+w&thinsp;f* has a few bottom
zero bits. If that number of bits is *L*, we want *g+w&thinsp;f mod 2<sup>L</sup> = 0*, or *w = -g/f mod 2<sup>L</sup>*. Since *f*
is odd, such a *w* exists for any *L*. *L* cannot be more than *i* steps (as we'd finish the loop before
doing more) or more than *&eta;+1* steps (as we'd run `eta, f, g = -eta, g, -f` at that point), but
apart from that, we're only limited by the complexity of computing *w*.
This code demonstrates how to cancel up to 4 bits per step:
```python
NEGINV16 = [15, 5, 3, 9, 7, 13, 11, 1] # NEGINV16[n//2] = (-n)^-1 mod 16, for odd n
i = N
while True:
    zeros = min(i, count_trailing_zeros(g))
    eta -= zeros
    g >>= zeros
    i -= zeros
    if i == 0:
        break
    # We know g is odd now
    if eta < 0:
        eta, f, g = -eta, g, -f
    # Compute limit on number of bits to cancel
    limit = min(min(eta + 1, i), 4)
    # Compute w = -g/f mod 2**limit, using the table value for -1/f mod 2**4. Note that f is
    # always odd, so its inverse modulo a power of two always exists.
    w = (g * NEGINV16[(f & 15) // 2]) % (2**limit)
    # As w = -g/f mod (2**limit), g+w*f mod 2**limit = 0 mod 2**limit.
    g += w * f
    assert g % (2**limit) == 0
    # The next iteration will now shift out at least limit bottom zero bits from g.
```
By using a bigger table, more bits can be cancelled at once. The table can also be implemented
as a formula. Several formulas are known for computing modular inverses modulo powers of two;
some can be found in *Hacker's Delight*, second edition, by Henry S. Warren, Jr. (pages 245-247).
Here we need the negated modular inverse, which is a simple transformation of those (a quick
verification sketch follows the list):
- Instead of a 3-bit table:
- *-f* or *f ^ 6*
- Instead of a 4-bit table:
- *1 - f(f + 1)*
- *-(f + (((f + 1) & 4) << 1))*
- For larger tables the following technique can be used: if *w=-1/f mod 2<sup>L</sup>*, then *w(w&thinsp;f+2)* is
*-1/f mod 2<sup>2L</sup>*. This allows extending the previous formulas (or tables). In particular we
have this 6-bit function (based on the 3-bit function above):
- *f(f<sup>2</sup> - 2)*
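The following sketch (a verification aid only, not part of the algorithm) checks the `NEGINV16` table and the formulas above; the table is redefined so the block stands on its own:
```python
# Verify the negated modular inverses used above (illustrative check).
NEGINV16 = [15, 5, 3, 9, 7, 13, 11, 1]
for f in range(1, 64, 2):
    assert (f * NEGINV16[(f & 15) // 2]) % 16 == 15          # table: -1/f mod 2^4
    assert (f * (-f)) % 8 == 7 and (f * (f ^ 6)) % 8 == 7    # 3-bit formulas: -1/f mod 2^3
    assert (f * (1 - f*(f + 1))) % 16 == 15                  # 4-bit formula: -1/f mod 2^4
    assert (f * -(f + (((f + 1) & 4) << 1))) % 16 == 15      # 4-bit formula (variant)
    assert (f * (f*(f*f - 2))) % 64 == 63                    # 6-bit formula: -1/f mod 2^6
    w = f ^ 6                                                # extend the 3-bit inverse to 6 bits:
    assert (f * (w*(w*f + 2))) % 64 == 63                    # if w = -1/f mod 2^L, w*(w*f+2) = -1/f mod 2^2L
```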
This loop, again extended to also handle *u*, *v*, *q*, and *r* alongside *f* and *g*, placed in
`divsteps_n_matrix`, gives a significantly faster, but non-constant time version.
## 7. Final Python version
All together we need the following functions:
- A way to compute the transition matrix in constant time, using the `divsteps_n_matrix` function
from section 2, but with its loop replaced by a variant of the constant-time divstep from
section 5, extended to handle *u*, *v*, *q*, *r*:
```python
def divsteps_n_matrix(zeta, f, g):
    """Compute zeta and transition matrix t after N divsteps (multiplied by 2^N)."""
    u, v, q, r = 1, 0, 0, 1 # start with identity matrix
    for _ in range(N):
        c1 = zeta >> 63
        # Compute x, y, z as conditionally-negated versions of f, u, v.
        x, y, z = (f ^ c1) - c1, (u ^ c1) - c1, (v ^ c1) - c1
        c2 = -(g & 1)
        # Conditionally add x, y, z to g, q, r.
        g, q, r = g + (x & c2), q + (y & c2), r + (z & c2)
        c1 &= c2 # reusing c1 here for the earlier c3 variable
        zeta = (zeta ^ c1) - 1 # inlining the unconditional zeta decrement here
        # Conditionally add g, q, r to f, u, v.
        f, u, v = f + (g & c1), u + (q & c1), v + (r & c1)
        # When shifting g down, don't shift q, r, as we construct a transition matrix multiplied
        # by 2^N. Instead, shift f's coefficients u and v up.
        g, u, v = g >> 1, u << 1, v << 1
    return zeta, (u, v, q, r)
```
- The functions to update *f* and *g*, and *d* and *e*, from section 2 and section 4, with the constant-time
changes to `update_de` from section 5:
```python
def update_fg(f, g, t):
    """Multiply matrix t/2^N with [f, g]."""
    u, v, q, r = t
    cf, cg = u*f + v*g, q*f + r*g
    return cf >> N, cg >> N

def update_de(d, e, t, M, Mi):
    """Multiply matrix t/2^N with [d, e], modulo M."""
    u, v, q, r = t
    d_sign, e_sign = d >> 257, e >> 257
    md, me = (u & d_sign) + (v & e_sign), (q & d_sign) + (r & e_sign)
    cd, ce = (u*d + v*e) % 2**N, (q*d + r*e) % 2**N
    md -= (Mi*cd + md) % 2**N
    me -= (Mi*ce + me) % 2**N
    cd, ce = u*d + v*e + M*md, q*d + r*e + M*me
    return cd >> N, ce >> N
```
- The `normalize` function from section 4, made constant time as well:
```python
def normalize(sign, v, M):
    """Compute sign*v mod M, where v in (-2*M,M); output in [0,M)."""
    v_sign = v >> 257
    # Conditionally add M to v.
    v += M & v_sign
    c = (sign - 1) >> 1
    # Conditionally negate v.
    v = (v ^ c) - c
    v_sign = v >> 257
    # Conditionally add M to v again.
    v += M & v_sign
    return v
```
- And finally the `modinv` function too, adapted to use *&zeta;* instead of *&delta;*, and using the fixed
iteration count from section 5:
```python
def modinv(M, Mi, x):
    """Compute the modular inverse of x mod M, given Mi=1/M mod 2^N."""
    zeta, f, g, d, e = -1, M, x, 0, 1
    for _ in range((590 + N - 1) // N):
        zeta, t = divsteps_n_matrix(zeta, f % 2**N, g % 2**N)
        f, g = update_fg(f, g, t)
        d, e = update_de(d, e, t, M, Mi)
    return normalize(f, d, M)
```
- To get a variable time version, replace the `divsteps_n_matrix` function with one that uses the
divsteps loop from section 5, and a `modinv` version that calls it without the fixed iteration
count:
```python
NEGINV16 = [15, 5, 3, 9, 7, 13, 11, 1] # NEGINV16[n//2] = (-n)^-1 mod 16, for odd n

def divsteps_n_matrix_var(eta, f, g):
    """Compute eta and transition matrix t after N divsteps (multiplied by 2^N)."""
    u, v, q, r = 1, 0, 0, 1
    i = N
    while True:
        zeros = min(i, count_trailing_zeros(g))
        eta, i = eta - zeros, i - zeros
        g, u, v = g >> zeros, u << zeros, v << zeros
        if i == 0:
            break
        if eta < 0:
            eta, f, u, v, g, q, r = -eta, g, q, r, -f, -u, -v
        limit = min(min(eta + 1, i), 4)
        w = (g * NEGINV16[(f & 15) // 2]) % (2**limit)
        g, q, r = g + w*f, q + w*u, r + w*v
    return eta, (u, v, q, r)

def modinv_var(M, Mi, x):
    """Compute the modular inverse of x mod M, given Mi = 1/M mod 2^N."""
    eta, f, g, d, e = -1, M, x, 0, 1
    while g != 0:
        eta, t = divsteps_n_matrix_var(eta, f % 2**N, g % 2**N)
        f, g = update_fg(f, g, t)
        d, e = update_de(d, e, t, M, Mi)
    return normalize(f, d, M)
```
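As a final illustrative check (not part of the original write-up), both versions can be exercised end to end, assuming all the functions above have been defined. The modulus below is the secp256k1 field prime, used here only as a convenient odd 256-bit example:
```python
# Illustrative end-to-end check of modinv and modinv_var.
N = 62                                  # divsteps per group; 62 is what 64-bit implementations typically use
M = 2**256 - 2**32 - 977                # secp256k1 field prime, an odd 256-bit modulus
Mi = pow(M, -1, 2**N)                   # precompute 1/M mod 2^N (Python 3.8+)
for x in (1, 2, 0xdeadbeef, M - 1):
    assert (x * modinv(M, Mi, x)) % M == 1
    assert (x * modinv_var(M, Mi, x)) % M == 1
```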

View File

@ -7,11 +7,13 @@ extern "C" {
#include <stddef.h>
/* These rules specify the order of arguments in API calls:
/* Unless explicitly stated all pointer arguments must not be NULL.
*
* The following rules specify the order of arguments in API calls:
*
* 1. Context pointers go first, followed by output arguments, combined
* output/input arguments, and finally input-only arguments.
* 2. Array lengths always immediately the follow the argument whose length
* 2. Array lengths always immediately follow the argument whose length
* they describe, even if this violates rule 1.
* 3. Within the OUT/OUTIN/IN groups, pointers to data that is typically generated
* later go first. This means: signatures, public nonces, secret nonces,
@ -35,13 +37,13 @@ extern "C" {
* A constructed context can safely be used from multiple threads
* simultaneously, but API calls that take a non-const pointer to a context
* need exclusive access to it. In particular this is the case for
* rustsecp256k1_v0_4_0_context_destroy, rustsecp256k1_v0_4_0_context_preallocated_destroy,
* and rustsecp256k1_v0_4_0_context_randomize.
* rustsecp256k1_v0_4_1_context_destroy, rustsecp256k1_v0_4_1_context_preallocated_destroy,
* and rustsecp256k1_v0_4_1_context_randomize.
*
* Regarding randomization, either do it once at creation time (in which case
* you do not need any locking for the other calls), or use a read-write lock.
*/
typedef struct rustsecp256k1_v0_4_0_context_struct rustsecp256k1_v0_4_0_context;
typedef struct rustsecp256k1_v0_4_1_context_struct rustsecp256k1_v0_4_1_context;
/** Opaque data structure that holds rewriteable "scratch space"
*
@ -54,19 +56,20 @@ typedef struct rustsecp256k1_v0_4_0_context_struct rustsecp256k1_v0_4_0_context;
* Unlike the context object, this cannot safely be shared between threads
* without additional synchronization logic.
*/
typedef struct rustsecp256k1_v0_4_0_scratch_space_struct rustsecp256k1_v0_4_0_scratch_space;
typedef struct rustsecp256k1_v0_4_1_scratch_space_struct rustsecp256k1_v0_4_1_scratch_space;
/** Opaque data structure that holds a parsed and valid public key.
*
* The exact representation of data inside is implementation defined and not
* guaranteed to be portable between different platforms or versions. It is
* however guaranteed to be 64 bytes in size, and can be safely copied/moved.
* If you need to convert to a format suitable for storage, transmission, or
* comparison, use rustsecp256k1_v0_4_0_ec_pubkey_serialize and rustsecp256k1_v0_4_0_ec_pubkey_parse.
* If you need to convert to a format suitable for storage or transmission,
* use rustsecp256k1_v0_4_1_ec_pubkey_serialize and rustsecp256k1_v0_4_1_ec_pubkey_parse. To
* compare keys, use rustsecp256k1_v0_4_1_ec_pubkey_cmp.
*/
typedef struct {
unsigned char data[64];
} rustsecp256k1_v0_4_0_pubkey;
} rustsecp256k1_v0_4_1_pubkey;
/** Opaque data structured that holds a parsed ECDSA signature.
*
@ -74,12 +77,12 @@ typedef struct {
* guaranteed to be portable between different platforms or versions. It is
* however guaranteed to be 64 bytes in size, and can be safely copied/moved.
* If you need to convert to a format suitable for storage, transmission, or
* comparison, use the rustsecp256k1_v0_4_0_ecdsa_signature_serialize_* and
* rustsecp256k1_v0_4_0_ecdsa_signature_parse_* functions.
* comparison, use the rustsecp256k1_v0_4_1_ecdsa_signature_serialize_* and
* rustsecp256k1_v0_4_1_ecdsa_signature_parse_* functions.
*/
typedef struct {
unsigned char data[64];
} rustsecp256k1_v0_4_0_ecdsa_signature;
} rustsecp256k1_v0_4_1_ecdsa_signature;
/** A pointer to a function to deterministically generate a nonce.
*
@ -97,7 +100,7 @@ typedef struct {
* Except for test cases, this function should compute some cryptographic hash of
* the message, the algorithm, the key and the attempt.
*/
typedef int (*rustsecp256k1_v0_4_0_nonce_function)(
typedef int (*rustsecp256k1_v0_4_1_nonce_function)(
unsigned char *nonce32,
const unsigned char *msg32,
const unsigned char *key32,
@ -127,6 +130,17 @@ typedef int (*rustsecp256k1_v0_4_0_nonce_function)(
# define SECP256K1_INLINE inline
# endif
/** When this header is used at build-time the SECP256K1_BUILD define needs to be set
* to correctly setup export attributes and nullness checks. This is normally done
* by secp256k1.c but to guard against this header being included before secp256k1.c
* has had a chance to set the define (e.g. via test harnesses that just includes
* secp256k1.c) we set SECP256K1_NO_BUILD when this header is processed without the
* BUILD define so this condition can be caught.
*/
#ifndef SECP256K1_BUILD
# define SECP256K1_NO_BUILD
#endif
#ifndef SECP256K1_API
# if defined(_WIN32)
# ifdef SECP256K1_BUILD
@ -165,14 +179,14 @@ typedef int (*rustsecp256k1_v0_4_0_nonce_function)(
#define SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY (1 << 10)
#define SECP256K1_FLAGS_BIT_COMPRESSION (1 << 8)
/** Flags to pass to rustsecp256k1_v0_4_0_context_create, rustsecp256k1_v0_4_0_context_preallocated_size, and
* rustsecp256k1_v0_4_0_context_preallocated_create. */
/** Flags to pass to rustsecp256k1_v0_4_1_context_create, rustsecp256k1_v0_4_1_context_preallocated_size, and
* rustsecp256k1_v0_4_1_context_preallocated_create. */
#define SECP256K1_CONTEXT_VERIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_VERIFY)
#define SECP256K1_CONTEXT_SIGN (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_SIGN)
#define SECP256K1_CONTEXT_DECLASSIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY)
#define SECP256K1_CONTEXT_NONE (SECP256K1_FLAGS_TYPE_CONTEXT)
/** Flag to pass to rustsecp256k1_v0_4_0_ec_pubkey_serialize. */
/** Flag to pass to rustsecp256k1_v0_4_1_ec_pubkey_serialize. */
#define SECP256K1_EC_COMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION | SECP256K1_FLAGS_BIT_COMPRESSION)
#define SECP256K1_EC_UNCOMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION)
@ -188,25 +202,25 @@ typedef int (*rustsecp256k1_v0_4_0_nonce_function)(
* API consistency, but currently do not require expensive precomputations or dynamic
* allocations.
*/
SECP256K1_API extern const rustsecp256k1_v0_4_0_context *rustsecp256k1_v0_4_0_context_no_precomp;
SECP256K1_API extern const rustsecp256k1_v0_4_1_context *rustsecp256k1_v0_4_1_context_no_precomp;
/** Create a secp256k1 context object (in dynamically allocated memory).
*
* This function uses malloc to allocate memory. It is guaranteed that malloc is
* called at most once for every call of this function. If you need to avoid dynamic
* memory allocation entirely, see the functions in rustsecp256k1_v0_4_0_preallocated.h.
* memory allocation entirely, see the functions in rustsecp256k1_v0_4_1_preallocated.h.
*
* Returns: a newly created context object.
* In: flags: which parts of the context to initialize.
*
* See also rustsecp256k1_v0_4_0_context_randomize.
* See also rustsecp256k1_v0_4_1_context_randomize.
*/
/** Copy a secp256k1 context object (into dynamically allocated memory).
*
* This function uses malloc to allocate memory. It is guaranteed that malloc is
* called at most once for every call of this function. If you need to avoid dynamic
* memory allocation entirely, see the functions in rustsecp256k1_v0_4_0_preallocated.h.
* memory allocation entirely, see the functions in rustsecp256k1_v0_4_1_preallocated.h.
*
* Returns: a newly created context object.
* Args: ctx: an existing context to copy (cannot be NULL)
@ -216,14 +230,14 @@ SECP256K1_API extern const rustsecp256k1_v0_4_0_context *rustsecp256k1_v0_4_0_co
*
* The context pointer may not be used afterwards.
*
* The context to destroy must have been created using rustsecp256k1_v0_4_0_context_create
* or rustsecp256k1_v0_4_0_context_clone. If the context has instead been created using
* rustsecp256k1_v0_4_0_context_preallocated_create or rustsecp256k1_v0_4_0_context_preallocated_clone, the
* behaviour is undefined. In that case, rustsecp256k1_v0_4_0_context_preallocated_destroy must
* The context to destroy must have been created using rustsecp256k1_v0_4_1_context_create
* or rustsecp256k1_v0_4_1_context_clone. If the context has instead been created using
* rustsecp256k1_v0_4_1_context_preallocated_create or rustsecp256k1_v0_4_1_context_preallocated_clone, the
* behaviour is undefined. In that case, rustsecp256k1_v0_4_1_context_preallocated_destroy must
* be used instead.
*
* Args: ctx: an existing context to destroy, constructed using
* rustsecp256k1_v0_4_0_context_create or rustsecp256k1_v0_4_0_context_clone
* rustsecp256k1_v0_4_1_context_create or rustsecp256k1_v0_4_1_context_clone
*/
/** Set a callback function to be called when an illegal argument is passed to
@ -247,11 +261,11 @@ SECP256K1_API extern const rustsecp256k1_v0_4_0_context *rustsecp256k1_v0_4_0_co
* USE_EXTERNAL_DEFAULT_CALLBACKS is defined, which is the case if the build
* has been configured with --enable-external-default-callbacks. Then the
* following two symbols must be provided to link against:
* - void rustsecp256k1_v0_4_0_default_illegal_callback_fn(const char* message, void* data);
* - void rustsecp256k1_v0_4_0_default_error_callback_fn(const char* message, void* data);
* - void rustsecp256k1_v0_4_1_default_illegal_callback_fn(const char* message, void* data);
* - void rustsecp256k1_v0_4_1_default_error_callback_fn(const char* message, void* data);
* The library can call these default handlers even before a proper callback data
* pointer could have been set using rustsecp256k1_v0_4_0_context_set_illegal_callback or
* rustsecp256k1_v0_4_0_context_set_error_callback, e.g., when the creation of a context
* pointer could have been set using rustsecp256k1_v0_4_1_context_set_illegal_callback or
* rustsecp256k1_v0_4_1_context_set_error_callback, e.g., when the creation of a context
* fails. In this case, the corresponding default handler will be called with
* the data pointer argument set to NULL.
*
@ -261,10 +275,10 @@ SECP256K1_API extern const rustsecp256k1_v0_4_0_context *rustsecp256k1_v0_4_0_co
* (NULL restores the default handler.)
* data: the opaque pointer to pass to fun above.
*
* See also rustsecp256k1_v0_4_0_context_set_error_callback.
* See also rustsecp256k1_v0_4_1_context_set_error_callback.
*/
SECP256K1_API void rustsecp256k1_v0_4_0_context_set_illegal_callback(
rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API void rustsecp256k1_v0_4_1_context_set_illegal_callback(
rustsecp256k1_v0_4_1_context* ctx,
void (*fun)(const char* message, void* data),
const void* data
) SECP256K1_ARG_NONNULL(1);
@ -275,21 +289,21 @@ SECP256K1_API void rustsecp256k1_v0_4_0_context_set_illegal_callback(
* This can only trigger in case of a hardware failure, miscompilation,
* memory corruption, serious bug in the library, or other error would can
* otherwise result in undefined behaviour. It will not trigger due to mere
* incorrect usage of the API (see rustsecp256k1_v0_4_0_context_set_illegal_callback
* incorrect usage of the API (see rustsecp256k1_v0_4_1_context_set_illegal_callback
* for that). After this callback returns, anything may happen, including
* crashing.
*
* Args: ctx: an existing context object (cannot be NULL)
* In: fun: a pointer to a function to call when an internal error occurs,
* taking a message and an opaque pointer (NULL restores the
* default handler, see rustsecp256k1_v0_4_0_context_set_illegal_callback
* default handler, see rustsecp256k1_v0_4_1_context_set_illegal_callback
* for details).
* data: the opaque pointer to pass to fun above.
*
* See also rustsecp256k1_v0_4_0_context_set_illegal_callback.
* See also rustsecp256k1_v0_4_1_context_set_illegal_callback.
*/
SECP256K1_API void rustsecp256k1_v0_4_0_context_set_error_callback(
rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API void rustsecp256k1_v0_4_1_context_set_error_callback(
rustsecp256k1_v0_4_1_context* ctx,
void (*fun)(const char* message, void* data),
const void* data
) SECP256K1_ARG_NONNULL(1);
@ -323,9 +337,9 @@ SECP256K1_API void rustsecp256k1_v0_4_0_context_set_error_callback(
* 0x03), uncompressed (65 bytes, header byte 0x04), or hybrid (65 bytes, header
* byte 0x06 or 0x07) format public keys.
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_parse(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_pubkey* pubkey,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_parse(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_pubkey* pubkey,
const unsigned char *input,
size_t inputlen
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -340,19 +354,34 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_pa
* In/Out: outputlen: a pointer to an integer which is initially set to the
* size of output, and is overwritten with the written
* size.
* In: pubkey: a pointer to a rustsecp256k1_v0_4_0_pubkey containing an
* In: pubkey: a pointer to a rustsecp256k1_v0_4_1_pubkey containing an
* initialized public key.
* flags: SECP256K1_EC_COMPRESSED if serialization should be in
* compressed format, otherwise SECP256K1_EC_UNCOMPRESSED.
*/
SECP256K1_API int rustsecp256k1_v0_4_0_ec_pubkey_serialize(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API int rustsecp256k1_v0_4_1_ec_pubkey_serialize(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *output,
size_t *outputlen,
const rustsecp256k1_v0_4_0_pubkey* pubkey,
const rustsecp256k1_v0_4_1_pubkey* pubkey,
unsigned int flags
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** Compare two public keys using lexicographic (of compressed serialization) order
*
* Returns: <0 if the first public key is less than the second
* >0 if the first public key is greater than the second
* 0 if the two public keys are equal
* Args: ctx: a secp256k1 context object.
* In: pubkey1: first public key to compare
* pubkey2: second public key to compare
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_cmp(
const rustsecp256k1_v0_4_1_context* ctx,
const rustsecp256k1_v0_4_1_pubkey* pubkey1,
const rustsecp256k1_v0_4_1_pubkey* pubkey2
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Parse an ECDSA signature in compact (64 bytes) format.
*
* Returns: 1 when the signature could be parsed, 0 otherwise.
@ -368,9 +397,9 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ec_pubkey_serialize(
* S are zero, the resulting sig value is guaranteed to fail validation for any
* message and public key.
*/
SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_ecdsa_signature* sig,
SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_ecdsa_signature* sig,
const unsigned char *input64
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -389,9 +418,9 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(
* encoded numbers are out of range, signature validation with it is
* guaranteed to fail for every message and public key.
*/
SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_ecdsa_signature* sig,
SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_ecdsa_signature* sig,
const unsigned char *input,
size_t inputlen
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -407,11 +436,11 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(
* if 0 was returned).
* In: sig: a pointer to an initialized signature object
*/
SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *output,
size_t *outputlen,
const rustsecp256k1_v0_4_0_ecdsa_signature* sig
const rustsecp256k1_v0_4_1_ecdsa_signature* sig
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** Serialize an ECDSA signature in compact (64 byte) format.
@ -421,12 +450,12 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(
* Out: output64: a pointer to a 64-byte array to store the compact serialization
* In: sig: a pointer to an initialized signature object
*
* See rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact for details about the encoding.
* See rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact for details about the encoding.
*/
SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_compact(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_serialize_compact(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *output64,
const rustsecp256k1_v0_4_0_ecdsa_signature* sig
const rustsecp256k1_v0_4_1_ecdsa_signature* sig
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Verify an ECDSA signature.
@ -449,16 +478,16 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_compact(
* form are accepted.
*
* If you need to accept ECDSA signatures from sources that do not obey this
* rule, apply rustsecp256k1_v0_4_0_ecdsa_signature_normalize to the signature prior to
* rule, apply rustsecp256k1_v0_4_1_ecdsa_signature_normalize to the signature prior to
* validation, but be aware that doing so results in malleable signatures.
*
* For details, see the comments for that function.
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ecdsa_verify(
const rustsecp256k1_v0_4_0_context* ctx,
const rustsecp256k1_v0_4_0_ecdsa_signature *sig,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ecdsa_verify(
const rustsecp256k1_v0_4_1_context* ctx,
const rustsecp256k1_v0_4_1_ecdsa_signature *sig,
const unsigned char *msghash32,
const rustsecp256k1_v0_4_0_pubkey *pubkey
const rustsecp256k1_v0_4_1_pubkey *pubkey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** Convert a signature to a normalized lower-S form.
@ -498,25 +527,25 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ecdsa_verify
* accept various non-unique encodings, so care should be taken when this
* property is required for an application.
*
* The rustsecp256k1_v0_4_0_ecdsa_sign function will by default create signatures in the
* lower-S form, and rustsecp256k1_v0_4_0_ecdsa_verify will not accept others. In case
* The rustsecp256k1_v0_4_1_ecdsa_sign function will by default create signatures in the
* lower-S form, and rustsecp256k1_v0_4_1_ecdsa_verify will not accept others. In case
* signatures come from a system that cannot enforce this property,
* rustsecp256k1_v0_4_0_ecdsa_signature_normalize must be called before verification.
* rustsecp256k1_v0_4_1_ecdsa_signature_normalize must be called before verification.
*/
SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_normalize(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_ecdsa_signature *sigout,
const rustsecp256k1_v0_4_0_ecdsa_signature *sigin
SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_normalize(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_ecdsa_signature *sigout,
const rustsecp256k1_v0_4_1_ecdsa_signature *sigin
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(3);
/** An implementation of RFC6979 (using HMAC-SHA256) as nonce generation function.
* If a data pointer is passed, it is assumed to be a pointer to 32 bytes of
* extra entropy.
*/
SECP256K1_API extern const rustsecp256k1_v0_4_0_nonce_function rustsecp256k1_v0_4_0_nonce_function_rfc6979;
SECP256K1_API extern const rustsecp256k1_v0_4_1_nonce_function rustsecp256k1_v0_4_1_nonce_function_rfc6979;
/** A default safe nonce generation function (currently equal to rustsecp256k1_v0_4_0_nonce_function_rfc6979). */
SECP256K1_API extern const rustsecp256k1_v0_4_0_nonce_function rustsecp256k1_v0_4_0_nonce_function_default;
/** A default safe nonce generation function (currently equal to rustsecp256k1_v0_4_1_nonce_function_rfc6979). */
SECP256K1_API extern const rustsecp256k1_v0_4_1_nonce_function rustsecp256k1_v0_4_1_nonce_function_default;
/** Create an ECDSA signature.
*
@ -526,18 +555,18 @@ SECP256K1_API extern const rustsecp256k1_v0_4_0_nonce_function rustsecp256k1_v0_
* Out: sig: pointer to an array where the signature will be placed (cannot be NULL)
* In: msghash32: the 32-byte message hash being signed (cannot be NULL)
* seckey: pointer to a 32-byte secret key (cannot be NULL)
* noncefp: pointer to a nonce generation function. If NULL, rustsecp256k1_v0_4_0_nonce_function_default is used
* noncefp: pointer to a nonce generation function. If NULL, rustsecp256k1_v0_4_1_nonce_function_default is used
* ndata: pointer to arbitrary data used by the nonce generation function (can be NULL)
*
* The created signature is always in lower-S form. See
* rustsecp256k1_v0_4_0_ecdsa_signature_normalize for more details.
* rustsecp256k1_v0_4_1_ecdsa_signature_normalize for more details.
*/
SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_sign(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_ecdsa_signature *sig,
SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_sign(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_ecdsa_signature *sig,
const unsigned char *msghash32,
const unsigned char *seckey,
rustsecp256k1_v0_4_0_nonce_function noncefp,
rustsecp256k1_v0_4_1_nonce_function noncefp,
const void *ndata
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
@ -553,8 +582,8 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_sign(
* Args: ctx: pointer to a context object (cannot be NULL)
* In: seckey: pointer to a 32-byte secret key (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_verify(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_seckey_verify(
const rustsecp256k1_v0_4_1_context* ctx,
const unsigned char *seckey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
@ -566,32 +595,32 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_ve
* Out: pubkey: pointer to the created public key (cannot be NULL)
* In: seckey: pointer to a 32-byte secret key (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_create(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_pubkey *pubkey,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_create(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_pubkey *pubkey,
const unsigned char *seckey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Negates a secret key in place.
*
* Returns: 0 if the given secret key is invalid according to
* rustsecp256k1_v0_4_0_ec_seckey_verify. 1 otherwise
* rustsecp256k1_v0_4_1_ec_seckey_verify. 1 otherwise
* Args: ctx: pointer to a context object
* In/Out: seckey: pointer to the 32-byte secret key to be negated. If the
* secret key is invalid according to
* rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0 and
* rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0 and
* seckey will be set to some unspecified value. (cannot be
* NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_negate(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_seckey_negate(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *seckey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
/** Same as rustsecp256k1_v0_4_0_ec_seckey_negate, but DEPRECATED. Will be removed in
/** Same as rustsecp256k1_v0_4_1_ec_seckey_negate, but DEPRECATED. Will be removed in
* future versions. */
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_negate(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_privkey_negate(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *seckey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
@ -601,9 +630,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_n
* Args: ctx: pointer to a context object
* In/Out: pubkey: pointer to the public key to be negated (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_negate(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_pubkey *pubkey
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_negate(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_pubkey *pubkey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
/** Tweak a secret key by adding tweak to it.
@ -613,24 +642,24 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_ne
* otherwise.
* Args: ctx: pointer to a context object (cannot be NULL).
* In/Out: seckey: pointer to a 32-byte secret key. If the secret key is
* invalid according to rustsecp256k1_v0_4_0_ec_seckey_verify, this
* invalid according to rustsecp256k1_v0_4_1_ec_seckey_verify, this
* function returns 0. seckey will be set to some unspecified
* value if this function returns 0. (cannot be NULL)
* In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to
* rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0. For
* rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0. For
* uniformly random 32-byte arrays the chance of being invalid
* is negligible (around 1 in 2^128) (cannot be NULL).
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_tweak_add(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_seckey_tweak_add(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *seckey,
const unsigned char *tweak32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Same as rustsecp256k1_v0_4_0_ec_seckey_tweak_add, but DEPRECATED. Will be removed in
/** Same as rustsecp256k1_v0_4_1_ec_seckey_tweak_add, but DEPRECATED. Will be removed in
* future versions. */
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_tweak_add(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_privkey_tweak_add(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *seckey,
const unsigned char *tweak32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -645,13 +674,13 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_t
* In/Out: pubkey: pointer to a public key object. pubkey will be set to an
* invalid value if this function returns 0 (cannot be NULL).
* In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to
* rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0. For
* rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0. For
* uniformly random 32-byte arrays the chance of being invalid
* is negligible (around 1 in 2^128) (cannot be NULL).
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_tweak_add(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_pubkey *pubkey,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_tweak_add(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_pubkey *pubkey,
const unsigned char *tweak32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -660,24 +689,24 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_tw
* Returns: 0 if the arguments are invalid. 1 otherwise.
* Args: ctx: pointer to a context object (cannot be NULL).
* In/Out: seckey: pointer to a 32-byte secret key. If the secret key is
* invalid according to rustsecp256k1_v0_4_0_ec_seckey_verify, this
* invalid according to rustsecp256k1_v0_4_1_ec_seckey_verify, this
* function returns 0. seckey will be set to some unspecified
* value if this function returns 0. (cannot be NULL)
* In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to
* rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0. For
* rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0. For
* uniformly random 32-byte arrays the chance of being invalid
* is negligible (around 1 in 2^128) (cannot be NULL).
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_tweak_mul(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_seckey_tweak_mul(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *seckey,
const unsigned char *tweak32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Same as rustsecp256k1_v0_4_0_ec_seckey_tweak_mul, but DEPRECATED. Will be removed in
/** Same as rustsecp256k1_v0_4_1_ec_seckey_tweak_mul, but DEPRECATED. Will be removed in
* future versions. */
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_tweak_mul(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_privkey_tweak_mul(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *seckey,
const unsigned char *tweak32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -690,13 +719,13 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_t
* In/Out: pubkey: pointer to a public key object. pubkey will be set to an
* invalid value if this function returns 0 (cannot be NULL).
* In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to
* rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0. For
* rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0. For
* uniformly random 32-byte arrays the chance of being invalid
* is negligible (around 1 in 2^128) (cannot be NULL).
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_pubkey *pubkey,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_tweak_mul(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_pubkey *pubkey,
const unsigned char *tweak32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -721,12 +750,12 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_tw
* guaranteed and may change in the future. It is safe to call this function on
* contexts not initialized for signing; then it will have no effect and return 1.
*
* You should call this after rustsecp256k1_v0_4_0_context_create or
* rustsecp256k1_v0_4_0_context_clone (and rustsecp256k1_v0_4_0_context_preallocated_create or
* rustsecp256k1_v0_4_0_context_clone, resp.), and you may call this repeatedly afterwards.
* You should call this after rustsecp256k1_v0_4_1_context_create or
* rustsecp256k1_v0_4_1_context_clone (and rustsecp256k1_v0_4_1_context_preallocated_create or
* rustsecp256k1_v0_4_1_context_clone, resp.), and you may call this repeatedly afterwards.
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_context_randomize(
rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_context_randomize(
rustsecp256k1_v0_4_1_context* ctx,
const unsigned char *seed32
) SECP256K1_ARG_NONNULL(1);
@ -740,10 +769,10 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_context_rand
* In: ins: pointer to array of pointers to public keys (cannot be NULL)
* n: the number of public keys to add together (must be at least 1)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_combine(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_pubkey *out,
const rustsecp256k1_v0_4_0_pubkey * const * ins,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_combine(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_pubkey *out,
const rustsecp256k1_v0_4_1_pubkey * const * ins,
size_t n
) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);

View File

@ -7,11 +7,13 @@ extern "C" {
#include <stddef.h>
/* These rules specify the order of arguments in API calls:
/* Unless explicitly stated all pointer arguments must not be NULL.
*
* The following rules specify the order of arguments in API calls:
*
* 1. Context pointers go first, followed by output arguments, combined
* output/input arguments, and finally input-only arguments.
* 2. Array lengths always immediately the follow the argument whose length
* 2. Array lengths always immediately follow the argument whose length
* they describe, even if this violates rule 1.
* 3. Within the OUT/OUTIN/IN groups, pointers to data that is typically generated
* later go first. This means: signatures, public nonces, secret nonces,
@ -35,13 +37,13 @@ extern "C" {
* A constructed context can safely be used from multiple threads
* simultaneously, but API calls that take a non-const pointer to a context
* need exclusive access to it. In particular this is the case for
* rustsecp256k1_v0_4_0_context_destroy, rustsecp256k1_v0_4_0_context_preallocated_destroy,
* and rustsecp256k1_v0_4_0_context_randomize.
* rustsecp256k1_v0_4_1_context_destroy, rustsecp256k1_v0_4_1_context_preallocated_destroy,
* and rustsecp256k1_v0_4_1_context_randomize.
*
* Regarding randomization, either do it once at creation time (in which case
* you do not need any locking for the other calls), or use a read-write lock.
*/
typedef struct rustsecp256k1_v0_4_0_context_struct rustsecp256k1_v0_4_0_context;
typedef struct rustsecp256k1_v0_4_1_context_struct rustsecp256k1_v0_4_1_context;
/** Opaque data structure that holds rewriteable "scratch space"
*
@ -54,19 +56,20 @@ typedef struct rustsecp256k1_v0_4_0_context_struct rustsecp256k1_v0_4_0_context;
* Unlike the context object, this cannot safely be shared between threads
* without additional synchronization logic.
*/
typedef struct rustsecp256k1_v0_4_0_scratch_space_struct rustsecp256k1_v0_4_0_scratch_space;
typedef struct rustsecp256k1_v0_4_1_scratch_space_struct rustsecp256k1_v0_4_1_scratch_space;
/** Opaque data structure that holds a parsed and valid public key.
*
* The exact representation of data inside is implementation defined and not
* guaranteed to be portable between different platforms or versions. It is
* however guaranteed to be 64 bytes in size, and can be safely copied/moved.
* If you need to convert to a format suitable for storage, transmission, or
* comparison, use rustsecp256k1_v0_4_0_ec_pubkey_serialize and rustsecp256k1_v0_4_0_ec_pubkey_parse.
* If you need to convert to a format suitable for storage or transmission,
* use rustsecp256k1_v0_4_1_ec_pubkey_serialize and rustsecp256k1_v0_4_1_ec_pubkey_parse. To
* compare keys, use rustsecp256k1_v0_4_1_ec_pubkey_cmp.
*/
typedef struct {
unsigned char data[64];
} rustsecp256k1_v0_4_0_pubkey;
} rustsecp256k1_v0_4_1_pubkey;
/** Opaque data structured that holds a parsed ECDSA signature.
*
@ -74,12 +77,12 @@ typedef struct {
* guaranteed to be portable between different platforms or versions. It is
* however guaranteed to be 64 bytes in size, and can be safely copied/moved.
* If you need to convert to a format suitable for storage, transmission, or
* comparison, use the rustsecp256k1_v0_4_0_ecdsa_signature_serialize_* and
* rustsecp256k1_v0_4_0_ecdsa_signature_parse_* functions.
* comparison, use the rustsecp256k1_v0_4_1_ecdsa_signature_serialize_* and
* rustsecp256k1_v0_4_1_ecdsa_signature_parse_* functions.
*/
typedef struct {
unsigned char data[64];
} rustsecp256k1_v0_4_0_ecdsa_signature;
} rustsecp256k1_v0_4_1_ecdsa_signature;
/** A pointer to a function to deterministically generate a nonce.
*
@ -97,7 +100,7 @@ typedef struct {
* Except for test cases, this function should compute some cryptographic hash of
* the message, the algorithm, the key and the attempt.
*/
typedef int (*rustsecp256k1_v0_4_0_nonce_function)(
typedef int (*rustsecp256k1_v0_4_1_nonce_function)(
unsigned char *nonce32,
const unsigned char *msg32,
const unsigned char *key32,
@ -127,6 +130,17 @@ typedef int (*rustsecp256k1_v0_4_0_nonce_function)(
# define SECP256K1_INLINE inline
# endif
/** When this header is used at build-time the SECP256K1_BUILD define needs to be set
* to correctly setup export attributes and nullness checks. This is normally done
* by secp256k1.c but to guard against this header being included before secp256k1.c
* has had a chance to set the define (e.g. via test harnesses that just includes
* secp256k1.c) we set SECP256K1_NO_BUILD when this header is processed without the
* BUILD define so this condition can be caught.
*/
#ifndef SECP256K1_BUILD
# define SECP256K1_NO_BUILD
#endif
#ifndef SECP256K1_API
# if defined(_WIN32)
# ifdef SECP256K1_BUILD
@ -165,14 +179,14 @@ typedef int (*rustsecp256k1_v0_4_0_nonce_function)(
#define SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY (1 << 10)
#define SECP256K1_FLAGS_BIT_COMPRESSION (1 << 8)
/** Flags to pass to rustsecp256k1_v0_4_0_context_create, rustsecp256k1_v0_4_0_context_preallocated_size, and
* rustsecp256k1_v0_4_0_context_preallocated_create. */
/** Flags to pass to rustsecp256k1_v0_4_1_context_create, rustsecp256k1_v0_4_1_context_preallocated_size, and
* rustsecp256k1_v0_4_1_context_preallocated_create. */
#define SECP256K1_CONTEXT_VERIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_VERIFY)
#define SECP256K1_CONTEXT_SIGN (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_SIGN)
#define SECP256K1_CONTEXT_DECLASSIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY)
#define SECP256K1_CONTEXT_NONE (SECP256K1_FLAGS_TYPE_CONTEXT)
/** Flag to pass to rustsecp256k1_v0_4_0_ec_pubkey_serialize. */
/** Flag to pass to rustsecp256k1_v0_4_1_ec_pubkey_serialize. */
#define SECP256K1_EC_COMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION | SECP256K1_FLAGS_BIT_COMPRESSION)
#define SECP256K1_EC_UNCOMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION)
@ -188,20 +202,20 @@ typedef int (*rustsecp256k1_v0_4_0_nonce_function)(
* API consistency, but currently do not require expensive precomputations or dynamic
* allocations.
*/
SECP256K1_API extern const rustsecp256k1_v0_4_0_context *rustsecp256k1_v0_4_0_context_no_precomp;
SECP256K1_API extern const rustsecp256k1_v0_4_1_context *rustsecp256k1_v0_4_1_context_no_precomp;
/** Create a secp256k1 context object (in dynamically allocated memory).
*
* This function uses malloc to allocate memory. It is guaranteed that malloc is
* called at most once for every call of this function. If you need to avoid dynamic
* memory allocation entirely, see the functions in rustsecp256k1_v0_4_0_preallocated.h.
* memory allocation entirely, see the functions in rustsecp256k1_v0_4_1_preallocated.h.
*
* Returns: a newly created context object.
* In: flags: which parts of the context to initialize.
*
* See also rustsecp256k1_v0_4_0_context_randomize.
* See also rustsecp256k1_v0_4_1_context_randomize.
*/
SECP256K1_API rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_create(
SECP256K1_API rustsecp256k1_v0_4_1_context* rustsecp256k1_v0_4_1_context_create(
unsigned int flags
) SECP256K1_WARN_UNUSED_RESULT;
@ -209,30 +223,30 @@ SECP256K1_API rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_create(
*
* This function uses malloc to allocate memory. It is guaranteed that malloc is
* called at most once for every call of this function. If you need to avoid dynamic
* memory allocation entirely, see the functions in rustsecp256k1_v0_4_0_preallocated.h.
* memory allocation entirely, see the functions in rustsecp256k1_v0_4_1_preallocated.h.
*
* Returns: a newly created context object.
* Args: ctx: an existing context to copy (cannot be NULL)
*/
SECP256K1_API rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_clone(
const rustsecp256k1_v0_4_0_context* ctx
SECP256K1_API rustsecp256k1_v0_4_1_context* rustsecp256k1_v0_4_1_context_clone(
const rustsecp256k1_v0_4_1_context* ctx
) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT;
/** Destroy a secp256k1 context object (created in dynamically allocated memory).
*
* The context pointer may not be used afterwards.
*
* The context to destroy must have been created using rustsecp256k1_v0_4_0_context_create
* or rustsecp256k1_v0_4_0_context_clone. If the context has instead been created using
* rustsecp256k1_v0_4_0_context_preallocated_create or rustsecp256k1_v0_4_0_context_preallocated_clone, the
* behaviour is undefined. In that case, rustsecp256k1_v0_4_0_context_preallocated_destroy must
* The context to destroy must have been created using rustsecp256k1_v0_4_1_context_create
* or rustsecp256k1_v0_4_1_context_clone. If the context has instead been created using
* rustsecp256k1_v0_4_1_context_preallocated_create or rustsecp256k1_v0_4_1_context_preallocated_clone, the
* behaviour is undefined. In that case, rustsecp256k1_v0_4_1_context_preallocated_destroy must
* be used instead.
*
* Args: ctx: an existing context to destroy, constructed using
* rustsecp256k1_v0_4_0_context_create or rustsecp256k1_v0_4_0_context_clone
* rustsecp256k1_v0_4_1_context_create or rustsecp256k1_v0_4_1_context_clone
*/
SECP256K1_API void rustsecp256k1_v0_4_0_context_destroy(
rustsecp256k1_v0_4_0_context* ctx
SECP256K1_API void rustsecp256k1_v0_4_1_context_destroy(
rustsecp256k1_v0_4_1_context* ctx
);
/** Set a callback function to be called when an illegal argument is passed to
@ -256,11 +270,11 @@ SECP256K1_API void rustsecp256k1_v0_4_0_context_destroy(
* USE_EXTERNAL_DEFAULT_CALLBACKS is defined, which is the case if the build
* has been configured with --enable-external-default-callbacks. Then the
* following two symbols must be provided to link against:
* - void rustsecp256k1_v0_4_0_default_illegal_callback_fn(const char* message, void* data);
* - void rustsecp256k1_v0_4_0_default_error_callback_fn(const char* message, void* data);
* - void rustsecp256k1_v0_4_1_default_illegal_callback_fn(const char* message, void* data);
* - void rustsecp256k1_v0_4_1_default_error_callback_fn(const char* message, void* data);
* The library can call these default handlers even before a proper callback data
* pointer could have been set using rustsecp256k1_v0_4_0_context_set_illegal_callback or
* rustsecp256k1_v0_4_0_context_set_error_callback, e.g., when the creation of a context
* pointer could have been set using rustsecp256k1_v0_4_1_context_set_illegal_callback or
* rustsecp256k1_v0_4_1_context_set_error_callback, e.g., when the creation of a context
* fails. In this case, the corresponding default handler will be called with
* the data pointer argument set to NULL.
*
@ -270,10 +284,10 @@ SECP256K1_API void rustsecp256k1_v0_4_0_context_destroy(
* (NULL restores the default handler.)
* data: the opaque pointer to pass to fun above.
*
* See also rustsecp256k1_v0_4_0_context_set_error_callback.
* See also rustsecp256k1_v0_4_1_context_set_error_callback.
*/
SECP256K1_API void rustsecp256k1_v0_4_0_context_set_illegal_callback(
rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API void rustsecp256k1_v0_4_1_context_set_illegal_callback(
rustsecp256k1_v0_4_1_context* ctx,
void (*fun)(const char* message, void* data),
const void* data
) SECP256K1_ARG_NONNULL(1);
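The callback hook above is easiest to see in a short sketch. This is illustrative only: ctx is assumed to be a context created as documented below, log_illegal is a hypothetical helper, and stdio.h is needed for fprintf.

    #include <stdio.h>

    static void log_illegal(const char *message, void *data) {
        (void)data;  /* no callback state needed in this sketch */
        fprintf(stderr, "secp256k1 illegal argument: %s\n", message);
    }

    /* ... after the context has been created ... */
    rustsecp256k1_v0_4_1_context_set_illegal_callback(ctx, log_illegal, NULL);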
@ -284,21 +298,21 @@ SECP256K1_API void rustsecp256k1_v0_4_0_context_set_illegal_callback(
* This can only trigger in case of a hardware failure, miscompilation,
* memory corruption, serious bug in the library, or other error that can
* otherwise result in undefined behaviour. It will not trigger due to mere
* incorrect usage of the API (see rustsecp256k1_v0_4_0_context_set_illegal_callback
* incorrect usage of the API (see rustsecp256k1_v0_4_1_context_set_illegal_callback
* for that). After this callback returns, anything may happen, including
* crashing.
*
* Args: ctx: an existing context object (cannot be NULL)
* In: fun: a pointer to a function to call when an internal error occurs,
* taking a message and an opaque pointer (NULL restores the
* default handler, see rustsecp256k1_v0_4_0_context_set_illegal_callback
* default handler, see rustsecp256k1_v0_4_1_context_set_illegal_callback
* for details).
* data: the opaque pointer to pass to fun above.
*
* See also rustsecp256k1_v0_4_0_context_set_illegal_callback.
* See also rustsecp256k1_v0_4_1_context_set_illegal_callback.
*/
SECP256K1_API void rustsecp256k1_v0_4_0_context_set_error_callback(
rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API void rustsecp256k1_v0_4_1_context_set_error_callback(
rustsecp256k1_v0_4_1_context* ctx,
void (*fun)(const char* message, void* data),
const void* data
) SECP256K1_ARG_NONNULL(1);
@ -310,8 +324,8 @@ SECP256K1_API void rustsecp256k1_v0_4_0_context_set_error_callback(
* In: size: amount of memory to be available as scratch space. Some extra
* (<100 bytes) will be allocated for extra accounting.
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT rustsecp256k1_v0_4_0_scratch_space* rustsecp256k1_v0_4_0_scratch_space_create(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT rustsecp256k1_v0_4_1_scratch_space* rustsecp256k1_v0_4_1_scratch_space_create(
const rustsecp256k1_v0_4_1_context* ctx,
size_t size
) SECP256K1_ARG_NONNULL(1);
@ -321,9 +335,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT rustsecp256k1_v0_4_0_scratch_space* r
* Args: ctx: a secp256k1 context object.
* scratch: space to destroy
*/
SECP256K1_API void rustsecp256k1_v0_4_0_scratch_space_destroy(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_scratch_space* scratch
SECP256K1_API void rustsecp256k1_v0_4_1_scratch_space_destroy(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_scratch_space* scratch
) SECP256K1_ARG_NONNULL(1);
/** Parse a variable-length public key into the pubkey object.
@ -340,9 +354,9 @@ SECP256K1_API void rustsecp256k1_v0_4_0_scratch_space_destroy(
* 0x03), uncompressed (65 bytes, header byte 0x04), or hybrid (65 bytes, header
* byte 0x06 or 0x07) format public keys.
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_parse(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_pubkey* pubkey,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_parse(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_pubkey* pubkey,
const unsigned char *input,
size_t inputlen
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -357,19 +371,34 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_pa
* In/Out: outputlen: a pointer to an integer which is initially set to the
* size of output, and is overwritten with the written
* size.
* In: pubkey: a pointer to a rustsecp256k1_v0_4_0_pubkey containing an
* In: pubkey: a pointer to a rustsecp256k1_v0_4_1_pubkey containing an
* initialized public key.
* flags: SECP256K1_EC_COMPRESSED if serialization should be in
* compressed format, otherwise SECP256K1_EC_UNCOMPRESSED.
*/
SECP256K1_API int rustsecp256k1_v0_4_0_ec_pubkey_serialize(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API int rustsecp256k1_v0_4_1_ec_pubkey_serialize(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *output,
size_t *outputlen,
const rustsecp256k1_v0_4_0_pubkey* pubkey,
const rustsecp256k1_v0_4_1_pubkey* pubkey,
unsigned int flags
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
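A minimal sketch of the serialize call above, assuming ctx and pubkey are already initialized. Note that outputlen is in/out: it must hold the buffer size on entry and receives the written length on return.

    unsigned char out33[33];
    size_t outlen = sizeof(out33);

    /* Writes the 33-byte compressed encoding and updates outlen accordingly. */
    rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, out33, &outlen, &pubkey,
                                             SECP256K1_EC_COMPRESSED);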
/** Compare two public keys using the lexicographic order of their compressed serializations
*
* Returns: <0 if the first public key is less than the second
* >0 if the first public key is greater than the second
* 0 if the two public keys are equal
* Args: ctx: a secp256k1 context object.
* In: pubkey1: first public key to compare
* pubkey2: second public key to compare
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_cmp(
const rustsecp256k1_v0_4_1_context* ctx,
const rustsecp256k1_v0_4_1_pubkey* pubkey1,
const rustsecp256k1_v0_4_1_pubkey* pubkey2
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
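The new comparison function returns qsort-style results, so a canonical ordering of keys is straightforward. A hedged sketch follows; the helper names are illustrative, and the no-precomp context declared earlier is used on the assumption that no precomputation is needed for comparison.

    #include <stdlib.h>

    static int pubkey_qsort_cmp(const void *a, const void *b) {
        return rustsecp256k1_v0_4_1_ec_pubkey_cmp(
            rustsecp256k1_v0_4_1_context_no_precomp,
            (const rustsecp256k1_v0_4_1_pubkey *)a,
            (const rustsecp256k1_v0_4_1_pubkey *)b);
    }

    static void sort_pubkeys(rustsecp256k1_v0_4_1_pubkey *keys, size_t n) {
        /* Sorts by lexicographic order of the compressed serializations. */
        qsort(keys, n, sizeof(*keys), pubkey_qsort_cmp);
    }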
/** Parse an ECDSA signature in compact (64 bytes) format.
*
* Returns: 1 when the signature could be parsed, 0 otherwise.
@ -385,9 +414,9 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ec_pubkey_serialize(
* S are zero, the resulting sig value is guaranteed to fail validation for any
* message and public key.
*/
SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_ecdsa_signature* sig,
SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_ecdsa_signature* sig,
const unsigned char *input64
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -406,9 +435,9 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(
* encoded numbers are out of range, signature validation with it is
* guaranteed to fail for every message and public key.
*/
SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_ecdsa_signature* sig,
SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_ecdsa_signature* sig,
const unsigned char *input,
size_t inputlen
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -424,11 +453,11 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(
* if 0 was returned).
* In: sig: a pointer to an initialized signature object
*/
SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *output,
size_t *outputlen,
const rustsecp256k1_v0_4_0_ecdsa_signature* sig
const rustsecp256k1_v0_4_1_ecdsa_signature* sig
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** Serialize an ECDSA signature in compact (64 byte) format.
@ -438,12 +467,12 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(
* Out: output64: a pointer to a 64-byte array to store the compact serialization
* In: sig: a pointer to an initialized signature object
*
* See rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact for details about the encoding.
* See rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact for details about the encoding.
*/
SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_compact(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_serialize_compact(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *output64,
const rustsecp256k1_v0_4_0_ecdsa_signature* sig
const rustsecp256k1_v0_4_1_ecdsa_signature* sig
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Verify an ECDSA signature.
@ -466,16 +495,16 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_compact(
* form are accepted.
*
* If you need to accept ECDSA signatures from sources that do not obey this
* rule, apply rustsecp256k1_v0_4_0_ecdsa_signature_normalize to the signature prior to
* rule, apply rustsecp256k1_v0_4_1_ecdsa_signature_normalize to the signature prior to
* validation, but be aware that doing so results in malleable signatures.
*
* For details, see the comments for that function.
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ecdsa_verify(
const rustsecp256k1_v0_4_0_context* ctx,
const rustsecp256k1_v0_4_0_ecdsa_signature *sig,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ecdsa_verify(
const rustsecp256k1_v0_4_1_context* ctx,
const rustsecp256k1_v0_4_1_ecdsa_signature *sig,
const unsigned char *msghash32,
const rustsecp256k1_v0_4_0_pubkey *pubkey
const rustsecp256k1_v0_4_1_pubkey *pubkey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** Convert a signature to a normalized lower-S form.
@ -515,25 +544,25 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ecdsa_verify
* accept various non-unique encodings, so care should be taken when this
* property is required for an application.
*
* The rustsecp256k1_v0_4_0_ecdsa_sign function will by default create signatures in the
* lower-S form, and rustsecp256k1_v0_4_0_ecdsa_verify will not accept others. In case
* The rustsecp256k1_v0_4_1_ecdsa_sign function will by default create signatures in the
* lower-S form, and rustsecp256k1_v0_4_1_ecdsa_verify will not accept others. In case
* signatures come from a system that cannot enforce this property,
* rustsecp256k1_v0_4_0_ecdsa_signature_normalize must be called before verification.
* rustsecp256k1_v0_4_1_ecdsa_signature_normalize must be called before verification.
*/
SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_signature_normalize(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_ecdsa_signature *sigout,
const rustsecp256k1_v0_4_0_ecdsa_signature *sigin
SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_signature_normalize(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_ecdsa_signature *sigout,
const rustsecp256k1_v0_4_1_ecdsa_signature *sigin
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(3);
/** An implementation of RFC6979 (using HMAC-SHA256) as nonce generation function.
* If a data pointer is passed, it is assumed to be a pointer to 32 bytes of
* extra entropy.
*/
SECP256K1_API extern const rustsecp256k1_v0_4_0_nonce_function rustsecp256k1_v0_4_0_nonce_function_rfc6979;
SECP256K1_API extern const rustsecp256k1_v0_4_1_nonce_function rustsecp256k1_v0_4_1_nonce_function_rfc6979;
/** A default safe nonce generation function (currently equal to rustsecp256k1_v0_4_0_nonce_function_rfc6979). */
SECP256K1_API extern const rustsecp256k1_v0_4_0_nonce_function rustsecp256k1_v0_4_0_nonce_function_default;
/** A default safe nonce generation function (currently equal to rustsecp256k1_v0_4_1_nonce_function_rfc6979). */
SECP256K1_API extern const rustsecp256k1_v0_4_1_nonce_function rustsecp256k1_v0_4_1_nonce_function_default;
/** Create an ECDSA signature.
*
@ -543,18 +572,18 @@ SECP256K1_API extern const rustsecp256k1_v0_4_0_nonce_function rustsecp256k1_v0_
* Out: sig: pointer to an array where the signature will be placed (cannot be NULL)
* In: msghash32: the 32-byte message hash being signed (cannot be NULL)
* seckey: pointer to a 32-byte secret key (cannot be NULL)
* noncefp: pointer to a nonce generation function. If NULL, rustsecp256k1_v0_4_0_nonce_function_default is used
* noncefp: pointer to a nonce generation function. If NULL, rustsecp256k1_v0_4_1_nonce_function_default is used
* ndata: pointer to arbitrary data used by the nonce generation function (can be NULL)
*
* The created signature is always in lower-S form. See
* rustsecp256k1_v0_4_0_ecdsa_signature_normalize for more details.
* rustsecp256k1_v0_4_1_ecdsa_signature_normalize for more details.
*/
SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_sign(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_ecdsa_signature *sig,
SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_sign(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_ecdsa_signature *sig,
const unsigned char *msghash32,
const unsigned char *seckey,
rustsecp256k1_v0_4_0_nonce_function noncefp,
rustsecp256k1_v0_4_1_nonce_function noncefp,
const void *ndata
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
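Taken together, the declarations above give the usual sign-then-verify round trip. A minimal sketch, assuming seckey32 and msghash32 are caller-provided 32-byte arrays; error handling beyond the combined flag is elided.

    rustsecp256k1_v0_4_1_context *ctx =
        rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
    rustsecp256k1_v0_4_1_pubkey pubkey;
    rustsecp256k1_v0_4_1_ecdsa_signature sig;

    /* Derive the public key, sign with the default RFC6979 nonce function
     * (noncefp = NULL), then verify against the same 32-byte message hash. */
    int ok = rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey, seckey32)
          && rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &sig, msghash32, seckey32, NULL, NULL)
          && rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msghash32, &pubkey);

    rustsecp256k1_v0_4_1_context_destroy(ctx);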
@ -570,8 +599,8 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_sign(
* Args: ctx: pointer to a context object (cannot be NULL)
* In: seckey: pointer to a 32-byte secret key (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_verify(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_seckey_verify(
const rustsecp256k1_v0_4_1_context* ctx,
const unsigned char *seckey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
@ -583,32 +612,32 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_ve
* Out: pubkey: pointer to the created public key (cannot be NULL)
* In: seckey: pointer to a 32-byte secret key (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_create(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_pubkey *pubkey,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_create(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_pubkey *pubkey,
const unsigned char *seckey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Negates a secret key in place.
*
* Returns: 0 if the given secret key is invalid according to
* rustsecp256k1_v0_4_0_ec_seckey_verify. 1 otherwise
* rustsecp256k1_v0_4_1_ec_seckey_verify. 1 otherwise
* Args: ctx: pointer to a context object
* In/Out: seckey: pointer to the 32-byte secret key to be negated. If the
* secret key is invalid according to
* rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0 and
* rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0 and
* seckey will be set to some unspecified value. (cannot be
* NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_negate(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_seckey_negate(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *seckey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
/** Same as rustsecp256k1_v0_4_0_ec_seckey_negate, but DEPRECATED. Will be removed in
/** Same as rustsecp256k1_v0_4_1_ec_seckey_negate, but DEPRECATED. Will be removed in
* future versions. */
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_negate(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_privkey_negate(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *seckey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
@ -618,9 +647,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_n
* Args: ctx: pointer to a context object
* In/Out: pubkey: pointer to the public key to be negated (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_negate(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_pubkey *pubkey
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_negate(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_pubkey *pubkey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
/** Tweak a secret key by adding tweak to it.
@ -630,24 +659,24 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_ne
* otherwise.
* Args: ctx: pointer to a context object (cannot be NULL).
* In/Out: seckey: pointer to a 32-byte secret key. If the secret key is
* invalid according to rustsecp256k1_v0_4_0_ec_seckey_verify, this
* invalid according to rustsecp256k1_v0_4_1_ec_seckey_verify, this
* function returns 0. seckey will be set to some unspecified
* value if this function returns 0. (cannot be NULL)
* In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to
* rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0. For
* rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0. For
* uniformly random 32-byte arrays the chance of being invalid
* is negligible (around 1 in 2^128) (cannot be NULL).
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_tweak_add(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_seckey_tweak_add(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *seckey,
const unsigned char *tweak32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Same as rustsecp256k1_v0_4_0_ec_seckey_tweak_add, but DEPRECATED. Will be removed in
/** Same as rustsecp256k1_v0_4_1_ec_seckey_tweak_add, but DEPRECATED. Will be removed in
* future versions. */
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_tweak_add(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_privkey_tweak_add(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *seckey,
const unsigned char *tweak32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -662,13 +691,13 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_t
* In/Out: pubkey: pointer to a public key object. pubkey will be set to an
* invalid value if this function returns 0 (cannot be NULL).
* In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to
* rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0. For
* rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0. For
* uniformly random 32-byte arrays the chance of being invalid
* is negligible (around 1 in 2^128) (cannot be NULL).
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_tweak_add(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_pubkey *pubkey,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_tweak_add(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_pubkey *pubkey,
const unsigned char *tweak32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -677,24 +706,24 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_tw
* Returns: 0 if the arguments are invalid. 1 otherwise.
* Args: ctx: pointer to a context object (cannot be NULL).
* In/Out: seckey: pointer to a 32-byte secret key. If the secret key is
* invalid according to rustsecp256k1_v0_4_0_ec_seckey_verify, this
* invalid according to rustsecp256k1_v0_4_1_ec_seckey_verify, this
* function returns 0. seckey will be set to some unspecified
* value if this function returns 0. (cannot be NULL)
* In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to
* rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0. For
* rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0. For
* uniformly random 32-byte arrays the chance of being invalid
* is negligible (around 1 in 2^128) (cannot be NULL).
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_seckey_tweak_mul(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_seckey_tweak_mul(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *seckey,
const unsigned char *tweak32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Same as rustsecp256k1_v0_4_0_ec_seckey_tweak_mul, but DEPRECATED. Will be removed in
/** Same as rustsecp256k1_v0_4_1_ec_seckey_tweak_mul, but DEPRECATED. Will be removed in
* future versions. */
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_tweak_mul(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_privkey_tweak_mul(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *seckey,
const unsigned char *tweak32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -707,13 +736,13 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_privkey_t
* In/Out: pubkey: pointer to a public key object. pubkey will be set to an
* invalid value if this function returns 0 (cannot be NULL).
* In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to
* rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0. For
* rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0. For
* uniformly random 32-byte arrays the chance of being invalid
* is negligible (around 1 in 2^128) (cannot be NULL).
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_pubkey *pubkey,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_tweak_mul(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_pubkey *pubkey,
const unsigned char *tweak32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -738,12 +767,12 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_tw
* guaranteed and may change in the future. It is safe to call this function on
* contexts not initialized for signing; then it will have no effect and return 1.
*
* You should call this after rustsecp256k1_v0_4_0_context_create or
* rustsecp256k1_v0_4_0_context_clone (and rustsecp256k1_v0_4_0_context_preallocated_create or
* rustsecp256k1_v0_4_0_context_clone, resp.), and you may call this repeatedly afterwards.
* You should call this after rustsecp256k1_v0_4_1_context_create or
* rustsecp256k1_v0_4_1_context_clone (and rustsecp256k1_v0_4_1_context_preallocated_create or
* rustsecp256k1_v0_4_1_context_preallocated_clone, resp.), and you may call this repeatedly afterwards.
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_context_randomize(
rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_context_randomize(
rustsecp256k1_v0_4_1_context* ctx,
const unsigned char *seed32
) SECP256K1_ARG_NONNULL(1);
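The recommended pattern from the comment above, sketched with a caller-provided 32-byte seed32 (e.g. drawn from the OS CSPRNG); names are illustrative.

    rustsecp256k1_v0_4_1_context *signing_ctx =
        rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN);

    /* Re-randomize immediately after creation to blind signing computations. */
    if (!rustsecp256k1_v0_4_1_context_randomize(signing_ctx, seed32)) {
        /* Randomization failed: safest to treat the context as unusable. */
        rustsecp256k1_v0_4_1_context_destroy(signing_ctx);
    }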
@ -757,10 +786,10 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_context_rand
* In: ins: pointer to array of pointers to public keys (cannot be NULL)
* n: the number of public keys to add together (must be at least 1)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ec_pubkey_combine(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_pubkey *out,
const rustsecp256k1_v0_4_0_pubkey * const * ins,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ec_pubkey_combine(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_pubkey *out,
const rustsecp256k1_v0_4_1_pubkey * const * ins,
size_t n
) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);

View File

@ -10,15 +10,15 @@ extern "C" {
/** A pointer to a function that hashes an EC point to obtain an ECDH secret
*
* Returns: 1 if the point was successfully hashed.
* 0 will cause rustsecp256k1_v0_4_0_ecdh to fail and return 0.
* 0 will cause rustsecp256k1_v0_4_1_ecdh to fail and return 0.
* Other return values are not allowed, and the behaviour of
* rustsecp256k1_v0_4_0_ecdh is undefined for other return values.
* rustsecp256k1_v0_4_1_ecdh is undefined for other return values.
* Out: output: pointer to an array to be filled by the function
* In: x32: pointer to a 32-byte x coordinate
* y32: pointer to a 32-byte y coordinate
* data: arbitrary data pointer that is passed through
*/
typedef int (*rustsecp256k1_v0_4_0_ecdh_hash_function)(
typedef int (*rustsecp256k1_v0_4_1_ecdh_hash_function)(
unsigned char *output,
const unsigned char *x32,
const unsigned char *y32,
@ -27,11 +27,11 @@ typedef int (*rustsecp256k1_v0_4_0_ecdh_hash_function)(
/** An implementation of SHA256 hash function that applies to compressed public key.
* Populates the output parameter with 32 bytes. */
SECP256K1_API extern const rustsecp256k1_v0_4_0_ecdh_hash_function rustsecp256k1_v0_4_0_ecdh_hash_function_sha256;
SECP256K1_API extern const rustsecp256k1_v0_4_1_ecdh_hash_function rustsecp256k1_v0_4_1_ecdh_hash_function_sha256;
/** A default ECDH hash function (currently equal to rustsecp256k1_v0_4_0_ecdh_hash_function_sha256).
/** A default ECDH hash function (currently equal to rustsecp256k1_v0_4_1_ecdh_hash_function_sha256).
* Populates the output parameter with 32 bytes. */
SECP256K1_API extern const rustsecp256k1_v0_4_0_ecdh_hash_function rustsecp256k1_v0_4_0_ecdh_hash_function_default;
SECP256K1_API extern const rustsecp256k1_v0_4_1_ecdh_hash_function rustsecp256k1_v0_4_1_ecdh_hash_function_default;
/** Compute an EC Diffie-Hellman secret in constant time
*
@ -39,19 +39,19 @@ SECP256K1_API extern const rustsecp256k1_v0_4_0_ecdh_hash_function rustsecp256k1
* 0: scalar was invalid (zero or overflow) or hashfp returned 0
* Args: ctx: pointer to a context object (cannot be NULL)
* Out: output: pointer to an array to be filled by hashfp
* In: pubkey: a pointer to a rustsecp256k1_v0_4_0_pubkey containing an
* In: pubkey: a pointer to a rustsecp256k1_v0_4_1_pubkey containing an
* initialized public key
* seckey: a 32-byte scalar with which to multiply the point
* hashfp: pointer to a hash function. If NULL, rustsecp256k1_v0_4_0_ecdh_hash_function_sha256 is used
* hashfp: pointer to a hash function. If NULL, rustsecp256k1_v0_4_1_ecdh_hash_function_sha256 is used
* (in which case, 32 bytes will be written to output)
* data: arbitrary data pointer that is passed through to hashfp
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ecdh(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ecdh(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *output,
const rustsecp256k1_v0_4_0_pubkey *pubkey,
const rustsecp256k1_v0_4_1_pubkey *pubkey,
const unsigned char *seckey,
rustsecp256k1_v0_4_0_ecdh_hash_function hashfp,
rustsecp256k1_v0_4_1_ecdh_hash_function hashfp,
void *data
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
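A short ECDH sketch against the declaration above, assuming ctx, their_pubkey and my_seckey32 are already initialized. With hashfp = NULL the default SHA-256 hash of the compressed shared point is used, so exactly 32 bytes are written.

    unsigned char shared32[32];

    if (!rustsecp256k1_v0_4_1_ecdh(ctx, shared32, &their_pubkey, my_seckey32,
                                   NULL, NULL)) {
        /* Invalid secret key (zero/overflow) or the hash function reported failure. */
    }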

View File

@ -15,13 +15,13 @@ extern "C" {
* The exact representation of data inside is implementation defined and not
* guaranteed to be portable between different platforms or versions. It is
* however guaranteed to be 64 bytes in size, and can be safely copied/moved.
* If you need to convert to a format suitable for storage, transmission, or
* comparison, use rustsecp256k1_v0_4_0_xonly_pubkey_serialize and
* rustsecp256k1_v0_4_0_xonly_pubkey_parse.
* If you need to convert to a format suitable for storage or transmission,
* use rustsecp256k1_v0_4_1_xonly_pubkey_serialize and rustsecp256k1_v0_4_1_xonly_pubkey_parse. To
* compare keys, use rustsecp256k1_v0_4_1_xonly_pubkey_cmp.
*/
typedef struct {
unsigned char data[64];
} rustsecp256k1_v0_4_0_xonly_pubkey;
} rustsecp256k1_v0_4_1_xonly_pubkey;
/** Opaque data structure that holds a keypair consisting of a secret and a
* public key.
@ -32,7 +32,7 @@ typedef struct {
*/
typedef struct {
unsigned char data[96];
} rustsecp256k1_v0_4_0_keypair;
} rustsecp256k1_v0_4_1_keypair;
/** Parse a 32-byte sequence into a xonly_pubkey object.
*
@ -45,9 +45,9 @@ typedef struct {
* (cannot be NULL).
* In: input32: pointer to a serialized xonly_pubkey (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_xonly_pubkey_parse(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_xonly_pubkey* pubkey,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_xonly_pubkey_parse(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_xonly_pubkey* pubkey,
const unsigned char *input32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -58,16 +58,31 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_xonly_pubkey
* Args: ctx: a secp256k1 context object (cannot be NULL).
* Out: output32: a pointer to a 32-byte array to place the serialized key in
* (cannot be NULL).
* In: pubkey: a pointer to a rustsecp256k1_v0_4_0_xonly_pubkey containing an
* In: pubkey: a pointer to a rustsecp256k1_v0_4_1_xonly_pubkey containing an
* initialized public key (cannot be NULL).
*/
SECP256K1_API int rustsecp256k1_v0_4_0_xonly_pubkey_serialize(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API int rustsecp256k1_v0_4_1_xonly_pubkey_serialize(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *output32,
const rustsecp256k1_v0_4_0_xonly_pubkey* pubkey
const rustsecp256k1_v0_4_1_xonly_pubkey* pubkey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Converts a rustsecp256k1_v0_4_0_pubkey into a rustsecp256k1_v0_4_0_xonly_pubkey.
/** Compare two x-only public keys using lexicographic order
*
* Returns: <0 if the first public key is less than the second
* >0 if the first public key is greater than the second
* 0 if the two public keys are equal
* Args: ctx: a secp256k1 context object.
* In: pubkey1: first public key to compare
* pubkey2: second public key to compare
*/
SECP256K1_API int rustsecp256k1_v0_4_1_xonly_pubkey_cmp(
const rustsecp256k1_v0_4_1_context* ctx,
const rustsecp256k1_v0_4_1_xonly_pubkey* pk1,
const rustsecp256k1_v0_4_1_xonly_pubkey* pk2
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
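As with the ordinary pubkey comparison, the x-only variant returns a qsort-style result, so a zero return is the natural equality test. A brief sketch, with pk_a and pk_b assumed to be parsed x-only keys and ctx an existing context.

    if (rustsecp256k1_v0_4_1_xonly_pubkey_cmp(ctx, &pk_a, &pk_b) == 0) {
        /* Both objects represent the same x-only public key. */
    }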
/** Converts a rustsecp256k1_v0_4_1_pubkey into a rustsecp256k1_v0_4_1_xonly_pubkey.
*
* Returns: 1 if the public key was successfully converted
* 0 otherwise
@ -80,11 +95,11 @@ SECP256K1_API int rustsecp256k1_v0_4_0_xonly_pubkey_serialize(
* set to 0 otherwise. (can be NULL)
* In: pubkey: pointer to a public key that is converted (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_xonly_pubkey *xonly_pubkey,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_xonly_pubkey *xonly_pubkey,
int *pk_parity,
const rustsecp256k1_v0_4_0_pubkey *pubkey
const rustsecp256k1_v0_4_1_pubkey *pubkey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4);
/** Tweak an x-only public key by adding the generator multiplied with tweak32
@ -92,7 +107,7 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_xonly_pubkey
*
* Note that the resulting point can not in general be represented by an x-only
* pubkey because it may have an odd Y coordinate. Instead, the output_pubkey
* is a normal rustsecp256k1_v0_4_0_pubkey.
* is a normal rustsecp256k1_v0_4_1_pubkey.
*
* Returns: 0 if the arguments are invalid or the resulting public key would be
* invalid (only when the tweak is the negation of the corresponding
@ -106,24 +121,24 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_xonly_pubkey
* In: internal_pubkey: pointer to an x-only pubkey to apply the tweak to.
* (cannot be NULL).
* tweak32: pointer to a 32-byte tweak. If the tweak is invalid
* according to rustsecp256k1_v0_4_0_ec_seckey_verify, this function
* according to rustsecp256k1_v0_4_1_ec_seckey_verify, this function
* returns 0. For uniformly random 32-byte arrays the
* chance of being invalid is negligible (around 1 in
* 2^128) (cannot be NULL).
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_pubkey *output_pubkey,
const rustsecp256k1_v0_4_0_xonly_pubkey *internal_pubkey,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_pubkey *output_pubkey,
const rustsecp256k1_v0_4_1_xonly_pubkey *internal_pubkey,
const unsigned char *tweak32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** Checks that a tweaked pubkey is the result of calling
* rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add with internal_pubkey and tweak32.
* rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add with internal_pubkey and tweak32.
*
* The tweaked pubkey is represented by its 32-byte x-only serialization and
* its pk_parity, which can both be obtained by converting the result of
* tweak_add to a rustsecp256k1_v0_4_0_xonly_pubkey.
* tweak_add to a rustsecp256k1_v0_4_1_xonly_pubkey.
*
* Note that this alone does _not_ verify that the tweaked pubkey is a
* commitment. If the tweak is not chosen in a specific way, the tweaked pubkey
@ -137,17 +152,17 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_xonly_pubkey
* tweaked_pk_parity: the parity of the tweaked pubkey (whose serialization
* is passed in as tweaked_pubkey32). This must match the
* pk_parity value that is returned when calling
* rustsecp256k1_v0_4_0_xonly_pubkey with the tweaked pubkey, or
* rustsecp256k1_v0_4_1_xonly_pubkey with the tweaked pubkey, or
* this function will fail.
* internal_pubkey: pointer to an x-only public key object to apply the
* tweak to (cannot be NULL)
* tweak32: pointer to a 32-byte tweak (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(
const rustsecp256k1_v0_4_1_context* ctx,
const unsigned char *tweaked_pubkey32,
int tweaked_pk_parity,
const rustsecp256k1_v0_4_0_xonly_pubkey *internal_pubkey,
const rustsecp256k1_v0_4_1_xonly_pubkey *internal_pubkey,
const unsigned char *tweak32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5);
@ -159,12 +174,25 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_xonly_pubkey
* Out: keypair: pointer to the created keypair (cannot be NULL)
* In: seckey: pointer to a 32-byte secret key (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_keypair_create(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_keypair *keypair,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_keypair_create(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_keypair *keypair,
const unsigned char *seckey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Get the secret key from a keypair.
*
* Returns: 0 if the arguments are invalid. 1 otherwise.
* Args: ctx: pointer to a context object (cannot be NULL)
* Out: seckey: pointer to a 32-byte buffer for the secret key (cannot be NULL)
* In: keypair: pointer to a keypair (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_keypair_sec(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *seckey,
const rustsecp256k1_v0_4_1_keypair *keypair
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
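This accessor is what lets the Rust bindings recover a SecretKey from a KeyPair. A minimal sketch, assuming ctx was created with SECP256K1_CONTEXT_SIGN and seckey_in32 is a valid 32-byte secret key.

    rustsecp256k1_v0_4_1_keypair keypair;
    unsigned char seckey_out32[32];

    int ok = rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, seckey_in32)
          && rustsecp256k1_v0_4_1_keypair_sec(ctx, seckey_out32, &keypair);
    /* On success, seckey_out32 matches seckey_in32. */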
/** Get the public key from a keypair.
*
* Returns: 0 if the arguments are invalid. 1 otherwise.
@ -174,16 +202,16 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_keypair_crea
* (cannot be NULL)
* In: keypair: pointer to a keypair (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_keypair_pub(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_pubkey *pubkey,
const rustsecp256k1_v0_4_0_keypair *keypair
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_keypair_pub(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_pubkey *pubkey,
const rustsecp256k1_v0_4_1_keypair *keypair
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Get the x-only public key from a keypair.
*
* This is the same as calling rustsecp256k1_v0_4_0_keypair_pub and then
* rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey.
* This is the same as calling rustsecp256k1_v0_4_1_keypair_pub and then
* rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey.
*
* Returns: 0 if the arguments are invalid. 1 otherwise.
* Args: ctx: pointer to a context object (cannot be NULL)
@ -192,22 +220,22 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_keypair_pub(
* xonly_pubkey. If not, it's set to an invalid value (cannot be
* NULL).
* pk_parity: pointer to an integer that will be set to the pk_parity
* argument of rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey (can be NULL).
* argument of rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey (can be NULL).
* In: keypair: pointer to a keypair (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_keypair_xonly_pub(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_xonly_pubkey *pubkey,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_keypair_xonly_pub(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_xonly_pubkey *pubkey,
int *pk_parity,
const rustsecp256k1_v0_4_0_keypair *keypair
const rustsecp256k1_v0_4_1_keypair *keypair
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4);
/** Tweak a keypair by adding tweak32 to the secret key and updating the public
* key accordingly.
*
* Calling this function and then rustsecp256k1_v0_4_0_keypair_pub results in the same
* public key as calling rustsecp256k1_v0_4_0_keypair_xonly_pub and then
* rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add.
* Calling this function and then rustsecp256k1_v0_4_1_keypair_pub results in the same
* public key as calling rustsecp256k1_v0_4_1_keypair_xonly_pub and then
* rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add.
*
* Returns: 0 if the arguments are invalid or the resulting keypair would be
* invalid (only when the tweak is the negation of the keypair's
@ -219,13 +247,13 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_keypair_xonl
* an invalid value if this function returns 0 (cannot be
* NULL).
* In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according
* to rustsecp256k1_v0_4_0_ec_seckey_verify, this function returns 0. For
* to rustsecp256k1_v0_4_1_ec_seckey_verify, this function returns 0. For
* uniformly random 32-byte arrays the chance of being invalid
* is negligible (around 1 in 2^128) (cannot be NULL).
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_keypair *keypair,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_keypair *keypair,
const unsigned char *tweak32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
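The tweak functions above fit together as follows. This is a sketch assuming ctx, an initialized keypair and a 32-byte tweak32, with the return values collapsed into a single flag for brevity.

    rustsecp256k1_v0_4_1_xonly_pubkey internal_pk, tweaked_pk;
    unsigned char tweaked32[32];
    int parity, ok;

    /* Capture the internal (untweaked) x-only key, tweak the keypair, then
     * confirm the relationship with tweak_add_check. */
    ok = rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair)
      && rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(ctx, &keypair, tweak32)
      && rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &tweaked_pk, &parity, &keypair)
      && rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, tweaked32, &tweaked_pk)
      && rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(ctx, tweaked32, parity,
                                                           &internal_pk, tweak32);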

View File

@ -16,8 +16,8 @@ extern "C" {
* objects created by functions in secp256k1.h, i.e., they can be passed to any
* API function that expects a context object (see secp256k1.h for details). The
* only exception is that context objects created by functions in this module
* must be destroyed using rustsecp256k1_v0_4_0_context_preallocated_destroy (in this
* module) instead of rustsecp256k1_v0_4_0_context_destroy (in secp256k1.h).
* must be destroyed using rustsecp256k1_v0_4_1_context_preallocated_destroy (in this
* module) instead of rustsecp256k1_v0_4_1_context_destroy (in secp256k1.h).
*
* It is guaranteed that functions in this module will not call malloc or its
* friends realloc, calloc, and free.
@ -27,24 +27,24 @@ extern "C" {
* caller-provided memory.
*
* The purpose of this function is to determine how much memory must be provided
* to rustsecp256k1_v0_4_0_context_preallocated_create.
* to rustsecp256k1_v0_4_1_context_preallocated_create.
*
* Returns: the required size of the caller-provided memory block
* In: flags: which parts of the context to initialize.
*/
SECP256K1_API size_t rustsecp256k1_v0_4_0_context_preallocated_size(
SECP256K1_API size_t rustsecp256k1_v0_4_1_context_preallocated_size(
unsigned int flags
) SECP256K1_WARN_UNUSED_RESULT;
/** Create a secp256k1 context object in caller-provided memory.
*
* The caller must provide a pointer to a rewritable contiguous block of memory
* of size at least rustsecp256k1_v0_4_0_context_preallocated_size(flags) bytes, suitably
* of size at least rustsecp256k1_v0_4_1_context_preallocated_size(flags) bytes, suitably
* aligned to hold an object of any type.
*
* The block of memory is exclusively owned by the created context object during
* the lifetime of this context object, which begins with the call to this
* function and ends when a call to rustsecp256k1_v0_4_0_context_preallocated_destroy
* function and ends when a call to rustsecp256k1_v0_4_1_context_preallocated_destroy
* (which destroys the context object again) returns. During the lifetime of the
* context object, the caller is obligated not to access this block of memory,
* i.e., the caller may not read or write the memory, e.g., by copying the memory
@ -54,14 +54,14 @@ SECP256K1_API size_t rustsecp256k1_v0_4_0_context_preallocated_size(
*
* Returns: a newly created context object.
* In: prealloc: a pointer to a rewritable contiguous block of memory of
* size at least rustsecp256k1_v0_4_0_context_preallocated_size(flags)
* size at least rustsecp256k1_v0_4_1_context_preallocated_size(flags)
* bytes, as detailed above (cannot be NULL)
* flags: which parts of the context to initialize.
*
* See also rustsecp256k1_v0_4_0_context_randomize (in secp256k1.h)
* and rustsecp256k1_v0_4_0_context_preallocated_destroy.
* See also rustsecp256k1_v0_4_1_context_randomize (in secp256k1.h)
* and rustsecp256k1_v0_4_1_context_preallocated_destroy.
*/
SECP256K1_API rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_preallocated_create(
SECP256K1_API rustsecp256k1_v0_4_1_context* rustsecp256k1_v0_4_1_context_preallocated_create(
void* prealloc,
unsigned int flags
) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT;
@ -72,28 +72,28 @@ SECP256K1_API rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_preallo
* Returns: the required size of the caller-provided memory block.
* In: ctx: an existing context to copy (cannot be NULL)
*/
SECP256K1_API size_t rustsecp256k1_v0_4_0_context_preallocated_clone_size(
const rustsecp256k1_v0_4_0_context* ctx
SECP256K1_API size_t rustsecp256k1_v0_4_1_context_preallocated_clone_size(
const rustsecp256k1_v0_4_1_context* ctx
) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT;
/** Copy a secp256k1 context object into caller-provided memory.
*
* The caller must provide a pointer to a rewritable contiguous block of memory
* of size at least rustsecp256k1_v0_4_0_context_preallocated_size(flags) bytes, suitably
* of size at least rustsecp256k1_v0_4_1_context_preallocated_size(flags) bytes, suitably
* aligned to hold an object of any type.
*
* The block of memory is exclusively owned by the created context object during
* the lifetime of this context object, see the description of
* rustsecp256k1_v0_4_0_context_preallocated_create for details.
* rustsecp256k1_v0_4_1_context_preallocated_create for details.
*
* Returns: a newly created context object.
* Args: ctx: an existing context to copy (cannot be NULL)
* In: prealloc: a pointer to a rewritable contiguous block of memory of
* size at least rustsecp256k1_v0_4_0_context_preallocated_size(flags)
* size at least rustsecp256k1_v0_4_1_context_preallocated_size(flags)
* bytes, as detailed above (cannot be NULL)
*/
SECP256K1_API rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_preallocated_clone(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API rustsecp256k1_v0_4_1_context* rustsecp256k1_v0_4_1_context_preallocated_clone(
const rustsecp256k1_v0_4_1_context* ctx,
void* prealloc
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_WARN_UNUSED_RESULT;
@ -103,22 +103,22 @@ SECP256K1_API rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_preallo
* The context pointer may not be used afterwards.
*
* The context to destroy must have been created using
* rustsecp256k1_v0_4_0_context_preallocated_create or rustsecp256k1_v0_4_0_context_preallocated_clone.
* If the context has instead been created using rustsecp256k1_v0_4_0_context_create or
* rustsecp256k1_v0_4_0_context_clone, the behaviour is undefined. In that case,
* rustsecp256k1_v0_4_0_context_destroy must be used instead.
* rustsecp256k1_v0_4_1_context_preallocated_create or rustsecp256k1_v0_4_1_context_preallocated_clone.
* If the context has instead been created using rustsecp256k1_v0_4_1_context_create or
* rustsecp256k1_v0_4_1_context_clone, the behaviour is undefined. In that case,
* rustsecp256k1_v0_4_1_context_destroy must be used instead.
*
* If required, it is the responsibility of the caller to deallocate the block
* of memory properly after this function returns, e.g., by calling free on the
* preallocated pointer given to rustsecp256k1_v0_4_0_context_preallocated_create or
* rustsecp256k1_v0_4_0_context_preallocated_clone.
* preallocated pointer given to rustsecp256k1_v0_4_1_context_preallocated_create or
* rustsecp256k1_v0_4_1_context_preallocated_clone.
*
* Args: ctx: an existing context to destroy, constructed using
* rustsecp256k1_v0_4_0_context_preallocated_create or
* rustsecp256k1_v0_4_0_context_preallocated_clone (cannot be NULL)
* rustsecp256k1_v0_4_1_context_preallocated_create or
* rustsecp256k1_v0_4_1_context_preallocated_clone (cannot be NULL)
*/
SECP256K1_API void rustsecp256k1_v0_4_0_context_preallocated_destroy(
rustsecp256k1_v0_4_0_context* ctx
SECP256K1_API void rustsecp256k1_v0_4_1_context_preallocated_destroy(
rustsecp256k1_v0_4_1_context* ctx
);
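A sketch of the caller-provided-memory lifecycle documented in this header. Here the block happens to come from malloc (stdlib.h assumed), but any suitably aligned, rewritable memory that the caller leaves untouched for the context's lifetime will do.

    size_t sz = rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_CONTEXT_NONE);
    void *block = malloc(sz);

    rustsecp256k1_v0_4_1_context *pre_ctx =
        rustsecp256k1_v0_4_1_context_preallocated_create(block, SECP256K1_CONTEXT_NONE);
    /* ... use pre_ctx; the library itself never calls malloc or free here ... */
    rustsecp256k1_v0_4_1_context_preallocated_destroy(pre_ctx);
    free(block);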
#ifdef __cplusplus

View File

@ -14,8 +14,8 @@ extern "C" {
* guaranteed to be portable between different platforms or versions. It is
* however guaranteed to be 65 bytes in size, and can be safely copied/moved.
* If you need to convert to a format suitable for storage or transmission, use
* the rustsecp256k1_v0_4_0_ecdsa_signature_serialize_* and
* rustsecp256k1_v0_4_0_ecdsa_signature_parse_* functions.
* the rustsecp256k1_v0_4_1_ecdsa_signature_serialize_* and
* rustsecp256k1_v0_4_1_ecdsa_signature_parse_* functions.
*
* Furthermore, it is guaranteed that identical signatures (including their
* recoverability) will have identical representation, so they can be
@ -23,7 +23,7 @@ extern "C" {
*/
typedef struct {
unsigned char data[65];
} rustsecp256k1_v0_4_0_ecdsa_recoverable_signature;
} rustsecp256k1_v0_4_1_ecdsa_recoverable_signature;
/** Parse a compact ECDSA signature (64 bytes + recovery id).
*
@ -33,9 +33,9 @@ typedef struct {
* In: input64: a pointer to a 64-byte compact signature
* recid: the recovery id (0, 1, 2 or 3)
*/
SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_ecdsa_recoverable_signature* sig,
SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_ecdsa_recoverable_signature* sig,
const unsigned char *input64,
int recid
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -46,10 +46,10 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact
* Out: sig: a pointer to a normal signature (cannot be NULL).
* In: sigin: a pointer to a recoverable signature (cannot be NULL).
*/
SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_ecdsa_signature* sig,
const rustsecp256k1_v0_4_0_ecdsa_recoverable_signature* sigin
SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_ecdsa_signature* sig,
const rustsecp256k1_v0_4_1_ecdsa_recoverable_signature* sigin
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Serialize an ECDSA signature in compact format (64 bytes + recovery id).
@ -60,11 +60,11 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(
* recid: a pointer to an integer to hold the recovery id (can be NULL).
* In: sig: a pointer to an initialized signature object (cannot be NULL)
*/
SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_compact(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *output64,
int *recid,
const rustsecp256k1_v0_4_0_ecdsa_recoverable_signature* sig
const rustsecp256k1_v0_4_1_ecdsa_recoverable_signature* sig
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** Create a recoverable ECDSA signature.
@ -75,15 +75,15 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_com
* Out: sig: pointer to an array where the signature will be placed (cannot be NULL)
* In: msghash32: the 32-byte message hash being signed (cannot be NULL)
* seckey: pointer to a 32-byte secret key (cannot be NULL)
* noncefp: pointer to a nonce generation function. If NULL, rustsecp256k1_v0_4_0_nonce_function_default is used
* noncefp: pointer to a nonce generation function. If NULL, rustsecp256k1_v0_4_1_nonce_function_default is used
* ndata: pointer to arbitrary data used by the nonce generation function (can be NULL)
*/
SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_ecdsa_recoverable_signature *sig,
SECP256K1_API int rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_ecdsa_recoverable_signature *sig,
const unsigned char *msghash32,
const unsigned char *seckey,
rustsecp256k1_v0_4_0_nonce_function noncefp,
rustsecp256k1_v0_4_1_nonce_function noncefp,
const void *ndata
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
@ -96,10 +96,10 @@ SECP256K1_API int rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(
* In: sig: pointer to initialized signature that supports pubkey recovery (cannot be NULL)
* msghash32: the 32-byte message hash assumed to be signed (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_ecdsa_recover(
const rustsecp256k1_v0_4_0_context* ctx,
rustsecp256k1_v0_4_0_pubkey *pubkey,
const rustsecp256k1_v0_4_0_ecdsa_recoverable_signature *sig,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_ecdsa_recover(
const rustsecp256k1_v0_4_1_context* ctx,
rustsecp256k1_v0_4_1_pubkey *pubkey,
const rustsecp256k1_v0_4_1_ecdsa_recoverable_signature *sig,
const unsigned char *msghash32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
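Recoverable signing and public key recovery compose as below; a sketch assuming ctx, msghash32 and seckey32 as in the earlier ECDSA example.

    rustsecp256k1_v0_4_1_ecdsa_recoverable_signature rsig;
    rustsecp256k1_v0_4_1_pubkey recovered;

    int ok = rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(ctx, &rsig, msghash32,
                                                         seckey32, NULL, NULL)
          && rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &recovered, &rsig, msghash32);
    /* On success, recovered equals the public key of seckey32. */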

View File

@ -15,7 +15,7 @@ extern "C" {
/** A pointer to a function to deterministically generate a nonce.
*
* Same as rustsecp256k1_v0_4_0_nonce function with the exception of accepting an
* Same as rustsecp256k1_v0_4_1_nonce_function with the exception of accepting an
* additional pubkey argument and not requiring an attempt argument. The pubkey
* argument can protect signature schemes with key-prefixed challenge hash
* inputs against reusing the nonce when signing with the wrong precomputed
@ -35,7 +35,7 @@ extern "C" {
* Except for test cases, this function should compute some cryptographic hash of
* the message, the key, the pubkey, the algorithm description, and data.
*/
typedef int (*rustsecp256k1_v0_4_0_nonce_function_hardened)(
typedef int (*rustsecp256k1_v0_4_1_nonce_function_hardened)(
unsigned char *nonce32,
const unsigned char *msg32,
const unsigned char *key32,
@ -56,16 +56,16 @@ typedef int (*rustsecp256k1_v0_4_0_nonce_function_hardened)(
* bytes. Therefore, to create BIP-340 compliant signatures, algo16 must be set
* to "BIP0340/nonce\0\0\0"
*/
SECP256K1_API extern const rustsecp256k1_v0_4_0_nonce_function_hardened rustsecp256k1_v0_4_0_nonce_function_bip340;
SECP256K1_API extern const rustsecp256k1_v0_4_1_nonce_function_hardened rustsecp256k1_v0_4_1_nonce_function_bip340;
/** Create a Schnorr signature.
*
* Does _not_ strictly follow BIP-340 because it does not verify the resulting
* signature. Instead, you can manually use rustsecp256k1_v0_4_0_schnorrsig_verify and
* signature. Instead, you can manually use rustsecp256k1_v0_4_1_schnorrsig_verify and
* abort if it fails.
*
* Otherwise BIP-340 compliant if the noncefp argument is NULL or
* rustsecp256k1_v0_4_0_nonce_function_bip340 and the ndata argument is 32-byte auxiliary
* rustsecp256k1_v0_4_1_nonce_function_bip340 and the ndata argument is 32-byte auxiliary
* randomness.
*
* Returns 1 on success, 0 on failure.
@ -73,18 +73,18 @@ SECP256K1_API extern const rustsecp256k1_v0_4_0_nonce_function_hardened rustsecp
* Out: sig64: pointer to a 64-byte array to store the serialized signature (cannot be NULL)
* In: msg32: the 32-byte message being signed (cannot be NULL)
* keypair: pointer to an initialized keypair (cannot be NULL)
* noncefp: pointer to a nonce generation function. If NULL, rustsecp256k1_v0_4_0_nonce_function_bip340 is used
* noncefp: pointer to a nonce generation function. If NULL, rustsecp256k1_v0_4_1_nonce_function_bip340 is used
* ndata: pointer to arbitrary data used by the nonce generation
* function (can be NULL). If it is non-NULL and
* rustsecp256k1_v0_4_0_nonce_function_bip340 is used, then ndata must be a
* rustsecp256k1_v0_4_1_nonce_function_bip340 is used, then ndata must be a
* pointer to 32-byte auxiliary randomness as per BIP-340.
*/
SECP256K1_API int rustsecp256k1_v0_4_0_schnorrsig_sign(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API int rustsecp256k1_v0_4_1_schnorrsig_sign(
const rustsecp256k1_v0_4_1_context* ctx,
unsigned char *sig64,
const unsigned char *msg32,
const rustsecp256k1_v0_4_0_keypair *keypair,
rustsecp256k1_v0_4_0_nonce_function_hardened noncefp,
const rustsecp256k1_v0_4_1_keypair *keypair,
rustsecp256k1_v0_4_1_nonce_function_hardened noncefp,
void *ndata
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
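/* Illustrative usage sketch (not part of the header; `sk`, `msg32` and
 * `aux_rand32` are placeholder names): sign with the default BIP-340 nonce
 * function by passing NULL for noncefp and 32 bytes of auxiliary randomness as ndata. */
rustsecp256k1_v0_4_1_context *ctx = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN);
rustsecp256k1_v0_4_1_keypair keypair;
unsigned char sig64[64];
if (rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) &&
    rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig64, msg32, &keypair, NULL, aux_rand32)) {
    /* sig64 now holds the 64-byte signature. */
}
rustsecp256k1_v0_4_1_context_destroy(ctx);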
@ -97,11 +97,11 @@ SECP256K1_API int rustsecp256k1_v0_4_0_schnorrsig_sign(
* msg32: the 32-byte message being verified (cannot be NULL)
* pubkey: pointer to an x-only public key to verify with (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_0_schnorrsig_verify(
const rustsecp256k1_v0_4_0_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_4_1_schnorrsig_verify(
const rustsecp256k1_v0_4_1_context* ctx,
const unsigned char *sig64,
const unsigned char *msg32,
const rustsecp256k1_v0_4_0_xonly_pubkey *pubkey
const rustsecp256k1_v0_4_1_xonly_pubkey *pubkey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
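/* Illustrative usage sketch (not part of the header; `pk32`, `sig64` and
 * `msg32` are placeholder names): parse a 32-byte x-only public key and
 * verify a signature against it. */
rustsecp256k1_v0_4_1_context *ctx = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_VERIFY);
rustsecp256k1_v0_4_1_xonly_pubkey pk;
int valid = rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &pk, pk32) &&
            rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig64, msg32, &pk);
rustsecp256k1_v0_4_1_context_destroy(ctx);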
#ifdef __cplusplus

View File

@ -1,4 +1,4 @@
load("rustsecp256k1_v0_4_0_params.sage")
load("rustsecp256k1_v0_4_1_params.sage")
orders_done = set()
results = {}
@ -95,13 +95,13 @@ for f in sorted(results.keys()):
G = results[f]["G"]
print("# %s EXHAUSTIVE_TEST_ORDER == %i" % ("if" if first else "elif", f))
first = False
print("static const rustsecp256k1_v0_4_0_ge rustsecp256k1_v0_4_0_ge_const_g = SECP256K1_GE_CONST(")
print("static const rustsecp256k1_v0_4_1_ge rustsecp256k1_v0_4_1_ge_const_g = SECP256K1_GE_CONST(")
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[0]) >> (32 * (7 - i))) & 0xffffffff for i in range(4)))
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[0]) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8)))
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[1]) >> (32 * (7 - i))) & 0xffffffff for i in range(4)))
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x" % tuple((int(G[1]) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8)))
print(");")
print("static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_fe_const_b = SECP256K1_FE_CONST(")
print("static const rustsecp256k1_v0_4_1_fe rustsecp256k1_v0_4_1_fe_const_b = SECP256K1_FE_CONST(")
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(b) >> (32 * (7 - i))) & 0xffffffff for i in range(4)))
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x" % tuple((int(b) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8)))
print(");")

View File

@ -1,9 +1,9 @@
""" Generates the constants used in rustsecp256k1_v0_4_0_scalar_split_lambda.
""" Generates the constants used in rustsecp256k1_v0_4_1_scalar_split_lambda.
See the comments for rustsecp256k1_v0_4_0_scalar_split_lambda in src/scalar_impl.h for detailed explanations.
See the comments for rustsecp256k1_v0_4_1_scalar_split_lambda in src/scalar_impl.h for detailed explanations.
"""
load("rustsecp256k1_v0_4_0_params.sage")
load("rustsecp256k1_v0_4_1_params.sage")
def inf_norm(v):
"""Returns the infinity norm of a vector."""
@ -24,17 +24,17 @@ def gauss_reduction(i1, i2):
v2[1] -= m*v1[1]
def find_split_constants_gauss():
"""Find constants for rustsecp256k1_v0_4_0_scalar_split_lamdba using gauss reduction."""
"""Find constants for rustsecp256k1_v0_4_1_scalar_split_lamdba using gauss reduction."""
(v11, v12), (v21, v22) = gauss_reduction([0, N], [1, int(LAMBDA)])
# We use related vectors in rustsecp256k1_v0_4_0_scalar_split_lambda.
# We use related vectors in rustsecp256k1_v0_4_1_scalar_split_lambda.
A1, B1 = -v21, -v11
A2, B2 = v22, -v21
return A1, B1, A2, B2
def find_split_constants_explicit_tof():
"""Find constants for rustsecp256k1_v0_4_0_scalar_split_lamdba using the trace of Frobenius.
"""Find constants for rustsecp256k1_v0_4_1_scalar_split_lamdba using the trace of Frobenius.
See Benjamin Smith: "Easy scalar decompositions for efficient scalar multiplication on
elliptic curves and genus 2 Jacobians" (https://eprint.iacr.org/2013/672), Example 2
@ -51,7 +51,7 @@ def find_split_constants_explicit_tof():
A2 = Integer((t + c)/2 - 1)
B2 = Integer(1 - (t - c)/2)
# We use a negated b values in rustsecp256k1_v0_4_0_scalar_split_lambda.
# We use a negated b values in rustsecp256k1_v0_4_1_scalar_split_lambda.
B1, B2 = -B1, -B2
return A1, B1, A2, B2
@ -90,7 +90,7 @@ def rnddiv2(v):
return v >> 1
def scalar_lambda_split(k):
"""Equivalent to rustsecp256k1_v0_4_0_scalar_lambda_split()."""
"""Equivalent to rustsecp256k1_v0_4_1_scalar_lambda_split()."""
c1 = rnddiv2((k * G1) >> 383)
c2 = rnddiv2((k * G2) >> 383)
c1 = (c1 * -B1) % N

View File

@ -5,8 +5,8 @@ import sys
load("group_prover.sage")
load("weierstrass_prover.sage")
def formula_rustsecp256k1_v0_4_0_gej_double_var(a):
"""libsecp256k1's rustsecp256k1_v0_4_0_gej_double_var, used by various addition functions"""
def formula_rustsecp256k1_v0_4_1_gej_double_var(a):
"""libsecp256k1's rustsecp256k1_v0_4_1_gej_double_var, used by various addition functions"""
rz = a.Z * a.Y
rz = rz * 2
t1 = a.X^2
@ -29,8 +29,8 @@ def formula_rustsecp256k1_v0_4_0_gej_double_var(a):
ry = ry + t2
return jacobianpoint(rx, ry, rz)
def formula_rustsecp256k1_v0_4_0_gej_add_var(branch, a, b):
"""libsecp256k1's rustsecp256k1_v0_4_0_gej_add_var"""
def formula_rustsecp256k1_v0_4_1_gej_add_var(branch, a, b):
"""libsecp256k1's rustsecp256k1_v0_4_1_gej_add_var"""
if branch == 0:
return (constraints(), constraints(nonzero={a.Infinity : 'a_infinite'}), b)
if branch == 1:
@ -48,7 +48,7 @@ def formula_rustsecp256k1_v0_4_0_gej_add_var(branch, a, b):
i = -s1
i = i + s2
if branch == 2:
r = formula_rustsecp256k1_v0_4_0_gej_double_var(a)
r = formula_rustsecp256k1_v0_4_1_gej_double_var(a)
return (constraints(), constraints(zero={h : 'h=0', i : 'i=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}), r)
if branch == 3:
return (constraints(), constraints(zero={h : 'h=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={i : 'i!=0'}), point_at_infinity())
@ -71,8 +71,8 @@ def formula_rustsecp256k1_v0_4_0_gej_add_var(branch, a, b):
ry = ry + h3
return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz))
def formula_rustsecp256k1_v0_4_0_gej_add_ge_var(branch, a, b):
"""libsecp256k1's rustsecp256k1_v0_4_0_gej_add_ge_var, which assume bz==1"""
def formula_rustsecp256k1_v0_4_1_gej_add_ge_var(branch, a, b):
"""libsecp256k1's rustsecp256k1_v0_4_1_gej_add_ge_var, which assume bz==1"""
if branch == 0:
return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(nonzero={a.Infinity : 'a_infinite'}), b)
if branch == 1:
@ -88,7 +88,7 @@ def formula_rustsecp256k1_v0_4_0_gej_add_ge_var(branch, a, b):
i = -s1
i = i + s2
if (branch == 2):
r = formula_rustsecp256k1_v0_4_0_gej_double_var(a)
r = formula_rustsecp256k1_v0_4_1_gej_double_var(a)
return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0', i : 'i=0'}), r)
if (branch == 3):
return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity())
@ -110,8 +110,8 @@ def formula_rustsecp256k1_v0_4_0_gej_add_ge_var(branch, a, b):
ry = ry + h3
return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz))
def formula_rustsecp256k1_v0_4_0_gej_add_zinv_var(branch, a, b):
"""libsecp256k1's rustsecp256k1_v0_4_0_gej_add_zinv_var"""
def formula_rustsecp256k1_v0_4_1_gej_add_zinv_var(branch, a, b):
"""libsecp256k1's rustsecp256k1_v0_4_1_gej_add_zinv_var"""
bzinv = b.Z^(-1)
if branch == 0:
return (constraints(), constraints(nonzero={b.Infinity : 'b_infinite'}), a)
@ -134,7 +134,7 @@ def formula_rustsecp256k1_v0_4_0_gej_add_zinv_var(branch, a, b):
i = -s1
i = i + s2
if branch == 2:
r = formula_rustsecp256k1_v0_4_0_gej_double_var(a)
r = formula_rustsecp256k1_v0_4_1_gej_double_var(a)
return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0', i : 'i=0'}), r)
if branch == 3:
return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity())
@ -157,8 +157,8 @@ def formula_rustsecp256k1_v0_4_0_gej_add_zinv_var(branch, a, b):
ry = ry + h3
return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz))
def formula_rustsecp256k1_v0_4_0_gej_add_ge(branch, a, b):
"""libsecp256k1's rustsecp256k1_v0_4_0_gej_add_ge"""
def formula_rustsecp256k1_v0_4_1_gej_add_ge(branch, a, b):
"""libsecp256k1's rustsecp256k1_v0_4_1_gej_add_ge"""
zeroes = {}
nonzeroes = {}
a_infinity = False
@ -229,8 +229,8 @@ def formula_rustsecp256k1_v0_4_0_gej_add_ge(branch, a, b):
return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zeroes, nonzero=nonzeroes), point_at_infinity())
return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zeroes, nonzero=nonzeroes), jacobianpoint(rx, ry, rz))
def formula_rustsecp256k1_v0_4_0_gej_add_ge_old(branch, a, b):
"""libsecp256k1's old rustsecp256k1_v0_4_0_gej_add_ge, which fails when ay+by=0 but ax!=bx"""
def formula_rustsecp256k1_v0_4_1_gej_add_ge_old(branch, a, b):
"""libsecp256k1's old rustsecp256k1_v0_4_1_gej_add_ge, which fails when ay+by=0 but ax!=bx"""
a_infinity = (branch & 1) != 0
zero = {}
nonzero = {}
@ -292,15 +292,15 @@ def formula_rustsecp256k1_v0_4_0_gej_add_ge_old(branch, a, b):
return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zero, nonzero=nonzero), jacobianpoint(rx, ry, rz))
if __name__ == "__main__":
check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_4_0_gej_add_var)
check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_4_0_gej_add_ge_var)
check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_4_0_gej_add_zinv_var)
check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_ge", 0, 7, 16, formula_rustsecp256k1_v0_4_0_gej_add_ge)
check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_4_0_gej_add_ge_old)
check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_4_1_gej_add_var)
check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_4_1_gej_add_ge_var)
check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_4_1_gej_add_zinv_var)
check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_ge", 0, 7, 16, formula_rustsecp256k1_v0_4_1_gej_add_ge)
check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_4_1_gej_add_ge_old)
if len(sys.argv) >= 2 and sys.argv[1] == "--exhaustive":
check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_4_0_gej_add_var, 43)
check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_4_0_gej_add_ge_var, 43)
check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_4_0_gej_add_zinv_var, 43)
check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_ge", 0, 7, 16, formula_rustsecp256k1_v0_4_0_gej_add_ge, 43)
check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_0_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_4_0_gej_add_ge_old, 43)
check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_4_1_gej_add_var, 43)
check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_4_1_gej_add_ge_var, 43)
check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_4_1_gej_add_zinv_var, 43)
check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_ge", 0, 7, 16, formula_rustsecp256k1_v0_4_1_gej_add_ge, 43)
check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_4_1_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_4_1_gej_add_ge_old, 43)

View File

@ -27,8 +27,8 @@ Note:
.set field_not_M, 0xfc000000 @ ~M = ~0x3ffffff
.align 2
.global rustsecp256k1_v0_4_0_fe_mul_inner
.type rustsecp256k1_v0_4_0_fe_mul_inner, %function
.global rustsecp256k1_v0_4_1_fe_mul_inner
.type rustsecp256k1_v0_4_1_fe_mul_inner, %function
@ Arguments:
@ r0 r Restrict: can overlap with a, not with b
@ r1 a
@ -36,7 +36,7 @@ Note:
@ Stack (total 4+10*4 = 44)
@ sp + #0 saved 'r' pointer
@ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9
rustsecp256k1_v0_4_0_fe_mul_inner:
rustsecp256k1_v0_4_1_fe_mul_inner:
stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14}
sub sp, sp, #48 @ frame=44 + alignment
str r0, [sp, #0] @ save result address, we need it only at the end
@ -511,18 +511,18 @@ rustsecp256k1_v0_4_0_fe_mul_inner:
add sp, sp, #48
ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size rustsecp256k1_v0_4_0_fe_mul_inner, .-rustsecp256k1_v0_4_0_fe_mul_inner
.size rustsecp256k1_v0_4_1_fe_mul_inner, .-rustsecp256k1_v0_4_1_fe_mul_inner
.align 2
.global rustsecp256k1_v0_4_0_fe_sqr_inner
.type rustsecp256k1_v0_4_0_fe_sqr_inner, %function
.global rustsecp256k1_v0_4_1_fe_sqr_inner
.type rustsecp256k1_v0_4_1_fe_sqr_inner, %function
@ Arguments:
@ r0 r Can overlap with a
@ r1 a
@ Stack (total 4+10*4 = 44)
@ sp + #0 saved 'r' pointer
@ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9
rustsecp256k1_v0_4_0_fe_sqr_inner:
rustsecp256k1_v0_4_1_fe_sqr_inner:
stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14}
sub sp, sp, #48 @ frame=44 + alignment
str r0, [sp, #0] @ save result address, we need it only at the end
@ -909,5 +909,5 @@ rustsecp256k1_v0_4_0_fe_sqr_inner:
add sp, sp, #48
ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size rustsecp256k1_v0_4_0_fe_sqr_inner, .-rustsecp256k1_v0_4_0_fe_sqr_inner
.size rustsecp256k1_v0_4_1_fe_sqr_inner, .-rustsecp256k1_v0_4_1_fe_sqr_inner

View File

@ -16,7 +16,7 @@
reduce the odds of experiencing an unwelcome surprise.
*/
struct rustsecp256k1_v0_4_0_assumption_checker {
struct rustsecp256k1_v0_4_1_assumption_checker {
/* This uses a trick to implement a static assertion in C89: a type with an array of negative size is not
allowed. */
int dummy_array[(

View File

@ -9,25 +9,8 @@
#ifdef USE_BASIC_CONFIG
#undef USE_ASM_X86_64
#undef USE_ECMULT_STATIC_PRECOMPUTATION
#undef USE_EXTERNAL_ASM
#undef USE_EXTERNAL_DEFAULT_CALLBACKS
#undef USE_FIELD_INV_BUILTIN
#undef USE_FIELD_INV_NUM
#undef USE_NUM_GMP
#undef USE_NUM_NONE
#undef USE_SCALAR_INV_BUILTIN
#undef USE_SCALAR_INV_NUM
#undef USE_FORCE_WIDEMUL_INT64
#undef USE_FORCE_WIDEMUL_INT128
#undef ECMULT_WINDOW_SIZE
#define USE_NUM_NONE 1
#define USE_FIELD_INV_BUILTIN 1
#define USE_SCALAR_INV_BUILTIN 1
#define USE_WIDEMUL_64 1
#define ECMULT_WINDOW_SIZE 15
#define ECMULT_GEN_PREC_BITS 4
#endif /* USE_BASIC_CONFIG */

View File

@ -6,14 +6,14 @@
#include <string.h>
#include "include/secp256k1.h"
#include "include/secp256k1_ecdh.h"
#include "../include/secp256k1.h"
#include "../include/secp256k1_ecdh.h"
#include "util.h"
#include "bench.h"
typedef struct {
rustsecp256k1_v0_4_0_context *ctx;
rustsecp256k1_v0_4_0_pubkey point;
rustsecp256k1_v0_4_1_context *ctx;
rustsecp256k1_v0_4_1_pubkey point;
unsigned char scalar[32];
} bench_ecdh_data;
@ -31,7 +31,7 @@ static void bench_ecdh_setup(void* arg) {
for (i = 0; i < 32; i++) {
data->scalar[i] = i + 1;
}
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_parse(data->ctx, &data->point, point, sizeof(point)) == 1);
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(data->ctx, &data->point, point, sizeof(point)) == 1);
}
static void bench_ecdh(void* arg, int iters) {
@ -40,7 +40,7 @@ static void bench_ecdh(void* arg, int iters) {
bench_ecdh_data *data = (bench_ecdh_data*)arg;
for (i = 0; i < iters; i++) {
CHECK(rustsecp256k1_v0_4_0_ecdh(data->ctx, res, &data->point, data->scalar, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdh(data->ctx, res, &data->point, data->scalar, NULL, NULL) == 1);
}
}
@ -50,10 +50,10 @@ int main(void) {
int iters = get_iters(20000);
/* create a context with no capabilities */
data.ctx = rustsecp256k1_v0_4_0_context_create(SECP256K1_FLAGS_TYPE_CONTEXT);
data.ctx = rustsecp256k1_v0_4_1_context_create(SECP256K1_FLAGS_TYPE_CONTEXT);
run_benchmark("ecdh", bench_ecdh, bench_ecdh_setup, NULL, &data, 10, iters);
rustsecp256k1_v0_4_0_context_destroy(data.ctx);
rustsecp256k1_v0_4_1_context_destroy(data.ctx);
return 0;
}

View File

@ -5,48 +5,192 @@
***********************************************************************/
#include <stdio.h>
#include "include/secp256k1.h"
#include "secp256k1.c"
#include "../include/secp256k1.h"
#include "util.h"
#include "hash_impl.h"
#include "num_impl.h"
#include "field_impl.h"
#include "group_impl.h"
#include "scalar_impl.h"
#include "ecmult_impl.h"
#include "bench.h"
#include "secp256k1.c"
#define POINTS 32768
void help(char **argv) {
printf("Benchmark EC multiplication algorithms\n");
printf("\n");
printf("Usage: %s <help|pippenger_wnaf|strauss_wnaf|simple>\n", argv[0]);
printf("The output shows the number of multiplied and summed points right after the\n");
printf("function name. The letter 'g' indicates that one of the points is the generator.\n");
printf("The benchmarks are divided by the number of points.\n");
printf("\n");
printf("default (ecmult_multi): picks pippenger_wnaf or strauss_wnaf depending on the\n");
printf(" batch size\n");
printf("pippenger_wnaf: for all batch sizes\n");
printf("strauss_wnaf: for all batch sizes\n");
printf("simple: multiply and sum each point individually\n");
}
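/* Hypothetical example invocations (the binary name depends on the build setup):
 *   ./bench_ecmult                  combined ecmult_multi dispatcher
 *   ./bench_ecmult pippenger_wnaf   force Pippenger's wNAF for every batch size
 *   ./bench_ecmult strauss_wnaf     force Strauss' wNAF for every batch size
 *   ./bench_ecmult simple           multiply and sum each point individually */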
typedef struct {
/* Setup once in advance */
rustsecp256k1_v0_4_0_context* ctx;
rustsecp256k1_v0_4_0_scratch_space* scratch;
rustsecp256k1_v0_4_0_scalar* scalars;
rustsecp256k1_v0_4_0_ge* pubkeys;
rustsecp256k1_v0_4_0_scalar* seckeys;
rustsecp256k1_v0_4_0_gej* expected_output;
rustsecp256k1_v0_4_0_ecmult_multi_func ecmult_multi;
rustsecp256k1_v0_4_1_context* ctx;
rustsecp256k1_v0_4_1_scratch_space* scratch;
rustsecp256k1_v0_4_1_scalar* scalars;
rustsecp256k1_v0_4_1_ge* pubkeys;
rustsecp256k1_v0_4_1_gej* pubkeys_gej;
rustsecp256k1_v0_4_1_scalar* seckeys;
rustsecp256k1_v0_4_1_gej* expected_output;
rustsecp256k1_v0_4_1_ecmult_multi_func ecmult_multi;
/* Changes per test */
/* Changes per benchmark */
size_t count;
int includes_g;
/* Changes per test iteration */
/* Changes per benchmark iteration, used to pick different scalars and pubkeys
* in each run. */
size_t offset1;
size_t offset2;
/* Test output. */
rustsecp256k1_v0_4_0_gej* output;
/* Benchmark output. */
rustsecp256k1_v0_4_1_gej* output;
} bench_data;
static int bench_callback(rustsecp256k1_v0_4_0_scalar* sc, rustsecp256k1_v0_4_0_ge* ge, size_t idx, void* arg) {
/* Hashes x into [0, POINTS) twice and stores the results in offset1 and offset2. */
static void hash_into_offset(bench_data* data, size_t x) {
data->offset1 = (x * 0x537b7f6f + 0x8f66a481) % POINTS;
data->offset2 = (x * 0x7f6f537b + 0x6a1a8f49) % POINTS;
}
/* Check correctness of the benchmark by computing
 * sum(outputs) ?= (sum(scalars_gen) + sum(seckeys[i]*scalars[i]))*G */
static void bench_ecmult_teardown_helper(bench_data* data, size_t* seckey_offset, size_t* scalar_offset, size_t* scalar_gen_offset, int iters) {
int i;
rustsecp256k1_v0_4_1_gej sum_output, tmp;
rustsecp256k1_v0_4_1_scalar sum_scalars;
rustsecp256k1_v0_4_1_gej_set_infinity(&sum_output);
rustsecp256k1_v0_4_1_scalar_clear(&sum_scalars);
for (i = 0; i < iters; ++i) {
rustsecp256k1_v0_4_1_gej_add_var(&sum_output, &sum_output, &data->output[i], NULL);
if (scalar_gen_offset != NULL) {
rustsecp256k1_v0_4_1_scalar_add(&sum_scalars, &sum_scalars, &data->scalars[(*scalar_gen_offset+i) % POINTS]);
}
if (seckey_offset != NULL) {
rustsecp256k1_v0_4_1_scalar s = data->seckeys[(*seckey_offset+i) % POINTS];
rustsecp256k1_v0_4_1_scalar_mul(&s, &s, &data->scalars[(*scalar_offset+i) % POINTS]);
rustsecp256k1_v0_4_1_scalar_add(&sum_scalars, &sum_scalars, &s);
}
}
rustsecp256k1_v0_4_1_ecmult_gen(&data->ctx->ecmult_gen_ctx, &tmp, &sum_scalars);
rustsecp256k1_v0_4_1_gej_neg(&tmp, &tmp);
rustsecp256k1_v0_4_1_gej_add_var(&tmp, &tmp, &sum_output, NULL);
CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&tmp));
}
static void bench_ecmult_setup(void* arg) {
bench_data* data = (bench_data*)arg;
/* Re-randomize offset to ensure that we're using different scalars and
* group elements in each run. */
hash_into_offset(data, data->offset1);
}
static void bench_ecmult_gen(void* arg, int iters) {
bench_data* data = (bench_data*)arg;
int i;
for (i = 0; i < iters; ++i) {
rustsecp256k1_v0_4_1_ecmult_gen(&data->ctx->ecmult_gen_ctx, &data->output[i], &data->scalars[(data->offset1+i) % POINTS]);
}
}
static void bench_ecmult_gen_teardown(void* arg, int iters) {
bench_data* data = (bench_data*)arg;
bench_ecmult_teardown_helper(data, NULL, NULL, &data->offset1, iters);
}
static void bench_ecmult_const(void* arg, int iters) {
bench_data* data = (bench_data*)arg;
int i;
for (i = 0; i < iters; ++i) {
rustsecp256k1_v0_4_1_ecmult_const(&data->output[i], &data->pubkeys[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], 256);
}
}
static void bench_ecmult_const_teardown(void* arg, int iters) {
bench_data* data = (bench_data*)arg;
bench_ecmult_teardown_helper(data, &data->offset1, &data->offset2, NULL, iters);
}
static void bench_ecmult_1(void* arg, int iters) {
bench_data* data = (bench_data*)arg;
int i;
for (i = 0; i < iters; ++i) {
rustsecp256k1_v0_4_1_ecmult(&data->ctx->ecmult_ctx, &data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], NULL);
}
}
static void bench_ecmult_1_teardown(void* arg, int iters) {
bench_data* data = (bench_data*)arg;
bench_ecmult_teardown_helper(data, &data->offset1, &data->offset2, NULL, iters);
}
static void bench_ecmult_1g(void* arg, int iters) {
bench_data* data = (bench_data*)arg;
rustsecp256k1_v0_4_1_scalar zero;
int i;
rustsecp256k1_v0_4_1_scalar_set_int(&zero, 0);
for (i = 0; i < iters; ++i) {
rustsecp256k1_v0_4_1_ecmult(&data->ctx->ecmult_ctx, &data->output[i], NULL, &zero, &data->scalars[(data->offset1+i) % POINTS]);
}
}
static void bench_ecmult_1g_teardown(void* arg, int iters) {
bench_data* data = (bench_data*)arg;
bench_ecmult_teardown_helper(data, NULL, NULL, &data->offset1, iters);
}
static void bench_ecmult_2g(void* arg, int iters) {
bench_data* data = (bench_data*)arg;
int i;
for (i = 0; i < iters/2; ++i) {
rustsecp256k1_v0_4_1_ecmult(&data->ctx->ecmult_ctx, &data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], &data->scalars[(data->offset1+i) % POINTS]);
}
}
static void bench_ecmult_2g_teardown(void* arg, int iters) {
bench_data* data = (bench_data*)arg;
bench_ecmult_teardown_helper(data, &data->offset1, &data->offset2, &data->offset1, iters/2);
}
static void run_ecmult_bench(bench_data* data, int iters) {
char str[32];
sprintf(str, "ecmult_gen");
run_benchmark(str, bench_ecmult_gen, bench_ecmult_setup, bench_ecmult_gen_teardown, data, 10, iters);
sprintf(str, "ecmult_const");
run_benchmark(str, bench_ecmult_const, bench_ecmult_setup, bench_ecmult_const_teardown, data, 10, iters);
/* ecmult with non generator point */
sprintf(str, "ecmult 1");
run_benchmark(str, bench_ecmult_1, bench_ecmult_setup, bench_ecmult_1_teardown, data, 10, iters);
/* ecmult with generator point */
sprintf(str, "ecmult 1g");
run_benchmark(str, bench_ecmult_1g, bench_ecmult_setup, bench_ecmult_1g_teardown, data, 10, iters);
/* ecmult with generator and non-generator point. The reported time is per point. */
sprintf(str, "ecmult 2g");
run_benchmark(str, bench_ecmult_2g, bench_ecmult_setup, bench_ecmult_2g_teardown, data, 10, 2*iters);
}
static int bench_ecmult_multi_callback(rustsecp256k1_v0_4_1_scalar* sc, rustsecp256k1_v0_4_1_ge* ge, size_t idx, void* arg) {
bench_data* data = (bench_data*)arg;
if (data->includes_g) ++idx;
if (idx == 0) {
*sc = data->scalars[data->offset1];
*ge = rustsecp256k1_v0_4_0_ge_const_g;
*ge = rustsecp256k1_v0_4_1_ge_const_g;
} else {
*sc = data->scalars[(data->offset1 + idx) % POINTS];
*ge = data->pubkeys[(data->offset2 + idx - 1) % POINTS];
@ -54,7 +198,7 @@ static int bench_callback(rustsecp256k1_v0_4_0_scalar* sc, rustsecp256k1_v0_4_0_
return 1;
}
static void bench_ecmult(void* arg, int iters) {
static void bench_ecmult_multi(void* arg, int iters) {
bench_data* data = (bench_data*)arg;
int includes_g = data->includes_g;
@ -63,49 +207,48 @@ static void bench_ecmult(void* arg, int iters) {
iters = iters / data->count;
for (iter = 0; iter < iters; ++iter) {
data->ecmult_multi(&data->ctx->error_callback, &data->ctx->ecmult_ctx, data->scratch, &data->output[iter], data->includes_g ? &data->scalars[data->offset1] : NULL, bench_callback, arg, count - includes_g);
data->ecmult_multi(&data->ctx->error_callback, &data->ctx->ecmult_ctx, data->scratch, &data->output[iter], data->includes_g ? &data->scalars[data->offset1] : NULL, bench_ecmult_multi_callback, arg, count - includes_g);
data->offset1 = (data->offset1 + count) % POINTS;
data->offset2 = (data->offset2 + count - 1) % POINTS;
}
}
static void bench_ecmult_setup(void* arg) {
static void bench_ecmult_multi_setup(void* arg) {
bench_data* data = (bench_data*)arg;
data->offset1 = (data->count * 0x537b7f6f + 0x8f66a481) % POINTS;
data->offset2 = (data->count * 0x7f6f537b + 0x6a1a8f49) % POINTS;
hash_into_offset(data, data->count);
}
static void bench_ecmult_teardown(void* arg, int iters) {
static void bench_ecmult_multi_teardown(void* arg, int iters) {
bench_data* data = (bench_data*)arg;
int iter;
iters = iters / data->count;
/* Verify the results in teardown, to avoid doing comparisons while benchmarking. */
for (iter = 0; iter < iters; ++iter) {
rustsecp256k1_v0_4_0_gej tmp;
rustsecp256k1_v0_4_0_gej_add_var(&tmp, &data->output[iter], &data->expected_output[iter], NULL);
CHECK(rustsecp256k1_v0_4_0_gej_is_infinity(&tmp));
rustsecp256k1_v0_4_1_gej tmp;
rustsecp256k1_v0_4_1_gej_add_var(&tmp, &data->output[iter], &data->expected_output[iter], NULL);
CHECK(rustsecp256k1_v0_4_1_gej_is_infinity(&tmp));
}
}
static void generate_scalar(uint32_t num, rustsecp256k1_v0_4_0_scalar* scalar) {
rustsecp256k1_v0_4_0_sha256 sha256;
unsigned char c[11] = {'e', 'c', 'm', 'u', 'l', 't', 0, 0, 0, 0};
static void generate_scalar(uint32_t num, rustsecp256k1_v0_4_1_scalar* scalar) {
rustsecp256k1_v0_4_1_sha256 sha256;
unsigned char c[10] = {'e', 'c', 'm', 'u', 'l', 't', 0, 0, 0, 0};
unsigned char buf[32];
int overflow = 0;
c[6] = num;
c[7] = num >> 8;
c[8] = num >> 16;
c[9] = num >> 24;
rustsecp256k1_v0_4_0_sha256_initialize(&sha256);
rustsecp256k1_v0_4_0_sha256_write(&sha256, c, sizeof(c));
rustsecp256k1_v0_4_0_sha256_finalize(&sha256, buf);
rustsecp256k1_v0_4_0_scalar_set_b32(scalar, buf, &overflow);
rustsecp256k1_v0_4_1_sha256_initialize(&sha256);
rustsecp256k1_v0_4_1_sha256_write(&sha256, c, sizeof(c));
rustsecp256k1_v0_4_1_sha256_finalize(&sha256, buf);
rustsecp256k1_v0_4_1_scalar_set_b32(scalar, buf, &overflow);
CHECK(!overflow);
}
static void run_test(bench_data* data, size_t count, int includes_g, int num_iters) {
static void run_ecmult_multi_bench(bench_data* data, size_t count, int includes_g, int num_iters) {
char str[32];
static const rustsecp256k1_v0_4_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
static const rustsecp256k1_v0_4_1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
size_t iters = 1 + num_iters / count;
size_t iter;
@ -113,80 +256,89 @@ static void run_test(bench_data* data, size_t count, int includes_g, int num_ite
data->includes_g = includes_g;
/* Compute (the negation of) the expected results directly. */
data->offset1 = (data->count * 0x537b7f6f + 0x8f66a481) % POINTS;
data->offset2 = (data->count * 0x7f6f537b + 0x6a1a8f49) % POINTS;
hash_into_offset(data, data->count);
for (iter = 0; iter < iters; ++iter) {
rustsecp256k1_v0_4_0_scalar tmp;
rustsecp256k1_v0_4_0_scalar total = data->scalars[(data->offset1++) % POINTS];
rustsecp256k1_v0_4_1_scalar tmp;
rustsecp256k1_v0_4_1_scalar total = data->scalars[(data->offset1++) % POINTS];
size_t i = 0;
for (i = 0; i + 1 < count; ++i) {
rustsecp256k1_v0_4_0_scalar_mul(&tmp, &data->seckeys[(data->offset2++) % POINTS], &data->scalars[(data->offset1++) % POINTS]);
rustsecp256k1_v0_4_0_scalar_add(&total, &total, &tmp);
rustsecp256k1_v0_4_1_scalar_mul(&tmp, &data->seckeys[(data->offset2++) % POINTS], &data->scalars[(data->offset1++) % POINTS]);
rustsecp256k1_v0_4_1_scalar_add(&total, &total, &tmp);
}
rustsecp256k1_v0_4_0_scalar_negate(&total, &total);
rustsecp256k1_v0_4_0_ecmult(&data->ctx->ecmult_ctx, &data->expected_output[iter], NULL, &zero, &total);
rustsecp256k1_v0_4_1_scalar_negate(&total, &total);
rustsecp256k1_v0_4_1_ecmult(&data->ctx->ecmult_ctx, &data->expected_output[iter], NULL, &zero, &total);
}
/* Run the benchmark. */
sprintf(str, includes_g ? "ecmult_%ig" : "ecmult_%i", (int)count);
run_benchmark(str, bench_ecmult, bench_ecmult_setup, bench_ecmult_teardown, data, 10, count * iters);
sprintf(str, includes_g ? "ecmult_multi %ig" : "ecmult_multi %i", (int)count);
run_benchmark(str, bench_ecmult_multi, bench_ecmult_multi_setup, bench_ecmult_multi_teardown, data, 10, count * iters);
}
int main(int argc, char **argv) {
bench_data data;
int i, p;
rustsecp256k1_v0_4_0_gej* pubkeys_gej;
size_t scratch_size;
int iters = get_iters(10000);
data.ctx = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
scratch_size = rustsecp256k1_v0_4_0_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16;
data.scratch = rustsecp256k1_v0_4_0_scratch_space_create(data.ctx, scratch_size);
data.ecmult_multi = rustsecp256k1_v0_4_0_ecmult_multi_var;
data.ecmult_multi = rustsecp256k1_v0_4_1_ecmult_multi_var;
if (argc > 1) {
if(have_flag(argc, argv, "pippenger_wnaf")) {
if(have_flag(argc, argv, "-h")
|| have_flag(argc, argv, "--help")
|| have_flag(argc, argv, "help")) {
help(argv);
return 1;
} else if(have_flag(argc, argv, "pippenger_wnaf")) {
printf("Using pippenger_wnaf:\n");
data.ecmult_multi = rustsecp256k1_v0_4_0_ecmult_pippenger_batch_single;
data.ecmult_multi = rustsecp256k1_v0_4_1_ecmult_pippenger_batch_single;
} else if(have_flag(argc, argv, "strauss_wnaf")) {
printf("Using strauss_wnaf:\n");
data.ecmult_multi = rustsecp256k1_v0_4_0_ecmult_strauss_batch_single;
data.ecmult_multi = rustsecp256k1_v0_4_1_ecmult_strauss_batch_single;
} else if(have_flag(argc, argv, "simple")) {
printf("Using simple algorithm:\n");
data.ecmult_multi = rustsecp256k1_v0_4_0_ecmult_multi_var;
rustsecp256k1_v0_4_0_scratch_space_destroy(data.ctx, data.scratch);
data.scratch = NULL;
} else {
fprintf(stderr, "%s: unrecognized argument '%s'.\n", argv[0], argv[1]);
fprintf(stderr, "Use 'pippenger_wnaf', 'strauss_wnaf', 'simple' or no argument to benchmark a combined algorithm.\n");
fprintf(stderr, "%s: unrecognized argument '%s'.\n\n", argv[0], argv[1]);
help(argv);
return 1;
}
}
data.ctx = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
scratch_size = rustsecp256k1_v0_4_1_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16;
if (!have_flag(argc, argv, "simple")) {
data.scratch = rustsecp256k1_v0_4_1_scratch_space_create(data.ctx, scratch_size);
} else {
data.scratch = NULL;
}
/* Allocate stuff */
data.scalars = malloc(sizeof(rustsecp256k1_v0_4_0_scalar) * POINTS);
data.seckeys = malloc(sizeof(rustsecp256k1_v0_4_0_scalar) * POINTS);
data.pubkeys = malloc(sizeof(rustsecp256k1_v0_4_0_ge) * POINTS);
data.expected_output = malloc(sizeof(rustsecp256k1_v0_4_0_gej) * (iters + 1));
data.output = malloc(sizeof(rustsecp256k1_v0_4_0_gej) * (iters + 1));
data.scalars = malloc(sizeof(rustsecp256k1_v0_4_1_scalar) * POINTS);
data.seckeys = malloc(sizeof(rustsecp256k1_v0_4_1_scalar) * POINTS);
data.pubkeys = malloc(sizeof(rustsecp256k1_v0_4_1_ge) * POINTS);
data.pubkeys_gej = malloc(sizeof(rustsecp256k1_v0_4_1_gej) * POINTS);
data.expected_output = malloc(sizeof(rustsecp256k1_v0_4_1_gej) * (iters + 1));
data.output = malloc(sizeof(rustsecp256k1_v0_4_1_gej) * (iters + 1));
/* Generate a set of scalars, and private/public keypairs. */
pubkeys_gej = malloc(sizeof(rustsecp256k1_v0_4_0_gej) * POINTS);
rustsecp256k1_v0_4_0_gej_set_ge(&pubkeys_gej[0], &rustsecp256k1_v0_4_0_ge_const_g);
rustsecp256k1_v0_4_0_scalar_set_int(&data.seckeys[0], 1);
rustsecp256k1_v0_4_1_gej_set_ge(&data.pubkeys_gej[0], &rustsecp256k1_v0_4_1_ge_const_g);
rustsecp256k1_v0_4_1_scalar_set_int(&data.seckeys[0], 1);
for (i = 0; i < POINTS; ++i) {
generate_scalar(i, &data.scalars[i]);
if (i) {
rustsecp256k1_v0_4_0_gej_double_var(&pubkeys_gej[i], &pubkeys_gej[i - 1], NULL);
rustsecp256k1_v0_4_0_scalar_add(&data.seckeys[i], &data.seckeys[i - 1], &data.seckeys[i - 1]);
rustsecp256k1_v0_4_1_gej_double_var(&data.pubkeys_gej[i], &data.pubkeys_gej[i - 1], NULL);
rustsecp256k1_v0_4_1_scalar_add(&data.seckeys[i], &data.seckeys[i - 1], &data.seckeys[i - 1]);
}
}
rustsecp256k1_v0_4_0_ge_set_all_gej_var(data.pubkeys, pubkeys_gej, POINTS);
free(pubkeys_gej);
rustsecp256k1_v0_4_1_ge_set_all_gej_var(data.pubkeys, data.pubkeys_gej, POINTS);
/* Initialize offset1 and offset2 */
hash_into_offset(&data, 0);
run_ecmult_bench(&data, iters);
for (i = 1; i <= 8; ++i) {
run_test(&data, i, 1, iters);
run_ecmult_multi_bench(&data, i, 1, iters);
}
/* This is disabled with low count of iterations because the loop runs 77 times even with iters=1
@ -195,17 +347,18 @@ int main(int argc, char **argv) {
if (iters > 2) {
for (p = 0; p <= 11; ++p) {
for (i = 9; i <= 16; ++i) {
run_test(&data, i << p, 1, iters);
run_ecmult_multi_bench(&data, i << p, 1, iters);
}
}
}
if (data.scratch != NULL) {
rustsecp256k1_v0_4_0_scratch_space_destroy(data.ctx, data.scratch);
rustsecp256k1_v0_4_1_scratch_space_destroy(data.ctx, data.scratch);
}
rustsecp256k1_v0_4_0_context_destroy(data.ctx);
rustsecp256k1_v0_4_1_context_destroy(data.ctx);
free(data.scalars);
free(data.pubkeys);
free(data.pubkeys_gej);
free(data.seckeys);
free(data.output);
free(data.expected_output);

View File

@ -5,25 +5,24 @@
***********************************************************************/
#include <stdio.h>
#include "include/secp256k1.h"
#include "secp256k1.c"
#include "../include/secp256k1.h"
#include "assumptions.h"
#include "util.h"
#include "hash_impl.h"
#include "num_impl.h"
#include "field_impl.h"
#include "group_impl.h"
#include "scalar_impl.h"
#include "ecmult_const_impl.h"
#include "ecmult_impl.h"
#include "bench.h"
#include "secp256k1.c"
typedef struct {
rustsecp256k1_v0_4_0_scalar scalar[2];
rustsecp256k1_v0_4_0_fe fe[4];
rustsecp256k1_v0_4_0_ge ge[2];
rustsecp256k1_v0_4_0_gej gej[2];
rustsecp256k1_v0_4_1_scalar scalar[2];
rustsecp256k1_v0_4_1_fe fe[4];
rustsecp256k1_v0_4_1_ge ge[2];
rustsecp256k1_v0_4_1_gej gej[2];
unsigned char data[64];
int wnaf[256];
} bench_inv;
@ -64,18 +63,18 @@ void bench_setup(void* arg) {
}
};
rustsecp256k1_v0_4_0_scalar_set_b32(&data->scalar[0], init[0], NULL);
rustsecp256k1_v0_4_0_scalar_set_b32(&data->scalar[1], init[1], NULL);
rustsecp256k1_v0_4_0_fe_set_b32(&data->fe[0], init[0]);
rustsecp256k1_v0_4_0_fe_set_b32(&data->fe[1], init[1]);
rustsecp256k1_v0_4_0_fe_set_b32(&data->fe[2], init[2]);
rustsecp256k1_v0_4_0_fe_set_b32(&data->fe[3], init[3]);
CHECK(rustsecp256k1_v0_4_0_ge_set_xo_var(&data->ge[0], &data->fe[0], 0));
CHECK(rustsecp256k1_v0_4_0_ge_set_xo_var(&data->ge[1], &data->fe[1], 1));
rustsecp256k1_v0_4_0_gej_set_ge(&data->gej[0], &data->ge[0]);
rustsecp256k1_v0_4_0_gej_rescale(&data->gej[0], &data->fe[2]);
rustsecp256k1_v0_4_0_gej_set_ge(&data->gej[1], &data->ge[1]);
rustsecp256k1_v0_4_0_gej_rescale(&data->gej[1], &data->fe[3]);
rustsecp256k1_v0_4_1_scalar_set_b32(&data->scalar[0], init[0], NULL);
rustsecp256k1_v0_4_1_scalar_set_b32(&data->scalar[1], init[1], NULL);
rustsecp256k1_v0_4_1_fe_set_b32(&data->fe[0], init[0]);
rustsecp256k1_v0_4_1_fe_set_b32(&data->fe[1], init[1]);
rustsecp256k1_v0_4_1_fe_set_b32(&data->fe[2], init[2]);
rustsecp256k1_v0_4_1_fe_set_b32(&data->fe[3], init[3]);
CHECK(rustsecp256k1_v0_4_1_ge_set_xo_var(&data->ge[0], &data->fe[0], 0));
CHECK(rustsecp256k1_v0_4_1_ge_set_xo_var(&data->ge[1], &data->fe[1], 1));
rustsecp256k1_v0_4_1_gej_set_ge(&data->gej[0], &data->ge[0]);
rustsecp256k1_v0_4_1_gej_rescale(&data->gej[0], &data->fe[2]);
rustsecp256k1_v0_4_1_gej_set_ge(&data->gej[1], &data->ge[1]);
rustsecp256k1_v0_4_1_gej_rescale(&data->gej[1], &data->fe[3]);
memcpy(data->data, init[0], 32);
memcpy(data->data + 32, init[1], 32);
}
@ -85,7 +84,7 @@ void bench_scalar_add(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
j += rustsecp256k1_v0_4_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
j += rustsecp256k1_v0_4_1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
}
CHECK(j <= iters);
}
@ -95,16 +94,7 @@ void bench_scalar_negate(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_scalar_negate(&data->scalar[0], &data->scalar[0]);
}
}
void bench_scalar_sqr(void* arg, int iters) {
int i;
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_scalar_sqr(&data->scalar[0], &data->scalar[0]);
rustsecp256k1_v0_4_1_scalar_negate(&data->scalar[0], &data->scalar[0]);
}
}
@ -113,7 +103,7 @@ void bench_scalar_mul(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_scalar_mul(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
rustsecp256k1_v0_4_1_scalar_mul(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
}
}
@ -122,8 +112,8 @@ void bench_scalar_split(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_scalar_split_lambda(&data->scalar[0], &data->scalar[1], &data->scalar[0]);
j += rustsecp256k1_v0_4_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
rustsecp256k1_v0_4_1_scalar_split_lambda(&data->scalar[0], &data->scalar[1], &data->scalar[0]);
j += rustsecp256k1_v0_4_1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
}
CHECK(j <= iters);
}
@ -133,8 +123,8 @@ void bench_scalar_inverse(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_scalar_inverse(&data->scalar[0], &data->scalar[0]);
j += rustsecp256k1_v0_4_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
rustsecp256k1_v0_4_1_scalar_inverse(&data->scalar[0], &data->scalar[0]);
j += rustsecp256k1_v0_4_1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
}
CHECK(j <= iters);
}
@ -144,8 +134,8 @@ void bench_scalar_inverse_var(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_scalar_inverse_var(&data->scalar[0], &data->scalar[0]);
j += rustsecp256k1_v0_4_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
rustsecp256k1_v0_4_1_scalar_inverse_var(&data->scalar[0], &data->scalar[0]);
j += rustsecp256k1_v0_4_1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
}
CHECK(j <= iters);
}
@ -155,7 +145,7 @@ void bench_field_normalize(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_fe_normalize(&data->fe[0]);
rustsecp256k1_v0_4_1_fe_normalize(&data->fe[0]);
}
}
@ -164,7 +154,7 @@ void bench_field_normalize_weak(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_fe_normalize_weak(&data->fe[0]);
rustsecp256k1_v0_4_1_fe_normalize_weak(&data->fe[0]);
}
}
@ -173,7 +163,7 @@ void bench_field_mul(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_fe_mul(&data->fe[0], &data->fe[0], &data->fe[1]);
rustsecp256k1_v0_4_1_fe_mul(&data->fe[0], &data->fe[0], &data->fe[1]);
}
}
@ -182,7 +172,7 @@ void bench_field_sqr(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_fe_sqr(&data->fe[0], &data->fe[0]);
rustsecp256k1_v0_4_1_fe_sqr(&data->fe[0], &data->fe[0]);
}
}
@ -191,8 +181,8 @@ void bench_field_inverse(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_fe_inv(&data->fe[0], &data->fe[0]);
rustsecp256k1_v0_4_0_fe_add(&data->fe[0], &data->fe[1]);
rustsecp256k1_v0_4_1_fe_inv(&data->fe[0], &data->fe[0]);
rustsecp256k1_v0_4_1_fe_add(&data->fe[0], &data->fe[1]);
}
}
@ -201,20 +191,20 @@ void bench_field_inverse_var(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_fe_inv_var(&data->fe[0], &data->fe[0]);
rustsecp256k1_v0_4_0_fe_add(&data->fe[0], &data->fe[1]);
rustsecp256k1_v0_4_1_fe_inv_var(&data->fe[0], &data->fe[0]);
rustsecp256k1_v0_4_1_fe_add(&data->fe[0], &data->fe[1]);
}
}
void bench_field_sqrt(void* arg, int iters) {
int i, j = 0;
bench_inv *data = (bench_inv*)arg;
rustsecp256k1_v0_4_0_fe t;
rustsecp256k1_v0_4_1_fe t;
for (i = 0; i < iters; i++) {
t = data->fe[0];
j += rustsecp256k1_v0_4_0_fe_sqrt(&data->fe[0], &t);
rustsecp256k1_v0_4_0_fe_add(&data->fe[0], &data->fe[1]);
j += rustsecp256k1_v0_4_1_fe_sqrt(&data->fe[0], &t);
rustsecp256k1_v0_4_1_fe_add(&data->fe[0], &data->fe[1]);
}
CHECK(j <= iters);
}
@ -224,7 +214,7 @@ void bench_group_double_var(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_gej_double_var(&data->gej[0], &data->gej[0], NULL);
rustsecp256k1_v0_4_1_gej_double_var(&data->gej[0], &data->gej[0], NULL);
}
}
@ -233,7 +223,7 @@ void bench_group_add_var(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_gej_add_var(&data->gej[0], &data->gej[0], &data->gej[1], NULL);
rustsecp256k1_v0_4_1_gej_add_var(&data->gej[0], &data->gej[0], &data->gej[1], NULL);
}
}
@ -242,7 +232,7 @@ void bench_group_add_affine(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_gej_add_ge(&data->gej[0], &data->gej[0], &data->ge[1]);
rustsecp256k1_v0_4_1_gej_add_ge(&data->gej[0], &data->gej[0], &data->ge[1]);
}
}
@ -251,45 +241,27 @@ void bench_group_add_affine_var(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_gej_add_ge_var(&data->gej[0], &data->gej[0], &data->ge[1], NULL);
rustsecp256k1_v0_4_1_gej_add_ge_var(&data->gej[0], &data->gej[0], &data->ge[1], NULL);
}
}
void bench_group_jacobi_var(void* arg, int iters) {
int i, j = 0;
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
j += rustsecp256k1_v0_4_0_gej_has_quad_y_var(&data->gej[0]);
/* Vary the Y and Z coordinates of the input (the X coordinate doesn't matter to
rustsecp256k1_v0_4_0_gej_has_quad_y_var). Note that the resulting coordinates will
generally not correspond to a point on the curve, but this is not a problem
for the code being benchmarked here. Adding and normalizing have less
overhead than EC operations (which could guarantee the point remains on the
curve). */
rustsecp256k1_v0_4_0_fe_add(&data->gej[0].y, &data->fe[1]);
rustsecp256k1_v0_4_0_fe_add(&data->gej[0].z, &data->fe[2]);
rustsecp256k1_v0_4_0_fe_normalize_var(&data->gej[0].y);
rustsecp256k1_v0_4_0_fe_normalize_var(&data->gej[0].z);
}
CHECK(j <= iters);
}
void bench_group_to_affine_var(void* arg, int iters) {
int i;
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; ++i) {
rustsecp256k1_v0_4_0_ge_set_gej_var(&data->ge[1], &data->gej[0]);
rustsecp256k1_v0_4_1_ge_set_gej_var(&data->ge[1], &data->gej[0]);
/* Use the output affine X/Y coordinates to vary the input X/Y/Z coordinates.
Similar to bench_group_jacobi_var, this approach does not result in
coordinates of points on the curve. */
rustsecp256k1_v0_4_0_fe_add(&data->gej[0].x, &data->ge[1].y);
rustsecp256k1_v0_4_0_fe_add(&data->gej[0].y, &data->fe[2]);
rustsecp256k1_v0_4_0_fe_add(&data->gej[0].z, &data->ge[1].x);
rustsecp256k1_v0_4_0_fe_normalize_var(&data->gej[0].x);
rustsecp256k1_v0_4_0_fe_normalize_var(&data->gej[0].y);
rustsecp256k1_v0_4_0_fe_normalize_var(&data->gej[0].z);
Note that the resulting coordinates will generally not correspond to a point
on the curve, but this is not a problem for the code being benchmarked here.
Adding and normalizing have less overhead than EC operations (which could
guarantee the point remains on the curve). */
rustsecp256k1_v0_4_1_fe_add(&data->gej[0].x, &data->ge[1].y);
rustsecp256k1_v0_4_1_fe_add(&data->gej[0].y, &data->fe[2]);
rustsecp256k1_v0_4_1_fe_add(&data->gej[0].z, &data->ge[1].x);
rustsecp256k1_v0_4_1_fe_normalize_var(&data->gej[0].x);
rustsecp256k1_v0_4_1_fe_normalize_var(&data->gej[0].y);
rustsecp256k1_v0_4_1_fe_normalize_var(&data->gej[0].z);
}
}
@ -298,8 +270,8 @@ void bench_ecmult_wnaf(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
bits += rustsecp256k1_v0_4_0_ecmult_wnaf(data->wnaf, 256, &data->scalar[0], WINDOW_A);
overflow += rustsecp256k1_v0_4_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
bits += rustsecp256k1_v0_4_1_ecmult_wnaf(data->wnaf, 256, &data->scalar[0], WINDOW_A);
overflow += rustsecp256k1_v0_4_1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
}
CHECK(overflow >= 0);
CHECK(bits <= 256*iters);
@ -310,8 +282,8 @@ void bench_wnaf_const(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
bits += rustsecp256k1_v0_4_0_wnaf_const(data->wnaf, &data->scalar[0], WINDOW_A, 256);
overflow += rustsecp256k1_v0_4_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
bits += rustsecp256k1_v0_4_1_wnaf_const(data->wnaf, &data->scalar[0], WINDOW_A, 256);
overflow += rustsecp256k1_v0_4_1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
}
CHECK(overflow >= 0);
CHECK(bits <= 256*iters);
@ -321,35 +293,35 @@ void bench_wnaf_const(void* arg, int iters) {
void bench_sha256(void* arg, int iters) {
int i;
bench_inv *data = (bench_inv*)arg;
rustsecp256k1_v0_4_0_sha256 sha;
rustsecp256k1_v0_4_1_sha256 sha;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_sha256_initialize(&sha);
rustsecp256k1_v0_4_0_sha256_write(&sha, data->data, 32);
rustsecp256k1_v0_4_0_sha256_finalize(&sha, data->data);
rustsecp256k1_v0_4_1_sha256_initialize(&sha);
rustsecp256k1_v0_4_1_sha256_write(&sha, data->data, 32);
rustsecp256k1_v0_4_1_sha256_finalize(&sha, data->data);
}
}
void bench_hmac_sha256(void* arg, int iters) {
int i;
bench_inv *data = (bench_inv*)arg;
rustsecp256k1_v0_4_0_hmac_sha256 hmac;
rustsecp256k1_v0_4_1_hmac_sha256 hmac;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_hmac_sha256_initialize(&hmac, data->data, 32);
rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, data->data, 32);
rustsecp256k1_v0_4_0_hmac_sha256_finalize(&hmac, data->data);
rustsecp256k1_v0_4_1_hmac_sha256_initialize(&hmac, data->data, 32);
rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, data->data, 32);
rustsecp256k1_v0_4_1_hmac_sha256_finalize(&hmac, data->data);
}
}
void bench_rfc6979_hmac_sha256(void* arg, int iters) {
int i;
bench_inv *data = (bench_inv*)arg;
rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 rng;
rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 rng;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_initialize(&rng, data->data, 64);
rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(&rng, data->data, 32);
rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_initialize(&rng, data->data, 64);
rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(&rng, data->data, 32);
}
}
@ -357,7 +329,7 @@ void bench_context_verify(void* arg, int iters) {
int i;
(void)arg;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_context_destroy(rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_VERIFY));
rustsecp256k1_v0_4_1_context_destroy(rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_VERIFY));
}
}
@ -365,39 +337,20 @@ void bench_context_sign(void* arg, int iters) {
int i;
(void)arg;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_context_destroy(rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN));
rustsecp256k1_v0_4_1_context_destroy(rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN));
}
}
#ifndef USE_NUM_NONE
void bench_num_jacobi(void* arg, int iters) {
int i, j = 0;
bench_inv *data = (bench_inv*)arg;
rustsecp256k1_v0_4_0_num nx, na, norder;
rustsecp256k1_v0_4_0_scalar_get_num(&nx, &data->scalar[0]);
rustsecp256k1_v0_4_0_scalar_order_get_num(&norder);
rustsecp256k1_v0_4_0_scalar_get_num(&na, &data->scalar[1]);
for (i = 0; i < iters; i++) {
j += rustsecp256k1_v0_4_0_num_jacobi(&nx, &norder);
rustsecp256k1_v0_4_0_num_add(&nx, &nx, &na);
}
CHECK(j <= iters);
}
#endif
int main(int argc, char **argv) {
bench_inv data;
int iters = get_iters(20000);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "add")) run_benchmark("scalar_add", bench_scalar_add, bench_setup, NULL, &data, 10, iters*100);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "negate")) run_benchmark("scalar_negate", bench_scalar_negate, bench_setup, NULL, &data, 10, iters*100);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "sqr")) run_benchmark("scalar_sqr", bench_scalar_sqr, bench_setup, NULL, &data, 10, iters*10);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "mul")) run_benchmark("scalar_mul", bench_scalar_mul, bench_setup, NULL, &data, 10, iters*10);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "split")) run_benchmark("scalar_split", bench_scalar_split, bench_setup, NULL, &data, 10, iters);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse", bench_scalar_inverse, bench_setup, NULL, &data, 10, 2000);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse_var", bench_scalar_inverse_var, bench_setup, NULL, &data, 10, 2000);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse", bench_scalar_inverse, bench_setup, NULL, &data, 10, iters);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse_var", bench_scalar_inverse_var, bench_setup, NULL, &data, 10, iters);
if (have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize", bench_field_normalize, bench_setup, NULL, &data, 10, iters*100);
if (have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize_weak", bench_field_normalize_weak, bench_setup, NULL, &data, 10, iters*100);
@ -411,7 +364,6 @@ int main(int argc, char **argv) {
if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_var", bench_group_add_var, bench_setup, NULL, &data, 10, iters*10);
if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine", bench_group_add_affine, bench_setup, NULL, &data, 10, iters*10);
if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine_var", bench_group_add_affine_var, bench_setup, NULL, &data, 10, iters*10);
if (have_flag(argc, argv, "group") || have_flag(argc, argv, "jacobi")) run_benchmark("group_jacobi_var", bench_group_jacobi_var, bench_setup, NULL, &data, 10, iters);
if (have_flag(argc, argv, "group") || have_flag(argc, argv, "to_affine")) run_benchmark("group_to_affine_var", bench_group_to_affine_var, bench_setup, NULL, &data, 10, iters);
if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("wnaf_const", bench_wnaf_const, bench_setup, NULL, &data, 10, iters);
@ -424,8 +376,5 @@ int main(int argc, char **argv) {
if (have_flag(argc, argv, "context") || have_flag(argc, argv, "verify")) run_benchmark("context_verify", bench_context_verify, bench_setup, NULL, &data, 10, 1 + iters/1000);
if (have_flag(argc, argv, "context") || have_flag(argc, argv, "sign")) run_benchmark("context_sign", bench_context_sign, bench_setup, NULL, &data, 10, 1 + iters/100);
#ifndef USE_NUM_NONE
if (have_flag(argc, argv, "num") || have_flag(argc, argv, "jacobi")) run_benchmark("num_jacobi", bench_num_jacobi, bench_setup, NULL, &data, 10, iters*10);
#endif
return 0;
}

View File

@ -4,13 +4,13 @@
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
#include "include/secp256k1.h"
#include "include/secp256k1_recovery.h"
#include "../include/secp256k1.h"
#include "../include/secp256k1_recovery.h"
#include "util.h"
#include "bench.h"
typedef struct {
rustsecp256k1_v0_4_0_context *ctx;
rustsecp256k1_v0_4_1_context *ctx;
unsigned char msg[32];
unsigned char sig[64];
} bench_recover_data;
@ -18,16 +18,16 @@ typedef struct {
void bench_recover(void* arg, int iters) {
int i;
bench_recover_data *data = (bench_recover_data*)arg;
rustsecp256k1_v0_4_0_pubkey pubkey;
rustsecp256k1_v0_4_1_pubkey pubkey;
unsigned char pubkeyc[33];
for (i = 0; i < iters; i++) {
int j;
size_t pubkeylen = 33;
rustsecp256k1_v0_4_0_ecdsa_recoverable_signature sig;
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(data->ctx, &sig, data->sig, i % 2));
CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(data->ctx, &pubkey, &sig, data->msg));
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(data->ctx, pubkeyc, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED));
rustsecp256k1_v0_4_1_ecdsa_recoverable_signature sig;
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(data->ctx, &sig, data->sig, i % 2));
CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(data->ctx, &pubkey, &sig, data->msg));
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(data->ctx, pubkeyc, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED));
for (j = 0; j < 32; j++) {
data->sig[j + 32] = data->msg[j]; /* Move former message to S. */
data->msg[j] = data->sig[j]; /* Move former R to message. */
@ -53,10 +53,10 @@ int main(void) {
int iters = get_iters(20000);
data.ctx = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_VERIFY);
data.ctx = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_VERIFY);
run_benchmark("ecdsa_recover", bench_recover, bench_recover_setup, NULL, &data, 10, iters);
rustsecp256k1_v0_4_0_context_destroy(data.ctx);
rustsecp256k1_v0_4_1_context_destroy(data.ctx);
return 0;
}

View File

@ -8,16 +8,16 @@
#include <stdlib.h>
#include "include/secp256k1.h"
#include "include/secp256k1_schnorrsig.h"
#include "../include/secp256k1.h"
#include "../include/secp256k1_schnorrsig.h"
#include "util.h"
#include "bench.h"
typedef struct {
rustsecp256k1_v0_4_0_context *ctx;
rustsecp256k1_v0_4_1_context *ctx;
int n;
const rustsecp256k1_v0_4_0_keypair **keypairs;
const rustsecp256k1_v0_4_1_keypair **keypairs;
const unsigned char **pk;
const unsigned char **sigs;
const unsigned char **msgs;
@ -32,7 +32,7 @@ void bench_schnorrsig_sign(void* arg, int iters) {
for (i = 0; i < iters; i++) {
msg[0] = i;
msg[1] = i >> 8;
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(data->ctx, sig, msg, data->keypairs[i], NULL, NULL));
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(data->ctx, sig, msg, data->keypairs[i], NULL, NULL));
}
}
@ -41,9 +41,9 @@ void bench_schnorrsig_verify(void* arg, int iters) {
int i;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_xonly_pubkey pk;
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(data->ctx, &pk, data->pk[i]) == 1);
CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(data->ctx, data->sigs[i], data->msgs[i], &pk));
rustsecp256k1_v0_4_1_xonly_pubkey pk;
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(data->ctx, &pk, data->pk[i]) == 1);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(data->ctx, data->sigs[i], data->msgs[i], &pk));
}
}
@ -52,8 +52,8 @@ int main(void) {
bench_schnorrsig_data data;
int iters = get_iters(10000);
data.ctx = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_VERIFY | SECP256K1_CONTEXT_SIGN);
data.keypairs = (const rustsecp256k1_v0_4_0_keypair **)malloc(iters * sizeof(rustsecp256k1_v0_4_0_keypair *));
data.ctx = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_VERIFY | SECP256K1_CONTEXT_SIGN);
data.keypairs = (const rustsecp256k1_v0_4_1_keypair **)malloc(iters * sizeof(rustsecp256k1_v0_4_1_keypair *));
data.pk = (const unsigned char **)malloc(iters * sizeof(unsigned char *));
data.msgs = (const unsigned char **)malloc(iters * sizeof(unsigned char *));
data.sigs = (const unsigned char **)malloc(iters * sizeof(unsigned char *));
@ -62,9 +62,9 @@ int main(void) {
unsigned char sk[32];
unsigned char *msg = (unsigned char *)malloc(32);
unsigned char *sig = (unsigned char *)malloc(64);
rustsecp256k1_v0_4_0_keypair *keypair = (rustsecp256k1_v0_4_0_keypair *)malloc(sizeof(*keypair));
rustsecp256k1_v0_4_1_keypair *keypair = (rustsecp256k1_v0_4_1_keypair *)malloc(sizeof(*keypair));
unsigned char *pk_char = (unsigned char *)malloc(32);
rustsecp256k1_v0_4_0_xonly_pubkey pk;
rustsecp256k1_v0_4_1_xonly_pubkey pk;
msg[0] = sk[0] = i;
msg[1] = sk[1] = i >> 8;
msg[2] = sk[2] = i >> 16;
@ -77,10 +77,10 @@ int main(void) {
data.msgs[i] = msg;
data.sigs[i] = sig;
CHECK(rustsecp256k1_v0_4_0_keypair_create(data.ctx, keypair, sk));
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(data.ctx, sig, msg, keypair, NULL, NULL));
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(data.ctx, &pk, NULL, keypair));
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(data.ctx, pk_char, &pk) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_create(data.ctx, keypair, sk));
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(data.ctx, sig, msg, keypair, NULL, NULL));
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(data.ctx, &pk, NULL, keypair));
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(data.ctx, pk_char, &pk) == 1);
}
run_benchmark("schnorrsig_sign", bench_schnorrsig_sign, NULL, NULL, (void *) &data, 10, iters);
@ -97,6 +97,6 @@ int main(void) {
free(data.msgs);
free(data.sigs);
rustsecp256k1_v0_4_0_context_destroy(data.ctx);
rustsecp256k1_v0_4_1_context_destroy(data.ctx);
return 0;
}

View File

@ -4,12 +4,12 @@
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
#include "include/secp256k1.h"
#include "../include/secp256k1.h"
#include "util.h"
#include "bench.h"
typedef struct {
rustsecp256k1_v0_4_0_context* ctx;
rustsecp256k1_v0_4_1_context* ctx;
unsigned char msg[32];
unsigned char key[32];
} bench_sign_data;
@ -34,9 +34,9 @@ static void bench_sign_run(void* arg, int iters) {
for (i = 0; i < iters; i++) {
size_t siglen = 74;
int j;
rustsecp256k1_v0_4_0_ecdsa_signature signature;
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(data->ctx, &signature, data->msg, data->key, NULL, NULL));
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(data->ctx, sig, &siglen, &signature));
rustsecp256k1_v0_4_1_ecdsa_signature signature;
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(data->ctx, &signature, data->msg, data->key, NULL, NULL));
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der(data->ctx, sig, &siglen, &signature));
for (j = 0; j < 32; j++) {
data->msg[j] = sig[j];
data->key[j] = sig[j + 32];
@ -49,10 +49,10 @@ int main(void) {
int iters = get_iters(20000);
data.ctx = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN);
data.ctx = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN);
run_benchmark("ecdsa_sign", bench_sign_run, bench_sign_setup, NULL, &data, 10, iters);
rustsecp256k1_v0_4_0_context_destroy(data.ctx);
rustsecp256k1_v0_4_1_context_destroy(data.ctx);
return 0;
}

View File

@ -7,7 +7,7 @@
#include <stdio.h>
#include <string.h>
#include "include/secp256k1.h"
#include "../include/secp256k1.h"
#include "util.h"
#include "bench.h"
@ -19,7 +19,7 @@
typedef struct {
rustsecp256k1_v0_4_0_context *ctx;
rustsecp256k1_v0_4_1_context *ctx;
unsigned char msg[32];
unsigned char key[32];
unsigned char sig[72];
@ -36,14 +36,14 @@ static void bench_verify(void* arg, int iters) {
bench_verify_data* data = (bench_verify_data*)arg;
for (i = 0; i < iters; i++) {
rustsecp256k1_v0_4_0_pubkey pubkey;
rustsecp256k1_v0_4_0_ecdsa_signature sig;
rustsecp256k1_v0_4_1_pubkey pubkey;
rustsecp256k1_v0_4_1_ecdsa_signature sig;
data->sig[data->siglen - 1] ^= (i & 0xFF);
data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF);
data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF);
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_parse(data->ctx, &pubkey, data->pubkey, data->pubkeylen) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(data->ctx, &sig, data->sig, data->siglen) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(data->ctx, &sig, data->msg, &pubkey) == (i == 0));
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_parse(data->ctx, &pubkey, data->pubkey, data->pubkeylen) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(data->ctx, &sig, data->sig, data->siglen) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(data->ctx, &sig, data->msg, &pubkey) == (i == 0));
data->sig[data->siglen - 1] ^= (i & 0xFF);
data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF);
data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF);
@ -82,13 +82,13 @@ static void bench_verify_openssl(void* arg, int iters) {
int main(void) {
int i;
rustsecp256k1_v0_4_0_pubkey pubkey;
rustsecp256k1_v0_4_0_ecdsa_signature sig;
rustsecp256k1_v0_4_1_pubkey pubkey;
rustsecp256k1_v0_4_1_ecdsa_signature sig;
bench_verify_data data;
int iters = get_iters(20000);
data.ctx = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
data.ctx = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
for (i = 0; i < 32; i++) {
data.msg[i] = 1 + i;
@ -97,11 +97,11 @@ int main(void) {
data.key[i] = 33 + i;
}
data.siglen = 72;
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(data.ctx, &sig, data.msg, data.key, NULL, NULL));
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig));
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(data.ctx, &pubkey, data.key));
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(data.ctx, &sig, data.msg, data.key, NULL, NULL));
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig));
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(data.ctx, &pubkey, data.key));
data.pubkeylen = 33;
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
run_benchmark("ecdsa_verify", bench_verify, NULL, NULL, &data, 10, iters);
#ifdef ENABLE_OPENSSL_TESTS
@ -110,6 +110,6 @@ int main(void) {
EC_GROUP_free(data.ec_group);
#endif
rustsecp256k1_v0_4_0_context_destroy(data.ctx);
rustsecp256k1_v0_4_1_context_destroy(data.ctx);
return 0;
}

View File

@ -13,9 +13,9 @@
#include "group.h"
#include "ecmult.h"
static int rustsecp256k1_v0_4_0_ecdsa_sig_parse(rustsecp256k1_v0_4_0_scalar *r, rustsecp256k1_v0_4_0_scalar *s, const unsigned char *sig, size_t size);
static int rustsecp256k1_v0_4_0_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *s);
static int rustsecp256k1_v0_4_0_ecdsa_sig_verify(const rustsecp256k1_v0_4_0_ecmult_context *ctx, const rustsecp256k1_v0_4_0_scalar* r, const rustsecp256k1_v0_4_0_scalar* s, const rustsecp256k1_v0_4_0_ge *pubkey, const rustsecp256k1_v0_4_0_scalar *message);
static int rustsecp256k1_v0_4_0_ecdsa_sig_sign(const rustsecp256k1_v0_4_0_ecmult_gen_context *ctx, rustsecp256k1_v0_4_0_scalar* r, rustsecp256k1_v0_4_0_scalar* s, const rustsecp256k1_v0_4_0_scalar *seckey, const rustsecp256k1_v0_4_0_scalar *message, const rustsecp256k1_v0_4_0_scalar *nonce, int *recid);
static int rustsecp256k1_v0_4_1_ecdsa_sig_parse(rustsecp256k1_v0_4_1_scalar *r, rustsecp256k1_v0_4_1_scalar *s, const unsigned char *sig, size_t size);
static int rustsecp256k1_v0_4_1_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *s);
static int rustsecp256k1_v0_4_1_ecdsa_sig_verify(const rustsecp256k1_v0_4_1_ecmult_context *ctx, const rustsecp256k1_v0_4_1_scalar* r, const rustsecp256k1_v0_4_1_scalar* s, const rustsecp256k1_v0_4_1_ge *pubkey, const rustsecp256k1_v0_4_1_scalar *message);
static int rustsecp256k1_v0_4_1_ecdsa_sig_sign(const rustsecp256k1_v0_4_1_ecmult_gen_context *ctx, rustsecp256k1_v0_4_1_scalar* r, rustsecp256k1_v0_4_1_scalar* s, const rustsecp256k1_v0_4_1_scalar *seckey, const rustsecp256k1_v0_4_1_scalar *message, const rustsecp256k1_v0_4_1_scalar *nonce, int *recid);
#endif /* SECP256K1_ECDSA_H */

View File

@ -28,7 +28,7 @@
* sage: '%x' % (EllipticCurve ([F (a), F (b)]).order())
* 'fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141'
*/
static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_ecdsa_const_order_as_fe = SECP256K1_FE_CONST(
static const rustsecp256k1_v0_4_1_fe rustsecp256k1_v0_4_1_ecdsa_const_order_as_fe = SECP256K1_FE_CONST(
0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL,
0xBAAEDCE6UL, 0xAF48A03BUL, 0xBFD25E8CUL, 0xD0364141UL
);
@ -42,11 +42,11 @@ static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_ecdsa_const_order_as_f
* sage: '%x' % (p - EllipticCurve ([F (a), F (b)]).order())
* '14551231950b75fc4402da1722fc9baee'
*/
static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_ecdsa_const_p_minus_order = SECP256K1_FE_CONST(
static const rustsecp256k1_v0_4_1_fe rustsecp256k1_v0_4_1_ecdsa_const_p_minus_order = SECP256K1_FE_CONST(
0, 0, 0, 1, 0x45512319UL, 0x50B75FC4UL, 0x402DA172UL, 0x2FC9BAEEUL
);
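Restating the sage output quoted in the two comments above (assuming the standard secp256k1 parameters, which the quoted hex strings match):
\[ p = 2^{256} - 2^{32} - 977, \qquad p - n = \mathtt{0x14551231950b75fc4402da1722fc9baee}, \]
so rustsecp256k1_v0_4_1_ecdsa_const_p_minus_order is literally p - n, and rustsecp256k1_v0_4_1_ecdsa_const_order_as_fe is the group order n viewed as a field element (no reduction actually occurs, since n < p).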
static int rustsecp256k1_v0_4_0_der_read_len(size_t *len, const unsigned char **sigp, const unsigned char *sigend) {
static int rustsecp256k1_v0_4_1_der_read_len(size_t *len, const unsigned char **sigp, const unsigned char *sigend) {
size_t lenleft;
unsigned char b1;
VERIFY_CHECK(len != NULL);
@ -99,7 +99,7 @@ static int rustsecp256k1_v0_4_0_der_read_len(size_t *len, const unsigned char **
return 1;
}
static int rustsecp256k1_v0_4_0_der_parse_integer(rustsecp256k1_v0_4_0_scalar *r, const unsigned char **sig, const unsigned char *sigend) {
static int rustsecp256k1_v0_4_1_der_parse_integer(rustsecp256k1_v0_4_1_scalar *r, const unsigned char **sig, const unsigned char *sigend) {
int overflow = 0;
unsigned char ra[32] = {0};
size_t rlen;
@ -109,7 +109,7 @@ static int rustsecp256k1_v0_4_0_der_parse_integer(rustsecp256k1_v0_4_0_scalar *r
return 0;
}
(*sig)++;
if (rustsecp256k1_v0_4_0_der_read_len(&rlen, sig, sigend) == 0) {
if (rustsecp256k1_v0_4_1_der_read_len(&rlen, sig, sigend) == 0) {
return 0;
}
if (rlen == 0 || *sig + rlen > sigend) {
@ -141,23 +141,23 @@ static int rustsecp256k1_v0_4_0_der_parse_integer(rustsecp256k1_v0_4_0_scalar *r
}
if (!overflow) {
memcpy(ra + 32 - rlen, *sig, rlen);
rustsecp256k1_v0_4_0_scalar_set_b32(r, ra, &overflow);
rustsecp256k1_v0_4_1_scalar_set_b32(r, ra, &overflow);
}
if (overflow) {
rustsecp256k1_v0_4_0_scalar_set_int(r, 0);
rustsecp256k1_v0_4_1_scalar_set_int(r, 0);
}
(*sig) += rlen;
return 1;
}
static int rustsecp256k1_v0_4_0_ecdsa_sig_parse(rustsecp256k1_v0_4_0_scalar *rr, rustsecp256k1_v0_4_0_scalar *rs, const unsigned char *sig, size_t size) {
static int rustsecp256k1_v0_4_1_ecdsa_sig_parse(rustsecp256k1_v0_4_1_scalar *rr, rustsecp256k1_v0_4_1_scalar *rs, const unsigned char *sig, size_t size) {
const unsigned char *sigend = sig + size;
size_t rlen;
if (sig == sigend || *(sig++) != 0x30) {
/* The encoding doesn't start with a constructed sequence (X.690-0207 8.9.1). */
return 0;
}
if (rustsecp256k1_v0_4_0_der_read_len(&rlen, &sig, sigend) == 0) {
if (rustsecp256k1_v0_4_1_der_read_len(&rlen, &sig, sigend) == 0) {
return 0;
}
if (rlen != (size_t)(sigend - sig)) {
@ -165,10 +165,10 @@ static int rustsecp256k1_v0_4_0_ecdsa_sig_parse(rustsecp256k1_v0_4_0_scalar *rr,
return 0;
}
if (!rustsecp256k1_v0_4_0_der_parse_integer(rr, &sig, sigend)) {
if (!rustsecp256k1_v0_4_1_der_parse_integer(rr, &sig, sigend)) {
return 0;
}
if (!rustsecp256k1_v0_4_0_der_parse_integer(rs, &sig, sigend)) {
if (!rustsecp256k1_v0_4_1_der_parse_integer(rs, &sig, sigend)) {
return 0;
}
@ -180,12 +180,12 @@ static int rustsecp256k1_v0_4_0_ecdsa_sig_parse(rustsecp256k1_v0_4_0_scalar *rr,
return 1;
}
static int rustsecp256k1_v0_4_0_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_4_0_scalar* ar, const rustsecp256k1_v0_4_0_scalar* as) {
static int rustsecp256k1_v0_4_1_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_4_1_scalar* ar, const rustsecp256k1_v0_4_1_scalar* as) {
unsigned char r[33] = {0}, s[33] = {0};
unsigned char *rp = r, *sp = s;
size_t lenR = 33, lenS = 33;
rustsecp256k1_v0_4_0_scalar_get_b32(&r[1], ar);
rustsecp256k1_v0_4_0_scalar_get_b32(&s[1], as);
rustsecp256k1_v0_4_1_scalar_get_b32(&r[1], ar);
rustsecp256k1_v0_4_1_scalar_get_b32(&s[1], as);
while (lenR > 1 && rp[0] == 0 && rp[1] < 0x80) { lenR--; rp++; }
while (lenS > 1 && sp[0] == 0 && sp[1] < 0x80) { lenS--; sp++; }
if (*size < 6+lenS+lenR) {
@ -204,42 +204,42 @@ static int rustsecp256k1_v0_4_0_ecdsa_sig_serialize(unsigned char *sig, size_t *
return 1;
}
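As a rough, self-contained illustration (not taken from the library) of the DER layout that the parse and serialize routines above operate on, a signature is a SEQUENCE (0x30) of two INTEGERs (0x02):

#include <stddef.h>

/* Illustrative only: a minimal well-formed DER ECDSA signature with one-byte r and s. */
static const unsigned char toy_der_sig[8] = {
    0x30, 0x06,        /* SEQUENCE, 6 content bytes follow */
    0x02, 0x01, 0x2A,  /* INTEGER r = 0x2A */
    0x02, 0x01, 0x17   /* INTEGER s = 0x17 */
};

Real r and s values are up to 32 bytes; when the leading byte would have its top bit set, a 0x00 byte is prepended so the integer stays non-negative, which is exactly the rp[0] == 0 && rp[1] < 0x80 condition the serializer above uses while trimming.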
static int rustsecp256k1_v0_4_0_ecdsa_sig_verify(const rustsecp256k1_v0_4_0_ecmult_context *ctx, const rustsecp256k1_v0_4_0_scalar *sigr, const rustsecp256k1_v0_4_0_scalar *sigs, const rustsecp256k1_v0_4_0_ge *pubkey, const rustsecp256k1_v0_4_0_scalar *message) {
static int rustsecp256k1_v0_4_1_ecdsa_sig_verify(const rustsecp256k1_v0_4_1_ecmult_context *ctx, const rustsecp256k1_v0_4_1_scalar *sigr, const rustsecp256k1_v0_4_1_scalar *sigs, const rustsecp256k1_v0_4_1_ge *pubkey, const rustsecp256k1_v0_4_1_scalar *message) {
unsigned char c[32];
rustsecp256k1_v0_4_0_scalar sn, u1, u2;
rustsecp256k1_v0_4_1_scalar sn, u1, u2;
#if !defined(EXHAUSTIVE_TEST_ORDER)
rustsecp256k1_v0_4_0_fe xr;
rustsecp256k1_v0_4_1_fe xr;
#endif
rustsecp256k1_v0_4_0_gej pubkeyj;
rustsecp256k1_v0_4_0_gej pr;
rustsecp256k1_v0_4_1_gej pubkeyj;
rustsecp256k1_v0_4_1_gej pr;
if (rustsecp256k1_v0_4_0_scalar_is_zero(sigr) || rustsecp256k1_v0_4_0_scalar_is_zero(sigs)) {
if (rustsecp256k1_v0_4_1_scalar_is_zero(sigr) || rustsecp256k1_v0_4_1_scalar_is_zero(sigs)) {
return 0;
}
rustsecp256k1_v0_4_0_scalar_inverse_var(&sn, sigs);
rustsecp256k1_v0_4_0_scalar_mul(&u1, &sn, message);
rustsecp256k1_v0_4_0_scalar_mul(&u2, &sn, sigr);
rustsecp256k1_v0_4_0_gej_set_ge(&pubkeyj, pubkey);
rustsecp256k1_v0_4_0_ecmult(ctx, &pr, &pubkeyj, &u2, &u1);
if (rustsecp256k1_v0_4_0_gej_is_infinity(&pr)) {
rustsecp256k1_v0_4_1_scalar_inverse_var(&sn, sigs);
rustsecp256k1_v0_4_1_scalar_mul(&u1, &sn, message);
rustsecp256k1_v0_4_1_scalar_mul(&u2, &sn, sigr);
rustsecp256k1_v0_4_1_gej_set_ge(&pubkeyj, pubkey);
rustsecp256k1_v0_4_1_ecmult(ctx, &pr, &pubkeyj, &u2, &u1);
if (rustsecp256k1_v0_4_1_gej_is_infinity(&pr)) {
return 0;
}
#if defined(EXHAUSTIVE_TEST_ORDER)
{
rustsecp256k1_v0_4_0_scalar computed_r;
rustsecp256k1_v0_4_0_ge pr_ge;
rustsecp256k1_v0_4_0_ge_set_gej(&pr_ge, &pr);
rustsecp256k1_v0_4_0_fe_normalize(&pr_ge.x);
rustsecp256k1_v0_4_1_scalar computed_r;
rustsecp256k1_v0_4_1_ge pr_ge;
rustsecp256k1_v0_4_1_ge_set_gej(&pr_ge, &pr);
rustsecp256k1_v0_4_1_fe_normalize(&pr_ge.x);
rustsecp256k1_v0_4_0_fe_get_b32(c, &pr_ge.x);
rustsecp256k1_v0_4_0_scalar_set_b32(&computed_r, c, NULL);
return rustsecp256k1_v0_4_0_scalar_eq(sigr, &computed_r);
rustsecp256k1_v0_4_1_fe_get_b32(c, &pr_ge.x);
rustsecp256k1_v0_4_1_scalar_set_b32(&computed_r, c, NULL);
return rustsecp256k1_v0_4_1_scalar_eq(sigr, &computed_r);
}
#else
rustsecp256k1_v0_4_0_scalar_get_b32(c, sigr);
rustsecp256k1_v0_4_0_fe_set_b32(&xr, c);
rustsecp256k1_v0_4_1_scalar_get_b32(c, sigr);
rustsecp256k1_v0_4_1_fe_set_b32(&xr, c);
/** We now have the recomputed R point in pr, and its claimed x coordinate (modulo n)
* in xr. Naively, we would extract the x coordinate from pr (requiring an inversion modulo p),
@ -255,18 +255,18 @@ static int rustsecp256k1_v0_4_0_ecdsa_sig_verify(const rustsecp256k1_v0_4_0_ecmu
* <=> (xr * pr.z^2 mod p == pr.x) || (xr + n < p && (xr + n) * pr.z^2 mod p == pr.x)
*
* Thus, we can avoid the inversion, but we have to check both cases separately.
* rustsecp256k1_v0_4_0_gej_eq_x implements the (xr * pr.z^2 mod p == pr.x) test.
* rustsecp256k1_v0_4_1_gej_eq_x implements the (xr * pr.z^2 mod p == pr.x) test.
*/
if (rustsecp256k1_v0_4_0_gej_eq_x_var(&xr, &pr)) {
if (rustsecp256k1_v0_4_1_gej_eq_x_var(&xr, &pr)) {
/* xr * pr.z^2 mod p == pr.x, so the signature is valid. */
return 1;
}
if (rustsecp256k1_v0_4_0_fe_cmp_var(&xr, &rustsecp256k1_v0_4_0_ecdsa_const_p_minus_order) >= 0) {
if (rustsecp256k1_v0_4_1_fe_cmp_var(&xr, &rustsecp256k1_v0_4_1_ecdsa_const_p_minus_order) >= 0) {
/* xr + n >= p, so we can skip testing the second case. */
return 0;
}
rustsecp256k1_v0_4_0_fe_add(&xr, &rustsecp256k1_v0_4_0_ecdsa_const_order_as_fe);
if (rustsecp256k1_v0_4_0_gej_eq_x_var(&xr, &pr)) {
rustsecp256k1_v0_4_1_fe_add(&xr, &rustsecp256k1_v0_4_1_ecdsa_const_order_as_fe);
if (rustsecp256k1_v0_4_1_gej_eq_x_var(&xr, &pr)) {
/* (xr + n) * pr.z^2 mod p == pr.x, so the signature is valid. */
return 1;
}
@ -274,42 +274,42 @@ static int rustsecp256k1_v0_4_0_ecdsa_sig_verify(const rustsecp256k1_v0_4_0_ecmu
#endif
}
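Restating the comparison above as equations (a sketch of the same argument, with R = (X, Y, Z) in Jacobian coordinates, so its affine x-coordinate is X/Z^2 mod p):
\[ x(R) \equiv r \pmod{n} \iff X/Z^2 \bmod p \in \{\, r,\ r+n \,\} \]
\[ \iff r \cdot Z^2 \equiv X \pmod{p} \quad\text{or}\quad \bigl(r+n < p \ \text{and}\ (r+n)\cdot Z^2 \equiv X \pmod{p}\bigr), \]
which is the pair of rustsecp256k1_v0_4_1_gej_eq_x_var checks performed above, with the r + n case skipped whenever rustsecp256k1_v0_4_1_fe_cmp_var shows r >= p - n.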
static int rustsecp256k1_v0_4_0_ecdsa_sig_sign(const rustsecp256k1_v0_4_0_ecmult_gen_context *ctx, rustsecp256k1_v0_4_0_scalar *sigr, rustsecp256k1_v0_4_0_scalar *sigs, const rustsecp256k1_v0_4_0_scalar *seckey, const rustsecp256k1_v0_4_0_scalar *message, const rustsecp256k1_v0_4_0_scalar *nonce, int *recid) {
static int rustsecp256k1_v0_4_1_ecdsa_sig_sign(const rustsecp256k1_v0_4_1_ecmult_gen_context *ctx, rustsecp256k1_v0_4_1_scalar *sigr, rustsecp256k1_v0_4_1_scalar *sigs, const rustsecp256k1_v0_4_1_scalar *seckey, const rustsecp256k1_v0_4_1_scalar *message, const rustsecp256k1_v0_4_1_scalar *nonce, int *recid) {
unsigned char b[32];
rustsecp256k1_v0_4_0_gej rp;
rustsecp256k1_v0_4_0_ge r;
rustsecp256k1_v0_4_0_scalar n;
rustsecp256k1_v0_4_1_gej rp;
rustsecp256k1_v0_4_1_ge r;
rustsecp256k1_v0_4_1_scalar n;
int overflow = 0;
int high;
rustsecp256k1_v0_4_0_ecmult_gen(ctx, &rp, nonce);
rustsecp256k1_v0_4_0_ge_set_gej(&r, &rp);
rustsecp256k1_v0_4_0_fe_normalize(&r.x);
rustsecp256k1_v0_4_0_fe_normalize(&r.y);
rustsecp256k1_v0_4_0_fe_get_b32(b, &r.x);
rustsecp256k1_v0_4_0_scalar_set_b32(sigr, b, &overflow);
rustsecp256k1_v0_4_1_ecmult_gen(ctx, &rp, nonce);
rustsecp256k1_v0_4_1_ge_set_gej(&r, &rp);
rustsecp256k1_v0_4_1_fe_normalize(&r.x);
rustsecp256k1_v0_4_1_fe_normalize(&r.y);
rustsecp256k1_v0_4_1_fe_get_b32(b, &r.x);
rustsecp256k1_v0_4_1_scalar_set_b32(sigr, b, &overflow);
if (recid) {
/* The overflow condition is cryptographically unreachable as hitting it requires finding the discrete log
* of some P where P.x >= order, and only 1 in about 2^127 points meets this criterion.
*/
*recid = (overflow << 1) | rustsecp256k1_v0_4_0_fe_is_odd(&r.y);
*recid = (overflow << 1) | rustsecp256k1_v0_4_1_fe_is_odd(&r.y);
}
rustsecp256k1_v0_4_0_scalar_mul(&n, sigr, seckey);
rustsecp256k1_v0_4_0_scalar_add(&n, &n, message);
rustsecp256k1_v0_4_0_scalar_inverse(sigs, nonce);
rustsecp256k1_v0_4_0_scalar_mul(sigs, sigs, &n);
rustsecp256k1_v0_4_0_scalar_clear(&n);
rustsecp256k1_v0_4_0_gej_clear(&rp);
rustsecp256k1_v0_4_0_ge_clear(&r);
high = rustsecp256k1_v0_4_0_scalar_is_high(sigs);
rustsecp256k1_v0_4_0_scalar_cond_negate(sigs, high);
rustsecp256k1_v0_4_1_scalar_mul(&n, sigr, seckey);
rustsecp256k1_v0_4_1_scalar_add(&n, &n, message);
rustsecp256k1_v0_4_1_scalar_inverse(sigs, nonce);
rustsecp256k1_v0_4_1_scalar_mul(sigs, sigs, &n);
rustsecp256k1_v0_4_1_scalar_clear(&n);
rustsecp256k1_v0_4_1_gej_clear(&rp);
rustsecp256k1_v0_4_1_ge_clear(&r);
high = rustsecp256k1_v0_4_1_scalar_is_high(sigs);
rustsecp256k1_v0_4_1_scalar_cond_negate(sigs, high);
if (recid) {
*recid ^= high;
}
/* P.x = order is on the curve, so technically sig->r could end up being zero, which would be an invalid signature.
* This is cryptographically unreachable as hitting it requires finding the discrete log of P.x = N.
*/
return !rustsecp256k1_v0_4_0_scalar_is_zero(sigr) & !rustsecp256k1_v0_4_0_scalar_is_zero(sigs);
return !rustsecp256k1_v0_4_1_scalar_is_zero(sigr) & !rustsecp256k1_v0_4_1_scalar_is_zero(sigs);
}
#endif /* SECP256K1_ECDSA_IMPL_H */
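A minimal sketch, using toy 64-bit integers rather than the library scalar type, of the low-S normalization and recovery-id adjustment done at the end of rustsecp256k1_v0_4_1_ecdsa_sig_sign above:

#include <stdint.h>

/* Toy model: if s lies in the upper half of the order n, replace it with n - s and
 * flip the parity bit of the recovery id, mirroring scalar_cond_negate and *recid ^= high. */
static uint64_t normalize_s_toy(uint64_t s, uint64_t n, int *recid) {
    int high = s > n / 2;
    if (recid != 0) {
        *recid ^= high;
    }
    return high ? n - s : s;
}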

View File

@ -14,12 +14,12 @@
#include "ecmult.h"
#include "ecmult_gen.h"
static int rustsecp256k1_v0_4_0_eckey_pubkey_parse(rustsecp256k1_v0_4_0_ge *elem, const unsigned char *pub, size_t size);
static int rustsecp256k1_v0_4_0_eckey_pubkey_serialize(rustsecp256k1_v0_4_0_ge *elem, unsigned char *pub, size_t *size, int compressed);
static int rustsecp256k1_v0_4_1_eckey_pubkey_parse(rustsecp256k1_v0_4_1_ge *elem, const unsigned char *pub, size_t size);
static int rustsecp256k1_v0_4_1_eckey_pubkey_serialize(rustsecp256k1_v0_4_1_ge *elem, unsigned char *pub, size_t *size, int compressed);
static int rustsecp256k1_v0_4_0_eckey_privkey_tweak_add(rustsecp256k1_v0_4_0_scalar *key, const rustsecp256k1_v0_4_0_scalar *tweak);
static int rustsecp256k1_v0_4_0_eckey_pubkey_tweak_add(const rustsecp256k1_v0_4_0_ecmult_context *ctx, rustsecp256k1_v0_4_0_ge *key, const rustsecp256k1_v0_4_0_scalar *tweak);
static int rustsecp256k1_v0_4_0_eckey_privkey_tweak_mul(rustsecp256k1_v0_4_0_scalar *key, const rustsecp256k1_v0_4_0_scalar *tweak);
static int rustsecp256k1_v0_4_0_eckey_pubkey_tweak_mul(const rustsecp256k1_v0_4_0_ecmult_context *ctx, rustsecp256k1_v0_4_0_ge *key, const rustsecp256k1_v0_4_0_scalar *tweak);
static int rustsecp256k1_v0_4_1_eckey_privkey_tweak_add(rustsecp256k1_v0_4_1_scalar *key, const rustsecp256k1_v0_4_1_scalar *tweak);
static int rustsecp256k1_v0_4_1_eckey_pubkey_tweak_add(const rustsecp256k1_v0_4_1_ecmult_context *ctx, rustsecp256k1_v0_4_1_ge *key, const rustsecp256k1_v0_4_1_scalar *tweak);
static int rustsecp256k1_v0_4_1_eckey_privkey_tweak_mul(rustsecp256k1_v0_4_1_scalar *key, const rustsecp256k1_v0_4_1_scalar *tweak);
static int rustsecp256k1_v0_4_1_eckey_pubkey_tweak_mul(const rustsecp256k1_v0_4_1_ecmult_context *ctx, rustsecp256k1_v0_4_1_ge *key, const rustsecp256k1_v0_4_1_scalar *tweak);
#endif /* SECP256K1_ECKEY_H */

View File

@ -14,82 +14,82 @@
#include "group.h"
#include "ecmult_gen.h"
static int rustsecp256k1_v0_4_0_eckey_pubkey_parse(rustsecp256k1_v0_4_0_ge *elem, const unsigned char *pub, size_t size) {
static int rustsecp256k1_v0_4_1_eckey_pubkey_parse(rustsecp256k1_v0_4_1_ge *elem, const unsigned char *pub, size_t size) {
if (size == 33 && (pub[0] == SECP256K1_TAG_PUBKEY_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_ODD)) {
rustsecp256k1_v0_4_0_fe x;
return rustsecp256k1_v0_4_0_fe_set_b32(&x, pub+1) && rustsecp256k1_v0_4_0_ge_set_xo_var(elem, &x, pub[0] == SECP256K1_TAG_PUBKEY_ODD);
rustsecp256k1_v0_4_1_fe x;
return rustsecp256k1_v0_4_1_fe_set_b32(&x, pub+1) && rustsecp256k1_v0_4_1_ge_set_xo_var(elem, &x, pub[0] == SECP256K1_TAG_PUBKEY_ODD);
} else if (size == 65 && (pub[0] == SECP256K1_TAG_PUBKEY_UNCOMPRESSED || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) {
rustsecp256k1_v0_4_0_fe x, y;
if (!rustsecp256k1_v0_4_0_fe_set_b32(&x, pub+1) || !rustsecp256k1_v0_4_0_fe_set_b32(&y, pub+33)) {
rustsecp256k1_v0_4_1_fe x, y;
if (!rustsecp256k1_v0_4_1_fe_set_b32(&x, pub+1) || !rustsecp256k1_v0_4_1_fe_set_b32(&y, pub+33)) {
return 0;
}
rustsecp256k1_v0_4_0_ge_set_xy(elem, &x, &y);
rustsecp256k1_v0_4_1_ge_set_xy(elem, &x, &y);
if ((pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD) &&
rustsecp256k1_v0_4_0_fe_is_odd(&y) != (pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) {
rustsecp256k1_v0_4_1_fe_is_odd(&y) != (pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) {
return 0;
}
return rustsecp256k1_v0_4_0_ge_is_valid_var(elem);
return rustsecp256k1_v0_4_1_ge_is_valid_var(elem);
} else {
return 0;
}
}
static int rustsecp256k1_v0_4_0_eckey_pubkey_serialize(rustsecp256k1_v0_4_0_ge *elem, unsigned char *pub, size_t *size, int compressed) {
if (rustsecp256k1_v0_4_0_ge_is_infinity(elem)) {
static int rustsecp256k1_v0_4_1_eckey_pubkey_serialize(rustsecp256k1_v0_4_1_ge *elem, unsigned char *pub, size_t *size, int compressed) {
if (rustsecp256k1_v0_4_1_ge_is_infinity(elem)) {
return 0;
}
rustsecp256k1_v0_4_0_fe_normalize_var(&elem->x);
rustsecp256k1_v0_4_0_fe_normalize_var(&elem->y);
rustsecp256k1_v0_4_0_fe_get_b32(&pub[1], &elem->x);
rustsecp256k1_v0_4_1_fe_normalize_var(&elem->x);
rustsecp256k1_v0_4_1_fe_normalize_var(&elem->y);
rustsecp256k1_v0_4_1_fe_get_b32(&pub[1], &elem->x);
if (compressed) {
*size = 33;
pub[0] = rustsecp256k1_v0_4_0_fe_is_odd(&elem->y) ? SECP256K1_TAG_PUBKEY_ODD : SECP256K1_TAG_PUBKEY_EVEN;
pub[0] = rustsecp256k1_v0_4_1_fe_is_odd(&elem->y) ? SECP256K1_TAG_PUBKEY_ODD : SECP256K1_TAG_PUBKEY_EVEN;
} else {
*size = 65;
pub[0] = SECP256K1_TAG_PUBKEY_UNCOMPRESSED;
rustsecp256k1_v0_4_0_fe_get_b32(&pub[33], &elem->y);
rustsecp256k1_v0_4_1_fe_get_b32(&pub[33], &elem->y);
}
return 1;
}
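A short sketch (using the standard SEC1 prefix bytes, which the SECP256K1_TAG_PUBKEY_* constants used above correspond to) of the encodings handled by the parse/serialize pair above:

#include <stddef.h>

/* Sketch: 0x02/0x03 + 32-byte x for compressed keys (33 bytes, tag encodes y parity);
 * 0x04 + x + y for uncompressed keys (65 bytes); 0x06/0x07 are the hybrid forms,
 * which additionally require the stored y to match the parity the tag claims. */
static size_t pubkey_serialized_len(int compressed) {
    return compressed ? 33 : 65;
}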
static int rustsecp256k1_v0_4_0_eckey_privkey_tweak_add(rustsecp256k1_v0_4_0_scalar *key, const rustsecp256k1_v0_4_0_scalar *tweak) {
rustsecp256k1_v0_4_0_scalar_add(key, key, tweak);
return !rustsecp256k1_v0_4_0_scalar_is_zero(key);
static int rustsecp256k1_v0_4_1_eckey_privkey_tweak_add(rustsecp256k1_v0_4_1_scalar *key, const rustsecp256k1_v0_4_1_scalar *tweak) {
rustsecp256k1_v0_4_1_scalar_add(key, key, tweak);
return !rustsecp256k1_v0_4_1_scalar_is_zero(key);
}
static int rustsecp256k1_v0_4_0_eckey_pubkey_tweak_add(const rustsecp256k1_v0_4_0_ecmult_context *ctx, rustsecp256k1_v0_4_0_ge *key, const rustsecp256k1_v0_4_0_scalar *tweak) {
rustsecp256k1_v0_4_0_gej pt;
rustsecp256k1_v0_4_0_scalar one;
rustsecp256k1_v0_4_0_gej_set_ge(&pt, key);
rustsecp256k1_v0_4_0_scalar_set_int(&one, 1);
rustsecp256k1_v0_4_0_ecmult(ctx, &pt, &pt, &one, tweak);
static int rustsecp256k1_v0_4_1_eckey_pubkey_tweak_add(const rustsecp256k1_v0_4_1_ecmult_context *ctx, rustsecp256k1_v0_4_1_ge *key, const rustsecp256k1_v0_4_1_scalar *tweak) {
rustsecp256k1_v0_4_1_gej pt;
rustsecp256k1_v0_4_1_scalar one;
rustsecp256k1_v0_4_1_gej_set_ge(&pt, key);
rustsecp256k1_v0_4_1_scalar_set_int(&one, 1);
rustsecp256k1_v0_4_1_ecmult(ctx, &pt, &pt, &one, tweak);
if (rustsecp256k1_v0_4_0_gej_is_infinity(&pt)) {
if (rustsecp256k1_v0_4_1_gej_is_infinity(&pt)) {
return 0;
}
rustsecp256k1_v0_4_0_ge_set_gej(key, &pt);
rustsecp256k1_v0_4_1_ge_set_gej(key, &pt);
return 1;
}
static int rustsecp256k1_v0_4_0_eckey_privkey_tweak_mul(rustsecp256k1_v0_4_0_scalar *key, const rustsecp256k1_v0_4_0_scalar *tweak) {
static int rustsecp256k1_v0_4_1_eckey_privkey_tweak_mul(rustsecp256k1_v0_4_1_scalar *key, const rustsecp256k1_v0_4_1_scalar *tweak) {
int ret;
ret = !rustsecp256k1_v0_4_0_scalar_is_zero(tweak);
ret = !rustsecp256k1_v0_4_1_scalar_is_zero(tweak);
rustsecp256k1_v0_4_0_scalar_mul(key, key, tweak);
rustsecp256k1_v0_4_1_scalar_mul(key, key, tweak);
return ret;
}
static int rustsecp256k1_v0_4_0_eckey_pubkey_tweak_mul(const rustsecp256k1_v0_4_0_ecmult_context *ctx, rustsecp256k1_v0_4_0_ge *key, const rustsecp256k1_v0_4_0_scalar *tweak) {
rustsecp256k1_v0_4_0_scalar zero;
rustsecp256k1_v0_4_0_gej pt;
if (rustsecp256k1_v0_4_0_scalar_is_zero(tweak)) {
static int rustsecp256k1_v0_4_1_eckey_pubkey_tweak_mul(const rustsecp256k1_v0_4_1_ecmult_context *ctx, rustsecp256k1_v0_4_1_ge *key, const rustsecp256k1_v0_4_1_scalar *tweak) {
rustsecp256k1_v0_4_1_scalar zero;
rustsecp256k1_v0_4_1_gej pt;
if (rustsecp256k1_v0_4_1_scalar_is_zero(tweak)) {
return 0;
}
rustsecp256k1_v0_4_0_scalar_set_int(&zero, 0);
rustsecp256k1_v0_4_0_gej_set_ge(&pt, key);
rustsecp256k1_v0_4_0_ecmult(ctx, &pt, &pt, tweak, &zero);
rustsecp256k1_v0_4_0_ge_set_gej(key, &pt);
rustsecp256k1_v0_4_1_scalar_set_int(&zero, 0);
rustsecp256k1_v0_4_1_gej_set_ge(&pt, key);
rustsecp256k1_v0_4_1_ecmult(ctx, &pt, &pt, tweak, &zero);
rustsecp256k1_v0_4_1_ge_set_gej(key, &pt);
return 1;
}
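A toy sketch (plain 64-bit integers modulo a small n standing in for the scalar type) of the private-key tweak-add rule implemented above; the public-key counterpart above computes the matching P + tweak*G with a single rustsecp256k1_v0_4_1_ecmult call:

#include <stdint.h>

/* Toy model: key' = (key + tweak) mod n; a zero result is rejected, just as
 * rustsecp256k1_v0_4_1_eckey_privkey_tweak_add rejects a zero scalar. */
static int privkey_tweak_add_toy(uint64_t *key, uint64_t tweak, uint64_t n) {
    *key = (*key + tweak) % n;
    return *key != 0;
}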

View File

@ -7,28 +7,27 @@
#ifndef SECP256K1_ECMULT_H
#define SECP256K1_ECMULT_H
#include "num.h"
#include "group.h"
#include "scalar.h"
#include "scratch.h"
typedef struct {
/* For accelerating the computation of a*P + b*G: */
rustsecp256k1_v0_4_0_ge_storage (*pre_g)[]; /* odd multiples of the generator */
rustsecp256k1_v0_4_0_ge_storage (*pre_g_128)[]; /* odd multiples of 2^128*generator */
} rustsecp256k1_v0_4_0_ecmult_context;
rustsecp256k1_v0_4_1_ge_storage (*pre_g)[]; /* odd multiples of the generator */
rustsecp256k1_v0_4_1_ge_storage (*pre_g_128)[]; /* odd multiples of 2^128*generator */
} rustsecp256k1_v0_4_1_ecmult_context;
static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
static void rustsecp256k1_v0_4_0_ecmult_context_init(rustsecp256k1_v0_4_0_ecmult_context *ctx);
static void rustsecp256k1_v0_4_0_ecmult_context_build(rustsecp256k1_v0_4_0_ecmult_context *ctx, void **prealloc);
static void rustsecp256k1_v0_4_0_ecmult_context_finalize_memcpy(rustsecp256k1_v0_4_0_ecmult_context *dst, const rustsecp256k1_v0_4_0_ecmult_context *src);
static void rustsecp256k1_v0_4_0_ecmult_context_clear(rustsecp256k1_v0_4_0_ecmult_context *ctx);
static int rustsecp256k1_v0_4_0_ecmult_context_is_built(const rustsecp256k1_v0_4_0_ecmult_context *ctx);
static void rustsecp256k1_v0_4_1_ecmult_context_init(rustsecp256k1_v0_4_1_ecmult_context *ctx);
static void rustsecp256k1_v0_4_1_ecmult_context_build(rustsecp256k1_v0_4_1_ecmult_context *ctx, void **prealloc);
static void rustsecp256k1_v0_4_1_ecmult_context_finalize_memcpy(rustsecp256k1_v0_4_1_ecmult_context *dst, const rustsecp256k1_v0_4_1_ecmult_context *src);
static void rustsecp256k1_v0_4_1_ecmult_context_clear(rustsecp256k1_v0_4_1_ecmult_context *ctx);
static int rustsecp256k1_v0_4_1_ecmult_context_is_built(const rustsecp256k1_v0_4_1_ecmult_context *ctx);
/** Double multiply: R = na*A + ng*G */
static void rustsecp256k1_v0_4_0_ecmult(const rustsecp256k1_v0_4_0_ecmult_context *ctx, rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_scalar *na, const rustsecp256k1_v0_4_0_scalar *ng);
static void rustsecp256k1_v0_4_1_ecmult(const rustsecp256k1_v0_4_1_ecmult_context *ctx, rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_scalar *na, const rustsecp256k1_v0_4_1_scalar *ng);
typedef int (rustsecp256k1_v0_4_0_ecmult_multi_callback)(rustsecp256k1_v0_4_0_scalar *sc, rustsecp256k1_v0_4_0_ge *pt, size_t idx, void *data);
typedef int (rustsecp256k1_v0_4_1_ecmult_multi_callback)(rustsecp256k1_v0_4_1_scalar *sc, rustsecp256k1_v0_4_1_ge *pt, size_t idx, void *data);
/**
* Multi-multiply: R = inp_g_sc * G + sum_i ni * Ai.
@ -41,6 +40,6 @@ typedef int (rustsecp256k1_v0_4_0_ecmult_multi_callback)(rustsecp256k1_v0_4_0_sc
* 0 if there is not enough scratch space for a single point or
* callback returns 0
*/
static int rustsecp256k1_v0_4_0_ecmult_multi_var(const rustsecp256k1_v0_4_0_callback* error_callback, const rustsecp256k1_v0_4_0_ecmult_context *ctx, rustsecp256k1_v0_4_0_scratch *scratch, rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_scalar *inp_g_sc, rustsecp256k1_v0_4_0_ecmult_multi_callback cb, void *cbdata, size_t n);
static int rustsecp256k1_v0_4_1_ecmult_multi_var(const rustsecp256k1_v0_4_1_callback* error_callback, const rustsecp256k1_v0_4_1_ecmult_context *ctx, rustsecp256k1_v0_4_1_scratch *scratch, rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_scalar *inp_g_sc, rustsecp256k1_v0_4_1_ecmult_multi_callback cb, void *cbdata, size_t n);
#endif /* SECP256K1_ECMULT_H */
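One hedged reading of why the context above carries two tables (pre_g for odd multiples of G, pre_g_128 for odd multiples of 2^128*G): splitting the 256-bit scalar ng into 128-bit halves, ng = n1 + 2^128*n2, turns the double multiply into
\[ R = n_a A + n_1 G + n_2\,(2^{128} G), \]
so both halves of ng can be evaluated against precomputed odd multiples without ever computing 2^128*G at run time.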

View File

@ -15,6 +15,6 @@
* Here `bits` should be set to the maximum bitlength of the _absolute value_ of `q`, plus
* one because we internally sometimes add 2 to the number during the WNAF conversion.
*/
static void rustsecp256k1_v0_4_0_ecmult_const(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_ge *a, const rustsecp256k1_v0_4_0_scalar *q, int bits);
static void rustsecp256k1_v0_4_1_ecmult_const(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_ge *a, const rustsecp256k1_v0_4_1_scalar *q, int bits);
#endif /* SECP256K1_ECMULT_CONST_H */

View File

@ -19,12 +19,12 @@
int mask = (n) >> (sizeof(n) * CHAR_BIT - 1); \
int abs_n = ((n) + mask) ^ mask; \
int idx_n = abs_n >> 1; \
rustsecp256k1_v0_4_0_fe neg_y; \
rustsecp256k1_v0_4_1_fe neg_y; \
VERIFY_CHECK(((n) & 1) == 1); \
VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \
VERIFY_SETUP(rustsecp256k1_v0_4_0_fe_clear(&(r)->x)); \
VERIFY_SETUP(rustsecp256k1_v0_4_0_fe_clear(&(r)->y)); \
VERIFY_SETUP(rustsecp256k1_v0_4_1_fe_clear(&(r)->x)); \
VERIFY_SETUP(rustsecp256k1_v0_4_1_fe_clear(&(r)->y)); \
/* Unconditionally set r->x = (pre)[m].x and r->y = (pre)[m].y, because it's either the correct one \
* or will get replaced in a later iteration; this is needed to make sure `r` is initialized. */ \
(r)->x = (pre)[m].x; \
@ -32,12 +32,12 @@
for (m = 1; m < ECMULT_TABLE_SIZE(w); m++) { \
/* This loop is used to avoid secret data in array indices. See
* the comment in ecmult_gen_impl.h for rationale. */ \
rustsecp256k1_v0_4_0_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \
rustsecp256k1_v0_4_0_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \
rustsecp256k1_v0_4_1_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \
rustsecp256k1_v0_4_1_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \
} \
(r)->infinity = 0; \
rustsecp256k1_v0_4_0_fe_negate(&neg_y, &(r)->y, 1); \
rustsecp256k1_v0_4_0_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \
rustsecp256k1_v0_4_1_fe_negate(&neg_y, &(r)->y, 1); \
rustsecp256k1_v0_4_1_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \
} while(0)
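A minimal, self-contained sketch (plain 32-bit words, hypothetical helper name) of the constant-time selection idea the macro above applies to group-element coordinates via rustsecp256k1_v0_4_1_fe_cmov: every entry is visited and a mask-based move keeps the wanted one, so the memory access pattern does not depend on the secret index.

#include <stddef.h>
#include <stdint.h>

/* Sketch: branch-free table lookup; 'mask' is all-ones only when i == secret_idx. */
static uint32_t ct_table_get(const uint32_t *table, size_t len, size_t secret_idx) {
    uint32_t out = 0;
    size_t i;
    for (i = 0; i < len; i++) {
        uint32_t mask = (uint32_t)0 - (uint32_t)(i == secret_idx);
        out = (out & ~mask) | (table[i] & mask);
    }
    return out;
}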
@ -54,7 +54,7 @@
*
* Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on p. 335
*/
static int rustsecp256k1_v0_4_0_wnaf_const(int *wnaf, const rustsecp256k1_v0_4_0_scalar *scalar, int w, int size) {
static int rustsecp256k1_v0_4_1_wnaf_const(int *wnaf, const rustsecp256k1_v0_4_1_scalar *scalar, int w, int size) {
int global_sign;
int skew = 0;
int word = 0;
@ -65,7 +65,7 @@ static int rustsecp256k1_v0_4_0_wnaf_const(int *wnaf, const rustsecp256k1_v0_4_0
int flip;
int bit;
rustsecp256k1_v0_4_0_scalar s;
rustsecp256k1_v0_4_1_scalar s;
int not_neg_one;
VERIFY_CHECK(w > 0);
@ -83,32 +83,32 @@ static int rustsecp256k1_v0_4_0_wnaf_const(int *wnaf, const rustsecp256k1_v0_4_0
* particular, to ensure that the outputs from the endomorphism-split fit into
* 128 bits). If we negate, the parity of our number flips, inverting which of
* {1, 2} we want to add to the scalar when ensuring that it's odd. Further
* complicating things, -1 interacts badly with `rustsecp256k1_v0_4_0_scalar_cadd_bit` and
* complicating things, -1 interacts badly with `rustsecp256k1_v0_4_1_scalar_cadd_bit` and
* we need to special-case it in this logic. */
flip = rustsecp256k1_v0_4_0_scalar_is_high(scalar);
flip = rustsecp256k1_v0_4_1_scalar_is_high(scalar);
/* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */
bit = flip ^ !rustsecp256k1_v0_4_0_scalar_is_even(scalar);
bit = flip ^ !rustsecp256k1_v0_4_1_scalar_is_even(scalar);
/* We check for negative one, since adding 2 to it will cause an overflow */
rustsecp256k1_v0_4_0_scalar_negate(&s, scalar);
not_neg_one = !rustsecp256k1_v0_4_0_scalar_is_one(&s);
rustsecp256k1_v0_4_1_scalar_negate(&s, scalar);
not_neg_one = !rustsecp256k1_v0_4_1_scalar_is_one(&s);
s = *scalar;
rustsecp256k1_v0_4_0_scalar_cadd_bit(&s, bit, not_neg_one);
rustsecp256k1_v0_4_1_scalar_cadd_bit(&s, bit, not_neg_one);
/* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects
* that we added two to it and flipped it. In fact for -1 these operations are
* identical. We only flipped, but since skewing is required (in the sense that
* the skew must be 1 or 2, never zero) and flipping is not, we need to change
* our flags to claim that we only skewed. */
global_sign = rustsecp256k1_v0_4_0_scalar_cond_negate(&s, flip);
global_sign = rustsecp256k1_v0_4_1_scalar_cond_negate(&s, flip);
global_sign *= not_neg_one * 2 - 1;
skew = 1 << bit;
/* 4 */
u_last = rustsecp256k1_v0_4_0_scalar_shr_int(&s, w);
u_last = rustsecp256k1_v0_4_1_scalar_shr_int(&s, w);
do {
int even;
/* 4.1 4.4 */
u = rustsecp256k1_v0_4_0_scalar_shr_int(&s, w);
u = rustsecp256k1_v0_4_1_scalar_shr_int(&s, w);
/* 4.2 */
even = ((u & 1) == 0);
/* In contrast to the original algorithm, u_last is always > 0 and
@ -129,21 +129,21 @@ static int rustsecp256k1_v0_4_0_wnaf_const(int *wnaf, const rustsecp256k1_v0_4_0
} while (word * w < size);
wnaf[word] = u * global_sign;
VERIFY_CHECK(rustsecp256k1_v0_4_0_scalar_is_zero(&s));
VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_is_zero(&s));
VERIFY_CHECK(word == WNAF_SIZE_BITS(size, w));
return skew;
}
static void rustsecp256k1_v0_4_0_ecmult_const(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_ge *a, const rustsecp256k1_v0_4_0_scalar *scalar, int size) {
rustsecp256k1_v0_4_0_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
rustsecp256k1_v0_4_0_ge tmpa;
rustsecp256k1_v0_4_0_fe Z;
static void rustsecp256k1_v0_4_1_ecmult_const(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_ge *a, const rustsecp256k1_v0_4_1_scalar *scalar, int size) {
rustsecp256k1_v0_4_1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
rustsecp256k1_v0_4_1_ge tmpa;
rustsecp256k1_v0_4_1_fe Z;
int skew_1;
rustsecp256k1_v0_4_0_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
rustsecp256k1_v0_4_1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)];
int skew_lam;
rustsecp256k1_v0_4_0_scalar q_1, q_lam;
rustsecp256k1_v0_4_1_scalar q_1, q_lam;
int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];
int i;
@ -153,12 +153,12 @@ static void rustsecp256k1_v0_4_0_ecmult_const(rustsecp256k1_v0_4_0_gej *r, const
if (size > 128) {
rsize = 128;
/* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
rustsecp256k1_v0_4_0_scalar_split_lambda(&q_1, &q_lam, scalar);
skew_1 = rustsecp256k1_v0_4_0_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128);
skew_lam = rustsecp256k1_v0_4_0_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128);
rustsecp256k1_v0_4_1_scalar_split_lambda(&q_1, &q_lam, scalar);
skew_1 = rustsecp256k1_v0_4_1_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128);
skew_lam = rustsecp256k1_v0_4_1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128);
} else
{
skew_1 = rustsecp256k1_v0_4_0_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size);
skew_1 = rustsecp256k1_v0_4_1_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size);
skew_lam = 0;
}
@ -168,14 +168,14 @@ static void rustsecp256k1_v0_4_0_ecmult_const(rustsecp256k1_v0_4_0_gej *r, const
* that the Z coordinate was 1, use affine addition formulae, and correct
* the Z coordinate of the result once at the end.
*/
rustsecp256k1_v0_4_0_gej_set_ge(r, a);
rustsecp256k1_v0_4_0_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r);
rustsecp256k1_v0_4_1_gej_set_ge(r, a);
rustsecp256k1_v0_4_1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r);
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
rustsecp256k1_v0_4_0_fe_normalize_weak(&pre_a[i].y);
rustsecp256k1_v0_4_1_fe_normalize_weak(&pre_a[i].y);
}
if (size > 128) {
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
rustsecp256k1_v0_4_0_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
rustsecp256k1_v0_4_1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
}
}
@ -186,67 +186,67 @@ static void rustsecp256k1_v0_4_0_ecmult_const(rustsecp256k1_v0_4_0_gej *r, const
i = wnaf_1[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
VERIFY_CHECK(i != 0);
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
rustsecp256k1_v0_4_0_gej_set_ge(r, &tmpa);
rustsecp256k1_v0_4_1_gej_set_ge(r, &tmpa);
if (size > 128) {
i = wnaf_lam[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
VERIFY_CHECK(i != 0);
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A);
rustsecp256k1_v0_4_0_gej_add_ge(r, r, &tmpa);
rustsecp256k1_v0_4_1_gej_add_ge(r, r, &tmpa);
}
/* remaining loop iterations */
for (i = WNAF_SIZE_BITS(rsize, WINDOW_A - 1) - 1; i >= 0; i--) {
int n;
int j;
for (j = 0; j < WINDOW_A - 1; ++j) {
rustsecp256k1_v0_4_0_gej_double(r, r);
rustsecp256k1_v0_4_1_gej_double(r, r);
}
n = wnaf_1[i];
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
VERIFY_CHECK(n != 0);
rustsecp256k1_v0_4_0_gej_add_ge(r, r, &tmpa);
rustsecp256k1_v0_4_1_gej_add_ge(r, r, &tmpa);
if (size > 128) {
n = wnaf_lam[i];
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A);
VERIFY_CHECK(n != 0);
rustsecp256k1_v0_4_0_gej_add_ge(r, r, &tmpa);
rustsecp256k1_v0_4_1_gej_add_ge(r, r, &tmpa);
}
}
rustsecp256k1_v0_4_0_fe_mul(&r->z, &r->z, &Z);
rustsecp256k1_v0_4_1_fe_mul(&r->z, &r->z, &Z);
{
/* Correct for wNAF skew */
rustsecp256k1_v0_4_0_ge correction = *a;
rustsecp256k1_v0_4_0_ge_storage correction_1_stor;
rustsecp256k1_v0_4_0_ge_storage correction_lam_stor;
rustsecp256k1_v0_4_0_ge_storage a2_stor;
rustsecp256k1_v0_4_0_gej tmpj;
rustsecp256k1_v0_4_0_gej_set_ge(&tmpj, &correction);
rustsecp256k1_v0_4_0_gej_double_var(&tmpj, &tmpj, NULL);
rustsecp256k1_v0_4_0_ge_set_gej(&correction, &tmpj);
rustsecp256k1_v0_4_0_ge_to_storage(&correction_1_stor, a);
rustsecp256k1_v0_4_1_ge correction = *a;
rustsecp256k1_v0_4_1_ge_storage correction_1_stor;
rustsecp256k1_v0_4_1_ge_storage correction_lam_stor;
rustsecp256k1_v0_4_1_ge_storage a2_stor;
rustsecp256k1_v0_4_1_gej tmpj;
rustsecp256k1_v0_4_1_gej_set_ge(&tmpj, &correction);
rustsecp256k1_v0_4_1_gej_double_var(&tmpj, &tmpj, NULL);
rustsecp256k1_v0_4_1_ge_set_gej(&correction, &tmpj);
rustsecp256k1_v0_4_1_ge_to_storage(&correction_1_stor, a);
if (size > 128) {
rustsecp256k1_v0_4_0_ge_to_storage(&correction_lam_stor, a);
rustsecp256k1_v0_4_1_ge_to_storage(&correction_lam_stor, a);
}
rustsecp256k1_v0_4_0_ge_to_storage(&a2_stor, &correction);
rustsecp256k1_v0_4_1_ge_to_storage(&a2_stor, &correction);
/* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */
rustsecp256k1_v0_4_0_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2);
rustsecp256k1_v0_4_1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2);
if (size > 128) {
rustsecp256k1_v0_4_0_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2);
rustsecp256k1_v0_4_1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2);
}
/* Apply the correction */
rustsecp256k1_v0_4_0_ge_from_storage(&correction, &correction_1_stor);
rustsecp256k1_v0_4_0_ge_neg(&correction, &correction);
rustsecp256k1_v0_4_0_gej_add_ge(r, r, &correction);
rustsecp256k1_v0_4_1_ge_from_storage(&correction, &correction_1_stor);
rustsecp256k1_v0_4_1_ge_neg(&correction, &correction);
rustsecp256k1_v0_4_1_gej_add_ge(r, r, &correction);
if (size > 128) {
rustsecp256k1_v0_4_0_ge_from_storage(&correction, &correction_lam_stor);
rustsecp256k1_v0_4_0_ge_neg(&correction, &correction);
rustsecp256k1_v0_4_0_ge_mul_lambda(&correction, &correction);
rustsecp256k1_v0_4_0_gej_add_ge(r, r, &correction);
rustsecp256k1_v0_4_1_ge_from_storage(&correction, &correction_lam_stor);
rustsecp256k1_v0_4_1_ge_neg(&correction, &correction);
rustsecp256k1_v0_4_1_ge_mul_lambda(&correction, &correction);
rustsecp256k1_v0_4_1_gej_add_ge(r, r, &correction);
}
}
}
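For the size > 128 path of rustsecp256k1_v0_4_1_ecmult_const above, the split performed by rustsecp256k1_v0_4_1_scalar_split_lambda can be summarized as (with λ the secp256k1 endomorphism scalar):
\[ q \equiv q_1 + \lambda\, q_{\lambda} \pmod{n}, \qquad q\cdot A = q_1 A + q_{\lambda}\,(\lambda A), \]
with q_1 and q_lam roughly 128 bits each; λA is obtained cheaply by rustsecp256k1_v0_4_1_ge_mul_lambda, which is why pre_a_lam is derived directly from pre_a in the loop above.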

View File

@ -30,21 +30,21 @@ typedef struct {
* None of the resulting prec group elements have a known scalar, and neither do any of
* the intermediate sums while computing a*G.
*/
rustsecp256k1_v0_4_0_ge_storage (*prec)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G]; /* prec[j][i] = (PREC_G)^j * i * G + U_i */
rustsecp256k1_v0_4_0_scalar blind;
rustsecp256k1_v0_4_0_gej initial;
} rustsecp256k1_v0_4_0_ecmult_gen_context;
rustsecp256k1_v0_4_1_ge_storage (*prec)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G]; /* prec[j][i] = (PREC_G)^j * i * G + U_i */
rustsecp256k1_v0_4_1_scalar blind;
rustsecp256k1_v0_4_1_gej initial;
} rustsecp256k1_v0_4_1_ecmult_gen_context;
static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
static void rustsecp256k1_v0_4_0_ecmult_gen_context_init(rustsecp256k1_v0_4_0_ecmult_gen_context* ctx);
static void rustsecp256k1_v0_4_0_ecmult_gen_context_build(rustsecp256k1_v0_4_0_ecmult_gen_context* ctx, void **prealloc);
static void rustsecp256k1_v0_4_0_ecmult_gen_context_finalize_memcpy(rustsecp256k1_v0_4_0_ecmult_gen_context *dst, const rustsecp256k1_v0_4_0_ecmult_gen_context* src);
static void rustsecp256k1_v0_4_0_ecmult_gen_context_clear(rustsecp256k1_v0_4_0_ecmult_gen_context* ctx);
static int rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(const rustsecp256k1_v0_4_0_ecmult_gen_context* ctx);
static void rustsecp256k1_v0_4_1_ecmult_gen_context_init(rustsecp256k1_v0_4_1_ecmult_gen_context* ctx);
static void rustsecp256k1_v0_4_1_ecmult_gen_context_build(rustsecp256k1_v0_4_1_ecmult_gen_context* ctx, void **prealloc);
static void rustsecp256k1_v0_4_1_ecmult_gen_context_finalize_memcpy(rustsecp256k1_v0_4_1_ecmult_gen_context *dst, const rustsecp256k1_v0_4_1_ecmult_gen_context* src);
static void rustsecp256k1_v0_4_1_ecmult_gen_context_clear(rustsecp256k1_v0_4_1_ecmult_gen_context* ctx);
static int rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(const rustsecp256k1_v0_4_1_ecmult_gen_context* ctx);
/** Multiply with the generator: R = a*G */
static void rustsecp256k1_v0_4_0_ecmult_gen(const rustsecp256k1_v0_4_0_ecmult_gen_context* ctx, rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_scalar *a);
static void rustsecp256k1_v0_4_1_ecmult_gen(const rustsecp256k1_v0_4_1_ecmult_gen_context* ctx, rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_scalar *a);
static void rustsecp256k1_v0_4_0_ecmult_gen_blind(rustsecp256k1_v0_4_0_ecmult_gen_context *ctx, const unsigned char *seed32);
static void rustsecp256k1_v0_4_1_ecmult_gen_blind(rustsecp256k1_v0_4_1_ecmult_gen_context *ctx, const unsigned char *seed32);
#endif /* SECP256K1_ECMULT_GEN_H */
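A hedged reading of the prec layout documented above: writing the scalar a in ECMULT_GEN_PREC_N windows b_j of ECMULT_GEN_PREC_B bits each, so that a = sum_j b_j * (PREC_G)^j, the multiplication reduces to one table lookup per window,
\[ a\cdot G = \sum_{j} \mathrm{prec}[j][\,b_j\,] \quad\text{provided}\quad \sum_j U_j = \mathcal{O}, \]
and the context-building code arranges the nothing-up-my-sleeve offsets U_j baked into each row precisely so that they cancel to the point at infinity.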

View File

@ -17,20 +17,20 @@
#endif
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = ROUND_TO_ALIGN(sizeof(*((rustsecp256k1_v0_4_0_ecmult_gen_context*) NULL)->prec));
static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = ROUND_TO_ALIGN(sizeof(*((rustsecp256k1_v0_4_1_ecmult_gen_context*) NULL)->prec));
#else
static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = 0;
#endif
static void rustsecp256k1_v0_4_0_ecmult_gen_context_init(rustsecp256k1_v0_4_0_ecmult_gen_context *ctx) {
static void rustsecp256k1_v0_4_1_ecmult_gen_context_init(rustsecp256k1_v0_4_1_ecmult_gen_context *ctx) {
ctx->prec = NULL;
}
static void rustsecp256k1_v0_4_0_ecmult_gen_context_build(rustsecp256k1_v0_4_0_ecmult_gen_context *ctx, void **prealloc) {
static void rustsecp256k1_v0_4_1_ecmult_gen_context_build(rustsecp256k1_v0_4_1_ecmult_gen_context *ctx, void **prealloc) {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
rustsecp256k1_v0_4_0_ge prec[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G];
rustsecp256k1_v0_4_0_gej gj;
rustsecp256k1_v0_4_0_gej nums_gej;
rustsecp256k1_v0_4_1_ge prec[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G];
rustsecp256k1_v0_4_1_gej gj;
rustsecp256k1_v0_4_1_gej nums_gej;
int i, j;
size_t const prealloc_size = SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
void* const base = *prealloc;
@ -40,101 +40,101 @@ static void rustsecp256k1_v0_4_0_ecmult_gen_context_build(rustsecp256k1_v0_4_0_e
return;
}
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
ctx->prec = (rustsecp256k1_v0_4_0_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])manual_alloc(prealloc, prealloc_size, base, prealloc_size);
ctx->prec = (rustsecp256k1_v0_4_1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])manual_alloc(prealloc, prealloc_size, base, prealloc_size);
/* get the generator */
rustsecp256k1_v0_4_0_gej_set_ge(&gj, &rustsecp256k1_v0_4_0_ge_const_g);
rustsecp256k1_v0_4_1_gej_set_ge(&gj, &rustsecp256k1_v0_4_1_ge_const_g);
/* Construct a group element with no known corresponding scalar (nothing up my sleeve). */
{
static const unsigned char nums_b32[33] = "The scalar for this x is unknown";
rustsecp256k1_v0_4_0_fe nums_x;
rustsecp256k1_v0_4_0_ge nums_ge;
rustsecp256k1_v0_4_1_fe nums_x;
rustsecp256k1_v0_4_1_ge nums_ge;
int r;
r = rustsecp256k1_v0_4_0_fe_set_b32(&nums_x, nums_b32);
r = rustsecp256k1_v0_4_1_fe_set_b32(&nums_x, nums_b32);
(void)r;
VERIFY_CHECK(r);
r = rustsecp256k1_v0_4_0_ge_set_xo_var(&nums_ge, &nums_x, 0);
r = rustsecp256k1_v0_4_1_ge_set_xo_var(&nums_ge, &nums_x, 0);
(void)r;
VERIFY_CHECK(r);
rustsecp256k1_v0_4_0_gej_set_ge(&nums_gej, &nums_ge);
rustsecp256k1_v0_4_1_gej_set_ge(&nums_gej, &nums_ge);
/* Add G to make the bits in x uniformly distributed. */
rustsecp256k1_v0_4_0_gej_add_ge_var(&nums_gej, &nums_gej, &rustsecp256k1_v0_4_0_ge_const_g, NULL);
rustsecp256k1_v0_4_1_gej_add_ge_var(&nums_gej, &nums_gej, &rustsecp256k1_v0_4_1_ge_const_g, NULL);
}
/* compute prec. */
{
rustsecp256k1_v0_4_0_gej precj[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G]; /* Jacobian versions of prec. */
rustsecp256k1_v0_4_0_gej gbase;
rustsecp256k1_v0_4_0_gej numsbase;
rustsecp256k1_v0_4_1_gej precj[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G]; /* Jacobian versions of prec. */
rustsecp256k1_v0_4_1_gej gbase;
rustsecp256k1_v0_4_1_gej numsbase;
gbase = gj; /* PREC_G^j * G */
numsbase = nums_gej; /* 2^j * nums. */
for (j = 0; j < ECMULT_GEN_PREC_N; j++) {
/* Set precj[j*PREC_G .. j*PREC_G+(PREC_G-1)] to (numsbase, numsbase + gbase, ..., numsbase + (PREC_G-1)*gbase). */
precj[j*ECMULT_GEN_PREC_G] = numsbase;
for (i = 1; i < ECMULT_GEN_PREC_G; i++) {
rustsecp256k1_v0_4_0_gej_add_var(&precj[j*ECMULT_GEN_PREC_G + i], &precj[j*ECMULT_GEN_PREC_G + i - 1], &gbase, NULL);
rustsecp256k1_v0_4_1_gej_add_var(&precj[j*ECMULT_GEN_PREC_G + i], &precj[j*ECMULT_GEN_PREC_G + i - 1], &gbase, NULL);
}
/* Multiply gbase by PREC_G. */
for (i = 0; i < ECMULT_GEN_PREC_B; i++) {
rustsecp256k1_v0_4_0_gej_double_var(&gbase, &gbase, NULL);
rustsecp256k1_v0_4_1_gej_double_var(&gbase, &gbase, NULL);
}
/* Multiply numsbase by 2. */
rustsecp256k1_v0_4_0_gej_double_var(&numsbase, &numsbase, NULL);
rustsecp256k1_v0_4_1_gej_double_var(&numsbase, &numsbase, NULL);
if (j == ECMULT_GEN_PREC_N - 2) {
/* In the last iteration, numsbase is (1 - 2^j) * nums instead. */
rustsecp256k1_v0_4_0_gej_neg(&numsbase, &numsbase);
rustsecp256k1_v0_4_0_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL);
rustsecp256k1_v0_4_1_gej_neg(&numsbase, &numsbase);
rustsecp256k1_v0_4_1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL);
}
}
rustsecp256k1_v0_4_0_ge_set_all_gej_var(prec, precj, ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G);
rustsecp256k1_v0_4_1_ge_set_all_gej_var(prec, precj, ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G);
}
for (j = 0; j < ECMULT_GEN_PREC_N; j++) {
for (i = 0; i < ECMULT_GEN_PREC_G; i++) {
rustsecp256k1_v0_4_0_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*ECMULT_GEN_PREC_G + i]);
rustsecp256k1_v0_4_1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*ECMULT_GEN_PREC_G + i]);
}
}
#else
(void)prealloc;
ctx->prec = (rustsecp256k1_v0_4_0_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])rustsecp256k1_v0_4_0_ecmult_static_context;
ctx->prec = (rustsecp256k1_v0_4_1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])rustsecp256k1_v0_4_1_ecmult_static_context;
#endif
rustsecp256k1_v0_4_0_ecmult_gen_blind(ctx, NULL);
rustsecp256k1_v0_4_1_ecmult_gen_blind(ctx, NULL);
}
static int rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(const rustsecp256k1_v0_4_0_ecmult_gen_context* ctx) {
static int rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(const rustsecp256k1_v0_4_1_ecmult_gen_context* ctx) {
return ctx->prec != NULL;
}
static void rustsecp256k1_v0_4_0_ecmult_gen_context_finalize_memcpy(rustsecp256k1_v0_4_0_ecmult_gen_context *dst, const rustsecp256k1_v0_4_0_ecmult_gen_context *src) {
static void rustsecp256k1_v0_4_1_ecmult_gen_context_finalize_memcpy(rustsecp256k1_v0_4_1_ecmult_gen_context *dst, const rustsecp256k1_v0_4_1_ecmult_gen_context *src) {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
if (src->prec != NULL) {
/* We cast to void* first to suppress a -Wcast-align warning. */
dst->prec = (rustsecp256k1_v0_4_0_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])(void*)((unsigned char*)dst + ((unsigned char*)src->prec - (unsigned char*)src));
dst->prec = (rustsecp256k1_v0_4_1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])(void*)((unsigned char*)dst + ((unsigned char*)src->prec - (unsigned char*)src));
}
#else
(void)dst, (void)src;
#endif
}
static void rustsecp256k1_v0_4_0_ecmult_gen_context_clear(rustsecp256k1_v0_4_0_ecmult_gen_context *ctx) {
rustsecp256k1_v0_4_0_scalar_clear(&ctx->blind);
rustsecp256k1_v0_4_0_gej_clear(&ctx->initial);
static void rustsecp256k1_v0_4_1_ecmult_gen_context_clear(rustsecp256k1_v0_4_1_ecmult_gen_context *ctx) {
rustsecp256k1_v0_4_1_scalar_clear(&ctx->blind);
rustsecp256k1_v0_4_1_gej_clear(&ctx->initial);
ctx->prec = NULL;
}
static void rustsecp256k1_v0_4_0_ecmult_gen(const rustsecp256k1_v0_4_0_ecmult_gen_context *ctx, rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_scalar *gn) {
rustsecp256k1_v0_4_0_ge add;
rustsecp256k1_v0_4_0_ge_storage adds;
rustsecp256k1_v0_4_0_scalar gnb;
static void rustsecp256k1_v0_4_1_ecmult_gen(const rustsecp256k1_v0_4_1_ecmult_gen_context *ctx, rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_scalar *gn) {
rustsecp256k1_v0_4_1_ge add;
rustsecp256k1_v0_4_1_ge_storage adds;
rustsecp256k1_v0_4_1_scalar gnb;
int bits;
int i, j;
memset(&adds, 0, sizeof(adds));
*r = ctx->initial;
/* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. */
rustsecp256k1_v0_4_0_scalar_add(&gnb, gn, &ctx->blind);
rustsecp256k1_v0_4_1_scalar_add(&gnb, gn, &ctx->blind);
add.infinity = 0;
for (j = 0; j < ECMULT_GEN_PREC_N; j++) {
bits = rustsecp256k1_v0_4_0_scalar_get_bits(&gnb, j * ECMULT_GEN_PREC_B, ECMULT_GEN_PREC_B);
bits = rustsecp256k1_v0_4_1_scalar_get_bits(&gnb, j * ECMULT_GEN_PREC_B, ECMULT_GEN_PREC_B);
for (i = 0; i < ECMULT_GEN_PREC_G; i++) {
/** This uses a conditional move to avoid any secret data in array indexes.
* _Any_ use of secret indexes has been demonstrated to result in timing
@ -146,33 +146,33 @@ static void rustsecp256k1_v0_4_0_ecmult_gen(const rustsecp256k1_v0_4_0_ecmult_ge
* by Dag Arne Osvik, Adi Shamir, and Eran Tromer
* (https://www.tau.ac.il/~tromer/papers/cache.pdf)
*/
rustsecp256k1_v0_4_0_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits);
rustsecp256k1_v0_4_1_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits);
}
rustsecp256k1_v0_4_0_ge_from_storage(&add, &adds);
rustsecp256k1_v0_4_0_gej_add_ge(r, r, &add);
rustsecp256k1_v0_4_1_ge_from_storage(&add, &adds);
rustsecp256k1_v0_4_1_gej_add_ge(r, r, &add);
}
bits = 0;
rustsecp256k1_v0_4_0_ge_clear(&add);
rustsecp256k1_v0_4_0_scalar_clear(&gnb);
rustsecp256k1_v0_4_1_ge_clear(&add);
rustsecp256k1_v0_4_1_scalar_clear(&gnb);
}
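In the notation of the blinding comment above (a sketch; b is ctx->blind and I is ctx->initial, maintained so that I = -b*G up to the projective rescaling):
\[ r = I + (gn + b)\,G = -b\,G + gn\,G + b\,G = gn\cdot G, \]
so the accumulator never passes through an unblinded multiple of the secret scalar on its own.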
/* Setup blinding values for rustsecp256k1_v0_4_0_ecmult_gen. */
static void rustsecp256k1_v0_4_0_ecmult_gen_blind(rustsecp256k1_v0_4_0_ecmult_gen_context *ctx, const unsigned char *seed32) {
rustsecp256k1_v0_4_0_scalar b;
rustsecp256k1_v0_4_0_gej gb;
rustsecp256k1_v0_4_0_fe s;
/* Setup blinding values for rustsecp256k1_v0_4_1_ecmult_gen. */
static void rustsecp256k1_v0_4_1_ecmult_gen_blind(rustsecp256k1_v0_4_1_ecmult_gen_context *ctx, const unsigned char *seed32) {
rustsecp256k1_v0_4_1_scalar b;
rustsecp256k1_v0_4_1_gej gb;
rustsecp256k1_v0_4_1_fe s;
unsigned char nonce32[32];
rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 rng;
rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 rng;
int overflow;
unsigned char keydata[64] = {0};
if (seed32 == NULL) {
/* When seed is NULL, reset the initial point and blinding value. */
rustsecp256k1_v0_4_0_gej_set_ge(&ctx->initial, &rustsecp256k1_v0_4_0_ge_const_g);
rustsecp256k1_v0_4_0_gej_neg(&ctx->initial, &ctx->initial);
rustsecp256k1_v0_4_0_scalar_set_int(&ctx->blind, 1);
rustsecp256k1_v0_4_1_gej_set_ge(&ctx->initial, &rustsecp256k1_v0_4_1_ge_const_g);
rustsecp256k1_v0_4_1_gej_neg(&ctx->initial, &ctx->initial);
rustsecp256k1_v0_4_1_scalar_set_int(&ctx->blind, 1);
}
/* The prior blinding value (if not reset) is chained forward by including it in the hash. */
rustsecp256k1_v0_4_0_scalar_get_b32(nonce32, &ctx->blind);
rustsecp256k1_v0_4_1_scalar_get_b32(nonce32, &ctx->blind);
/** Using a CSPRNG allows a failure-free interface, avoids needing large amounts of random data,
* and guards against weak or adversarial seeds. This is a simpler and safer interface than
* asking the caller for blinding values directly and expecting them to retry on failure.
@ -181,28 +181,28 @@ static void rustsecp256k1_v0_4_0_ecmult_gen_blind(rustsecp256k1_v0_4_0_ecmult_ge
if (seed32 != NULL) {
memcpy(keydata + 32, seed32, 32);
}
rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_initialize(&rng, keydata, seed32 ? 64 : 32);
rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_initialize(&rng, keydata, seed32 ? 64 : 32);
memset(keydata, 0, sizeof(keydata));
/* Accept unobservably small non-uniformity. */
rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
overflow = !rustsecp256k1_v0_4_0_fe_set_b32(&s, nonce32);
overflow |= rustsecp256k1_v0_4_0_fe_is_zero(&s);
rustsecp256k1_v0_4_0_fe_cmov(&s, &rustsecp256k1_v0_4_0_fe_one, overflow);
rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
overflow = !rustsecp256k1_v0_4_1_fe_set_b32(&s, nonce32);
overflow |= rustsecp256k1_v0_4_1_fe_is_zero(&s);
rustsecp256k1_v0_4_1_fe_cmov(&s, &rustsecp256k1_v0_4_1_fe_one, overflow);
/* Randomize the projection to defend against multiplier sidechannels. */
rustsecp256k1_v0_4_0_gej_rescale(&ctx->initial, &s);
rustsecp256k1_v0_4_0_fe_clear(&s);
rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
rustsecp256k1_v0_4_0_scalar_set_b32(&b, nonce32, NULL);
rustsecp256k1_v0_4_1_gej_rescale(&ctx->initial, &s);
rustsecp256k1_v0_4_1_fe_clear(&s);
rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
rustsecp256k1_v0_4_1_scalar_set_b32(&b, nonce32, NULL);
/* A blinding value of 0 works, but would undermine the projection hardening. */
rustsecp256k1_v0_4_0_scalar_cmov(&b, &rustsecp256k1_v0_4_0_scalar_one, rustsecp256k1_v0_4_0_scalar_is_zero(&b));
rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_finalize(&rng);
rustsecp256k1_v0_4_1_scalar_cmov(&b, &rustsecp256k1_v0_4_1_scalar_one, rustsecp256k1_v0_4_1_scalar_is_zero(&b));
rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_finalize(&rng);
memset(nonce32, 0, 32);
rustsecp256k1_v0_4_0_ecmult_gen(ctx, &gb, &b);
rustsecp256k1_v0_4_0_scalar_negate(&b, &b);
rustsecp256k1_v0_4_1_ecmult_gen(ctx, &gb, &b);
rustsecp256k1_v0_4_1_scalar_negate(&b, &b);
ctx->blind = b;
ctx->initial = gb;
rustsecp256k1_v0_4_0_scalar_clear(&b);
rustsecp256k1_v0_4_0_gej_clear(&gb);
rustsecp256k1_v0_4_1_scalar_clear(&b);
rustsecp256k1_v0_4_1_gej_clear(&gb);
}
#endif /* SECP256K1_ECMULT_GEN_IMPL_H */
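One way to check the blinding scheme above (a reading of the code, not an authoritative design statement): after rustsecp256k1_v0_4_1_ecmult_gen_blind runs, the context holds blind = -b and initial = b*G, so a subsequent generator multiplication by a secret scalar gn is evaluated as

\[ gn \cdot G \;=\; (gn + \mathrm{blind})\cdot G + \mathrm{initial} \;=\; (gn - b)\cdot G + b\cdot G . \]

The table-driven accumulation therefore only ever processes the blinded scalar gn - b and a randomly rescaled starting point, while the final result is unchanged.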

File diff suppressed because it is too large

View File

@ -35,100 +35,91 @@
/** Normalize a field element. This brings the field element to a canonical representation, reduces
* its magnitude to 1, and reduces it modulo field size `p`.
*/
static void rustsecp256k1_v0_4_0_fe_normalize(rustsecp256k1_v0_4_0_fe *r);
static void rustsecp256k1_v0_4_1_fe_normalize(rustsecp256k1_v0_4_1_fe *r);
/** Weakly normalize a field element: reduce its magnitude to 1, but don't fully normalize. */
static void rustsecp256k1_v0_4_0_fe_normalize_weak(rustsecp256k1_v0_4_0_fe *r);
static void rustsecp256k1_v0_4_1_fe_normalize_weak(rustsecp256k1_v0_4_1_fe *r);
/** Normalize a field element, without constant-time guarantee. */
static void rustsecp256k1_v0_4_0_fe_normalize_var(rustsecp256k1_v0_4_0_fe *r);
static void rustsecp256k1_v0_4_1_fe_normalize_var(rustsecp256k1_v0_4_1_fe *r);
/** Verify whether a field element represents zero i.e. would normalize to a zero value. The field
* implementation may optionally normalize the input, but this should not be relied upon. */
static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero(rustsecp256k1_v0_4_0_fe *r);
/** Verify whether a field element represents zero i.e. would normalize to a zero value. */
static int rustsecp256k1_v0_4_1_fe_normalizes_to_zero(const rustsecp256k1_v0_4_1_fe *r);
/** Verify whether a field element represents zero i.e. would normalize to a zero value. The field
* implementation may optionally normalize the input, but this should not be relied upon. */
static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(rustsecp256k1_v0_4_0_fe *r);
/** Verify whether a field element represents zero i.e. would normalize to a zero value,
* without constant-time guarantee. */
static int rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(const rustsecp256k1_v0_4_1_fe *r);
/** Set a field element equal to a small integer. Resulting field element is normalized. */
static void rustsecp256k1_v0_4_0_fe_set_int(rustsecp256k1_v0_4_0_fe *r, int a);
static void rustsecp256k1_v0_4_1_fe_set_int(rustsecp256k1_v0_4_1_fe *r, int a);
/** Sets a field element equal to zero, initializing all fields. */
static void rustsecp256k1_v0_4_0_fe_clear(rustsecp256k1_v0_4_0_fe *a);
static void rustsecp256k1_v0_4_1_fe_clear(rustsecp256k1_v0_4_1_fe *a);
/** Verify whether a field element is zero. Requires the input to be normalized. */
static int rustsecp256k1_v0_4_0_fe_is_zero(const rustsecp256k1_v0_4_0_fe *a);
static int rustsecp256k1_v0_4_1_fe_is_zero(const rustsecp256k1_v0_4_1_fe *a);
/** Check the "oddness" of a field element. Requires the input to be normalized. */
static int rustsecp256k1_v0_4_0_fe_is_odd(const rustsecp256k1_v0_4_0_fe *a);
static int rustsecp256k1_v0_4_1_fe_is_odd(const rustsecp256k1_v0_4_1_fe *a);
/** Compare two field elements. Requires magnitude-1 inputs. */
static int rustsecp256k1_v0_4_0_fe_equal(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b);
static int rustsecp256k1_v0_4_1_fe_equal(const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe *b);
/** Same as rustsecp256k1_v0_4_0_fe_equal, but may be variable time. */
static int rustsecp256k1_v0_4_0_fe_equal_var(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b);
/** Same as rustsecp256k1_v0_4_1_fe_equal, but may be variable time. */
static int rustsecp256k1_v0_4_1_fe_equal_var(const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe *b);
/** Compare two field elements. Requires both inputs to be normalized */
static int rustsecp256k1_v0_4_0_fe_cmp_var(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b);
static int rustsecp256k1_v0_4_1_fe_cmp_var(const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe *b);
/** Set a field element equal to 32-byte big endian value. If successful, the resulting field element is normalized. */
static int rustsecp256k1_v0_4_0_fe_set_b32(rustsecp256k1_v0_4_0_fe *r, const unsigned char *a);
static int rustsecp256k1_v0_4_1_fe_set_b32(rustsecp256k1_v0_4_1_fe *r, const unsigned char *a);
/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
static void rustsecp256k1_v0_4_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_4_0_fe *a);
static void rustsecp256k1_v0_4_1_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_4_1_fe *a);
/** Set a field element equal to the additive inverse of another. Takes a maximum magnitude of the input
* as an argument. The magnitude of the output is one higher. */
static void rustsecp256k1_v0_4_0_fe_negate(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, int m);
static void rustsecp256k1_v0_4_1_fe_negate(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a, int m);
/** Multiplies the passed field element with a small integer constant. Multiplies the magnitude by that
* small integer. */
static void rustsecp256k1_v0_4_0_fe_mul_int(rustsecp256k1_v0_4_0_fe *r, int a);
static void rustsecp256k1_v0_4_1_fe_mul_int(rustsecp256k1_v0_4_1_fe *r, int a);
/** Adds a field element to another. The result has the sum of the inputs' magnitudes as magnitude. */
static void rustsecp256k1_v0_4_0_fe_add(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a);
static void rustsecp256k1_v0_4_1_fe_add(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a);
/** Sets a field element to be the product of two others. Requires the inputs' magnitudes to be at most 8.
* The output magnitude is 1 (but not guaranteed to be normalized). */
static void rustsecp256k1_v0_4_0_fe_mul(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe * SECP256K1_RESTRICT b);
static void rustsecp256k1_v0_4_1_fe_mul(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe * SECP256K1_RESTRICT b);
/** Sets a field element to be the square of another. Requires the input's magnitude to be at most 8.
* The output magnitude is 1 (but not guaranteed to be normalized). */
static void rustsecp256k1_v0_4_0_fe_sqr(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a);
static void rustsecp256k1_v0_4_1_fe_sqr(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a);
/** If a has a square root, it is computed in r and 1 is returned. If a does not
* have a square root, the root of its negation is computed and 0 is returned.
* The input's magnitude can be at most 8. The output magnitude is 1 (but not
* guaranteed to be normalized). The result in r will always be a square
* itself. */
static int rustsecp256k1_v0_4_0_fe_sqrt(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a);
/** Checks whether a field element is a quadratic residue. */
static int rustsecp256k1_v0_4_0_fe_is_quad_var(const rustsecp256k1_v0_4_0_fe *a);
static int rustsecp256k1_v0_4_1_fe_sqrt(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a);
/** Sets a field element to be the (modular) inverse of another. Requires the input's magnitude to be
* at most 8. The output magnitude is 1 (but not guaranteed to be normalized). */
static void rustsecp256k1_v0_4_0_fe_inv(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a);
static void rustsecp256k1_v0_4_1_fe_inv(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a);
/** Potentially faster version of rustsecp256k1_v0_4_0_fe_inv, without constant-time guarantee. */
static void rustsecp256k1_v0_4_0_fe_inv_var(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a);
/** Calculate the (modular) inverses of a batch of field elements. Requires the inputs' magnitudes to be
* at most 8. The output magnitudes are 1 (but not guaranteed to be normalized). The inputs and
* outputs must not overlap in memory. */
static void rustsecp256k1_v0_4_0_fe_inv_all_var(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, size_t len);
/** Potentially faster version of rustsecp256k1_v0_4_1_fe_inv, without constant-time guarantee. */
static void rustsecp256k1_v0_4_1_fe_inv_var(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a);
/** Convert a field element to the storage type. */
static void rustsecp256k1_v0_4_0_fe_to_storage(rustsecp256k1_v0_4_0_fe_storage *r, const rustsecp256k1_v0_4_0_fe *a);
static void rustsecp256k1_v0_4_1_fe_to_storage(rustsecp256k1_v0_4_1_fe_storage *r, const rustsecp256k1_v0_4_1_fe *a);
/** Convert a field element back from the storage type. */
static void rustsecp256k1_v0_4_0_fe_from_storage(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe_storage *a);
static void rustsecp256k1_v0_4_1_fe_from_storage(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe_storage *a);
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/
static void rustsecp256k1_v0_4_0_fe_storage_cmov(rustsecp256k1_v0_4_0_fe_storage *r, const rustsecp256k1_v0_4_0_fe_storage *a, int flag);
static void rustsecp256k1_v0_4_1_fe_storage_cmov(rustsecp256k1_v0_4_1_fe_storage *r, const rustsecp256k1_v0_4_1_fe_storage *a, int flag);
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/
static void rustsecp256k1_v0_4_0_fe_cmov(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, int flag);
static void rustsecp256k1_v0_4_1_fe_cmov(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a, int flag);
#endif /* SECP256K1_FIELD_H */
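Collecting the magnitude contracts stated in the comments above into one place (a restatement for convenience, not additional requirements):

\[ \operatorname{mag}(a + b) = \operatorname{mag}(a) + \operatorname{mag}(b), \qquad \operatorname{mag}(\mathrm{negate}_m(a)) = m + 1, \qquad \operatorname{mag}(k \cdot a) = k \cdot \operatorname{mag}(a), \]

while fe_mul and fe_sqr require operand magnitudes of at most 8 and produce magnitude 1, and the normalize functions reset the magnitude to 1.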

View File

@ -18,7 +18,7 @@ typedef struct {
int magnitude;
int normalized;
#endif
} rustsecp256k1_v0_4_0_fe;
} rustsecp256k1_v0_4_1_fe;
/* Unpacks a constant into an overlapping multi-limbed FE element. */
#define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \
@ -42,7 +42,7 @@ typedef struct {
typedef struct {
uint32_t n[8];
} rustsecp256k1_v0_4_0_fe_storage;
} rustsecp256k1_v0_4_1_fe_storage;
#define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}
#define SECP256K1_FE_STORAGE_CONST_GET(d) d.n[7], d.n[6], d.n[5], d.n[4],d.n[3], d.n[2], d.n[1], d.n[0]

View File

@ -9,9 +9,10 @@
#include "util.h"
#include "field.h"
#include "modinv32_impl.h"
#ifdef VERIFY
static void rustsecp256k1_v0_4_0_fe_verify(const rustsecp256k1_v0_4_0_fe *a) {
static void rustsecp256k1_v0_4_1_fe_verify(const rustsecp256k1_v0_4_1_fe *a) {
const uint32_t *d = a->n;
int m = a->normalized ? 1 : 2 * a->magnitude, r = 1;
r &= (d[0] <= 0x3FFFFFFUL * m);
@ -39,7 +40,7 @@ static void rustsecp256k1_v0_4_0_fe_verify(const rustsecp256k1_v0_4_0_fe *a) {
}
#endif
static void rustsecp256k1_v0_4_0_fe_normalize(rustsecp256k1_v0_4_0_fe *r) {
static void rustsecp256k1_v0_4_1_fe_normalize(rustsecp256k1_v0_4_1_fe *r) {
uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
@ -90,11 +91,11 @@ static void rustsecp256k1_v0_4_0_fe_normalize(rustsecp256k1_v0_4_0_fe *r) {
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
static void rustsecp256k1_v0_4_0_fe_normalize_weak(rustsecp256k1_v0_4_0_fe *r) {
static void rustsecp256k1_v0_4_1_fe_normalize_weak(rustsecp256k1_v0_4_1_fe *r) {
uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
@ -121,11 +122,11 @@ static void rustsecp256k1_v0_4_0_fe_normalize_weak(rustsecp256k1_v0_4_0_fe *r) {
#ifdef VERIFY
r->magnitude = 1;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
static void rustsecp256k1_v0_4_0_fe_normalize_var(rustsecp256k1_v0_4_0_fe *r) {
static void rustsecp256k1_v0_4_1_fe_normalize_var(rustsecp256k1_v0_4_1_fe *r) {
uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
@ -177,11 +178,11 @@ static void rustsecp256k1_v0_4_0_fe_normalize_var(rustsecp256k1_v0_4_0_fe *r) {
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero(rustsecp256k1_v0_4_0_fe *r) {
static int rustsecp256k1_v0_4_1_fe_normalizes_to_zero(const rustsecp256k1_v0_4_1_fe *r) {
uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
@ -210,7 +211,7 @@ static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero(rustsecp256k1_v0_4_0_fe *r
return (z0 == 0) | (z1 == 0x3FFFFFFUL);
}
static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(rustsecp256k1_v0_4_0_fe *r) {
static int rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(const rustsecp256k1_v0_4_1_fe *r) {
uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9;
uint32_t z0, z1;
uint32_t x;
@ -262,34 +263,34 @@ static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(rustsecp256k1_v0_4_0_f
return (z0 == 0) | (z1 == 0x3FFFFFFUL);
}
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_set_int(rustsecp256k1_v0_4_0_fe *r, int a) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_set_int(rustsecp256k1_v0_4_1_fe *r, int a) {
r->n[0] = a;
r->n[1] = r->n[2] = r->n[3] = r->n[4] = r->n[5] = r->n[6] = r->n[7] = r->n[8] = r->n[9] = 0;
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_fe_is_zero(const rustsecp256k1_v0_4_0_fe *a) {
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_fe_is_zero(const rustsecp256k1_v0_4_1_fe *a) {
const uint32_t *t = a->n;
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
rustsecp256k1_v0_4_0_fe_verify(a);
rustsecp256k1_v0_4_1_fe_verify(a);
#endif
return (t[0] | t[1] | t[2] | t[3] | t[4] | t[5] | t[6] | t[7] | t[8] | t[9]) == 0;
}
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_fe_is_odd(const rustsecp256k1_v0_4_0_fe *a) {
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_fe_is_odd(const rustsecp256k1_v0_4_1_fe *a) {
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
rustsecp256k1_v0_4_0_fe_verify(a);
rustsecp256k1_v0_4_1_fe_verify(a);
#endif
return a->n[0] & 1;
}
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_clear(rustsecp256k1_v0_4_0_fe *a) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_clear(rustsecp256k1_v0_4_1_fe *a) {
int i;
#ifdef VERIFY
a->magnitude = 0;
@ -300,13 +301,13 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_clear(rustsecp256k1_v0_4_0_
}
}
static int rustsecp256k1_v0_4_0_fe_cmp_var(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b) {
static int rustsecp256k1_v0_4_1_fe_cmp_var(const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe *b) {
int i;
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
VERIFY_CHECK(b->normalized);
rustsecp256k1_v0_4_0_fe_verify(a);
rustsecp256k1_v0_4_0_fe_verify(b);
rustsecp256k1_v0_4_1_fe_verify(a);
rustsecp256k1_v0_4_1_fe_verify(b);
#endif
for (i = 9; i >= 0; i--) {
if (a->n[i] > b->n[i]) {
@ -319,7 +320,7 @@ static int rustsecp256k1_v0_4_0_fe_cmp_var(const rustsecp256k1_v0_4_0_fe *a, con
return 0;
}
static int rustsecp256k1_v0_4_0_fe_set_b32(rustsecp256k1_v0_4_0_fe *r, const unsigned char *a) {
static int rustsecp256k1_v0_4_1_fe_set_b32(rustsecp256k1_v0_4_1_fe *r, const unsigned char *a) {
int ret;
r->n[0] = (uint32_t)a[31] | ((uint32_t)a[30] << 8) | ((uint32_t)a[29] << 16) | ((uint32_t)(a[28] & 0x3) << 24);
r->n[1] = (uint32_t)((a[28] >> 2) & 0x3f) | ((uint32_t)a[27] << 6) | ((uint32_t)a[26] << 14) | ((uint32_t)(a[25] & 0xf) << 22);
@ -337,7 +338,7 @@ static int rustsecp256k1_v0_4_0_fe_set_b32(rustsecp256k1_v0_4_0_fe *r, const uns
r->magnitude = 1;
if (ret) {
r->normalized = 1;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
} else {
r->normalized = 0;
}
@ -346,10 +347,10 @@ static int rustsecp256k1_v0_4_0_fe_set_b32(rustsecp256k1_v0_4_0_fe *r, const uns
}
/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
static void rustsecp256k1_v0_4_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_4_0_fe *a) {
static void rustsecp256k1_v0_4_1_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_4_1_fe *a) {
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
rustsecp256k1_v0_4_0_fe_verify(a);
rustsecp256k1_v0_4_1_fe_verify(a);
#endif
r[0] = (a->n[9] >> 14) & 0xff;
r[1] = (a->n[9] >> 6) & 0xff;
@ -385,10 +386,10 @@ static void rustsecp256k1_v0_4_0_fe_get_b32(unsigned char *r, const rustsecp256k
r[31] = a->n[0] & 0xff;
}
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_negate(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, int m) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_negate(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a, int m) {
#ifdef VERIFY
VERIFY_CHECK(a->magnitude <= m);
rustsecp256k1_v0_4_0_fe_verify(a);
rustsecp256k1_v0_4_1_fe_verify(a);
#endif
r->n[0] = 0x3FFFC2FUL * 2 * (m + 1) - a->n[0];
r->n[1] = 0x3FFFFBFUL * 2 * (m + 1) - a->n[1];
@ -403,11 +404,11 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_negate(rustsecp256k1_v0_4_0
#ifdef VERIFY
r->magnitude = m + 1;
r->normalized = 0;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
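A note on the constants used just above (one reading of the 10x26 representation, not taken from the source comments): each output limb is computed as 2(m+1) times the corresponding 26-bit limb of p minus the input limb, so

\[ r \;=\; 2(m+1)\,p - a \;\equiv\; -a \pmod{p}. \]

Because an input of magnitude at most m has limbs bounded by roughly 2m times the per-limb maximum, the per-limb subtractions cannot underflow, and the result legitimately carries magnitude m + 1 as recorded in the VERIFY block.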
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_mul_int(rustsecp256k1_v0_4_0_fe *r, int a) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_mul_int(rustsecp256k1_v0_4_1_fe *r, int a) {
r->n[0] *= a;
r->n[1] *= a;
r->n[2] *= a;
@ -421,13 +422,13 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_mul_int(rustsecp256k1_v0_4_
#ifdef VERIFY
r->magnitude *= a;
r->normalized = 0;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_add(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_add(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a) {
#ifdef VERIFY
rustsecp256k1_v0_4_0_fe_verify(a);
rustsecp256k1_v0_4_1_fe_verify(a);
#endif
r->n[0] += a->n[0];
r->n[1] += a->n[1];
@ -442,15 +443,15 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_add(rustsecp256k1_v0_4_0_fe
#ifdef VERIFY
r->magnitude += a->magnitude;
r->normalized = 0;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
#if defined(USE_EXTERNAL_ASM)
/* External assembler implementation */
void rustsecp256k1_v0_4_0_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b);
void rustsecp256k1_v0_4_0_fe_sqr_inner(uint32_t *r, const uint32_t *a);
void rustsecp256k1_v0_4_1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b);
void rustsecp256k1_v0_4_1_fe_sqr_inner(uint32_t *r, const uint32_t *a);
#else
@ -460,7 +461,7 @@ void rustsecp256k1_v0_4_0_fe_sqr_inner(uint32_t *r, const uint32_t *a);
#define VERIFY_BITS(x, n) do { } while(0)
#endif
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) {
uint64_t c, d;
uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8;
uint32_t t9, t1, t0, t2, t3, t4, t5, t6, t7;
@ -790,7 +791,7 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_mul_inner(uint32_t *r, cons
/* [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
}
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_sqr_inner(uint32_t *r, const uint32_t *a) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_sqr_inner(uint32_t *r, const uint32_t *a) {
uint64_t c, d;
uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8;
uint32_t t9, t0, t1, t2, t3, t4, t5, t6, t7;
@ -1065,37 +1066,37 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_sqr_inner(uint32_t *r, cons
}
#endif
static void rustsecp256k1_v0_4_0_fe_mul(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe * SECP256K1_RESTRICT b) {
static void rustsecp256k1_v0_4_1_fe_mul(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe * SECP256K1_RESTRICT b) {
#ifdef VERIFY
VERIFY_CHECK(a->magnitude <= 8);
VERIFY_CHECK(b->magnitude <= 8);
rustsecp256k1_v0_4_0_fe_verify(a);
rustsecp256k1_v0_4_0_fe_verify(b);
rustsecp256k1_v0_4_1_fe_verify(a);
rustsecp256k1_v0_4_1_fe_verify(b);
VERIFY_CHECK(r != b);
VERIFY_CHECK(a != b);
#endif
rustsecp256k1_v0_4_0_fe_mul_inner(r->n, a->n, b->n);
rustsecp256k1_v0_4_1_fe_mul_inner(r->n, a->n, b->n);
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 0;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
static void rustsecp256k1_v0_4_0_fe_sqr(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) {
static void rustsecp256k1_v0_4_1_fe_sqr(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a) {
#ifdef VERIFY
VERIFY_CHECK(a->magnitude <= 8);
rustsecp256k1_v0_4_0_fe_verify(a);
rustsecp256k1_v0_4_1_fe_verify(a);
#endif
rustsecp256k1_v0_4_0_fe_sqr_inner(r->n, a->n);
rustsecp256k1_v0_4_1_fe_sqr_inner(r->n, a->n);
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 0;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_cmov(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, int flag) {
static SECP256K1_INLINE void rustsecp256k1_v0_4_1_fe_cmov(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a, int flag) {
uint32_t mask0, mask1;
VG_CHECK_VERIFY(r->n, sizeof(r->n));
mask0 = flag + ~((uint32_t)0);
@ -1118,7 +1119,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_cmov(rustsecp256k1_v0_4_0_f
#endif
}
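The mask construction in rustsecp256k1_v0_4_1_fe_cmov above (mask0 = flag + ~0, mask1 = ~mask0) selects between the two operands with pure bitwise arithmetic. A small self-contained C illustration of the same trick (hypothetical helper, not library code):

#include <stdint.h>
#include <stdio.h>

/* flag must be 0 or 1: returns a when flag is 0 and b when flag is 1,
 * without a data-dependent branch. */
static uint32_t ct_select_u32(uint32_t a, uint32_t b, int flag) {
    uint32_t mask0 = (uint32_t)flag + ~((uint32_t)0); /* all ones if flag == 0, zero if flag == 1 */
    uint32_t mask1 = ~mask0;
    return (a & mask0) | (b & mask1);
}

int main(void) {
    printf("%u %u\n", ct_select_u32(7, 9, 0), ct_select_u32(7, 9, 1)); /* prints 7 9 */
    return 0;
}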
static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_storage_cmov(rustsecp256k1_v0_4_0_fe_storage *r, const rustsecp256k1_v0_4_0_fe_storage *a, int flag) {
static SECP256K1_INLINE void rustsecp256k1_v0_4_1_fe_storage_cmov(rustsecp256k1_v0_4_1_fe_storage *r, const rustsecp256k1_v0_4_1_fe_storage *a, int flag) {
uint32_t mask0, mask1;
VG_CHECK_VERIFY(r->n, sizeof(r->n));
mask0 = flag + ~((uint32_t)0);
@ -1133,7 +1134,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_storage_cmov(rustsecp256k1_
r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1);
}
static void rustsecp256k1_v0_4_0_fe_to_storage(rustsecp256k1_v0_4_0_fe_storage *r, const rustsecp256k1_v0_4_0_fe *a) {
static void rustsecp256k1_v0_4_1_fe_to_storage(rustsecp256k1_v0_4_1_fe_storage *r, const rustsecp256k1_v0_4_1_fe *a) {
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
#endif
@ -1147,7 +1148,7 @@ static void rustsecp256k1_v0_4_0_fe_to_storage(rustsecp256k1_v0_4_0_fe_storage *
r->n[7] = a->n[8] >> 16 | a->n[9] << 10;
}
static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_from_storage(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe_storage *a) {
static SECP256K1_INLINE void rustsecp256k1_v0_4_1_fe_from_storage(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe_storage *a) {
r->n[0] = a->n[0] & 0x3FFFFFFUL;
r->n[1] = a->n[0] >> 26 | ((a->n[1] << 6) & 0x3FFFFFFUL);
r->n[2] = a->n[1] >> 20 | ((a->n[2] << 12) & 0x3FFFFFFUL);
@ -1164,4 +1165,92 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_from_storage(rustsecp256k1_
#endif
}
static void rustsecp256k1_v0_4_1_fe_from_signed30(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_modinv32_signed30 *a) {
const uint32_t M26 = UINT32_MAX >> 6;
const uint32_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4],
a5 = a->v[5], a6 = a->v[6], a7 = a->v[7], a8 = a->v[8];
/* The output from rustsecp256k1_v0_4_1_modinv32{_var} should be normalized to range [0,modulus), and
* have limbs in [0,2^30). The modulus is < 2^256, so the top limb must be below 2^(256-30*8).
*/
VERIFY_CHECK(a0 >> 30 == 0);
VERIFY_CHECK(a1 >> 30 == 0);
VERIFY_CHECK(a2 >> 30 == 0);
VERIFY_CHECK(a3 >> 30 == 0);
VERIFY_CHECK(a4 >> 30 == 0);
VERIFY_CHECK(a5 >> 30 == 0);
VERIFY_CHECK(a6 >> 30 == 0);
VERIFY_CHECK(a7 >> 30 == 0);
VERIFY_CHECK(a8 >> 16 == 0);
r->n[0] = a0 & M26;
r->n[1] = (a0 >> 26 | a1 << 4) & M26;
r->n[2] = (a1 >> 22 | a2 << 8) & M26;
r->n[3] = (a2 >> 18 | a3 << 12) & M26;
r->n[4] = (a3 >> 14 | a4 << 16) & M26;
r->n[5] = (a4 >> 10 | a5 << 20) & M26;
r->n[6] = (a5 >> 6 | a6 << 24) & M26;
r->n[7] = (a6 >> 2 ) & M26;
r->n[8] = (a6 >> 28 | a7 << 2) & M26;
r->n[9] = (a7 >> 24 | a8 << 6);
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
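To see why the shift/or expressions above repack correctly (a quick check, not from the source): the signed30 input represents \( \sum_{i=0}^{8} a_i 2^{30 i} \) and the field element represents \( \sum_{j=0}^{9} r_j 2^{26 j} \). Output limb j is simply bits \([26j, 26j+26)\) of the input value; for example \( r_1 \) covers bits 26..51, which are the top 4 bits of \( a_0 \) and the low 22 bits of \( a_1 \), i.e. (a0 >> 26 | a1 << 4) masked to 26 bits, exactly as written.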
static void rustsecp256k1_v0_4_1_fe_to_signed30(rustsecp256k1_v0_4_1_modinv32_signed30 *r, const rustsecp256k1_v0_4_1_fe *a) {
const uint32_t M30 = UINT32_MAX >> 2;
const uint64_t a0 = a->n[0], a1 = a->n[1], a2 = a->n[2], a3 = a->n[3], a4 = a->n[4],
a5 = a->n[5], a6 = a->n[6], a7 = a->n[7], a8 = a->n[8], a9 = a->n[9];
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
#endif
r->v[0] = (a0 | a1 << 26) & M30;
r->v[1] = (a1 >> 4 | a2 << 22) & M30;
r->v[2] = (a2 >> 8 | a3 << 18) & M30;
r->v[3] = (a3 >> 12 | a4 << 14) & M30;
r->v[4] = (a4 >> 16 | a5 << 10) & M30;
r->v[5] = (a5 >> 20 | a6 << 6) & M30;
r->v[6] = (a6 >> 24 | a7 << 2
| a8 << 28) & M30;
r->v[7] = (a8 >> 2 | a9 << 24) & M30;
r->v[8] = a9 >> 6;
}
static const rustsecp256k1_v0_4_1_modinv32_modinfo rustsecp256k1_v0_4_1_const_modinfo_fe = {
{{-0x3D1, -4, 0, 0, 0, 0, 0, 0, 65536}},
0x2DDACACFL
};
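A quick arithmetic check of the constant above (not stated in the source, but easy to verify): interpreting the signed30 limbs \(-\mathrm{0x3D1}, -4, 0, \ldots, 0, 65536\) as \( \sum_i v_i 2^{30 i} \) gives

\[ 2^{16}\cdot 2^{240} - 4\cdot 2^{30} - 977 \;=\; 2^{256} - 2^{32} - 977 \;=\; p, \]

the secp256k1 field prime, so rustsecp256k1_v0_4_1_modinv32 is inverting modulo the correct modulus.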
static void rustsecp256k1_v0_4_1_fe_inv(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *x) {
rustsecp256k1_v0_4_1_fe tmp;
rustsecp256k1_v0_4_1_modinv32_signed30 s;
tmp = *x;
rustsecp256k1_v0_4_1_fe_normalize(&tmp);
rustsecp256k1_v0_4_1_fe_to_signed30(&s, &tmp);
rustsecp256k1_v0_4_1_modinv32(&s, &rustsecp256k1_v0_4_1_const_modinfo_fe);
rustsecp256k1_v0_4_1_fe_from_signed30(r, &s);
VERIFY_CHECK(rustsecp256k1_v0_4_1_fe_normalizes_to_zero(r) == rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&tmp));
}
static void rustsecp256k1_v0_4_1_fe_inv_var(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *x) {
rustsecp256k1_v0_4_1_fe tmp;
rustsecp256k1_v0_4_1_modinv32_signed30 s;
tmp = *x;
rustsecp256k1_v0_4_1_fe_normalize_var(&tmp);
rustsecp256k1_v0_4_1_fe_to_signed30(&s, &tmp);
rustsecp256k1_v0_4_1_modinv32_var(&s, &rustsecp256k1_v0_4_1_const_modinfo_fe);
rustsecp256k1_v0_4_1_fe_from_signed30(r, &s);
VERIFY_CHECK(rustsecp256k1_v0_4_1_fe_normalizes_to_zero(r) == rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&tmp));
}
#endif /* SECP256K1_FIELD_REPR_IMPL_H */

View File

@ -18,7 +18,7 @@ typedef struct {
int magnitude;
int normalized;
#endif
} rustsecp256k1_v0_4_0_fe;
} rustsecp256k1_v0_4_1_fe;
/* Unpacks a constant into an overlapping multi-limbed FE element. */
#define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \
@ -37,7 +37,7 @@ typedef struct {
typedef struct {
uint64_t n[4];
} rustsecp256k1_v0_4_0_fe_storage;
} rustsecp256k1_v0_4_1_fe_storage;
#define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ \
(d0) | (((uint64_t)(d1)) << 32), \

View File

@ -14,7 +14,7 @@
#ifndef SECP256K1_FIELD_INNER5X52_IMPL_H
#define SECP256K1_FIELD_INNER5X52_IMPL_H
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
/**
* Registers: rdx:rax = multiplication accumulator
* r9:r8 = c
@ -284,7 +284,7 @@ __asm__ __volatile__(
);
}
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_sqr_inner(uint64_t *r, const uint64_t *a) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_sqr_inner(uint64_t *r, const uint64_t *a) {
/**
* Registers: rdx:rax = multiplication accumulator
* r9:r8 = c

View File

@ -13,6 +13,7 @@
#include "util.h"
#include "field.h"
#include "modinv64_impl.h"
#if defined(USE_ASM_X86_64)
#include "field_5x52_asm_impl.h"
@ -29,7 +30,7 @@
*/
#ifdef VERIFY
static void rustsecp256k1_v0_4_0_fe_verify(const rustsecp256k1_v0_4_0_fe *a) {
static void rustsecp256k1_v0_4_1_fe_verify(const rustsecp256k1_v0_4_1_fe *a) {
const uint64_t *d = a->n;
int m = a->normalized ? 1 : 2 * a->magnitude, r = 1;
/* secp256k1 'p' value defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
@ -50,7 +51,7 @@ static void rustsecp256k1_v0_4_0_fe_verify(const rustsecp256k1_v0_4_0_fe *a) {
}
#endif
static void rustsecp256k1_v0_4_0_fe_normalize(rustsecp256k1_v0_4_0_fe *r) {
static void rustsecp256k1_v0_4_1_fe_normalize(rustsecp256k1_v0_4_1_fe *r) {
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
/* Reduce t4 at the start so there will be at most a single carry from the first pass */
@ -89,11 +90,11 @@ static void rustsecp256k1_v0_4_0_fe_normalize(rustsecp256k1_v0_4_0_fe *r) {
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
static void rustsecp256k1_v0_4_0_fe_normalize_weak(rustsecp256k1_v0_4_0_fe *r) {
static void rustsecp256k1_v0_4_1_fe_normalize_weak(rustsecp256k1_v0_4_1_fe *r) {
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
/* Reduce t4 at the start so there will be at most a single carry from the first pass */
@ -113,11 +114,11 @@ static void rustsecp256k1_v0_4_0_fe_normalize_weak(rustsecp256k1_v0_4_0_fe *r) {
#ifdef VERIFY
r->magnitude = 1;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
static void rustsecp256k1_v0_4_0_fe_normalize_var(rustsecp256k1_v0_4_0_fe *r) {
static void rustsecp256k1_v0_4_1_fe_normalize_var(rustsecp256k1_v0_4_1_fe *r) {
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
/* Reduce t4 at the start so there will be at most a single carry from the first pass */
@ -157,11 +158,11 @@ static void rustsecp256k1_v0_4_0_fe_normalize_var(rustsecp256k1_v0_4_0_fe *r) {
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero(rustsecp256k1_v0_4_0_fe *r) {
static int rustsecp256k1_v0_4_1_fe_normalizes_to_zero(const rustsecp256k1_v0_4_1_fe *r) {
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
/* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
@ -184,7 +185,7 @@ static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero(rustsecp256k1_v0_4_0_fe *r
return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
}
static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(rustsecp256k1_v0_4_0_fe *r) {
static int rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(const rustsecp256k1_v0_4_1_fe *r) {
uint64_t t0, t1, t2, t3, t4;
uint64_t z0, z1;
uint64_t x;
@ -225,34 +226,34 @@ static int rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(rustsecp256k1_v0_4_0_f
return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
}
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_set_int(rustsecp256k1_v0_4_0_fe *r, int a) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_set_int(rustsecp256k1_v0_4_1_fe *r, int a) {
r->n[0] = a;
r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0;
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_fe_is_zero(const rustsecp256k1_v0_4_0_fe *a) {
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_fe_is_zero(const rustsecp256k1_v0_4_1_fe *a) {
const uint64_t *t = a->n;
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
rustsecp256k1_v0_4_0_fe_verify(a);
rustsecp256k1_v0_4_1_fe_verify(a);
#endif
return (t[0] | t[1] | t[2] | t[3] | t[4]) == 0;
}
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_fe_is_odd(const rustsecp256k1_v0_4_0_fe *a) {
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_fe_is_odd(const rustsecp256k1_v0_4_1_fe *a) {
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
rustsecp256k1_v0_4_0_fe_verify(a);
rustsecp256k1_v0_4_1_fe_verify(a);
#endif
return a->n[0] & 1;
}
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_clear(rustsecp256k1_v0_4_0_fe *a) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_clear(rustsecp256k1_v0_4_1_fe *a) {
int i;
#ifdef VERIFY
a->magnitude = 0;
@ -263,13 +264,13 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_clear(rustsecp256k1_v0_4_0_
}
}
static int rustsecp256k1_v0_4_0_fe_cmp_var(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b) {
static int rustsecp256k1_v0_4_1_fe_cmp_var(const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe *b) {
int i;
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
VERIFY_CHECK(b->normalized);
rustsecp256k1_v0_4_0_fe_verify(a);
rustsecp256k1_v0_4_0_fe_verify(b);
rustsecp256k1_v0_4_1_fe_verify(a);
rustsecp256k1_v0_4_1_fe_verify(b);
#endif
for (i = 4; i >= 0; i--) {
if (a->n[i] > b->n[i]) {
@ -282,7 +283,7 @@ static int rustsecp256k1_v0_4_0_fe_cmp_var(const rustsecp256k1_v0_4_0_fe *a, con
return 0;
}
static int rustsecp256k1_v0_4_0_fe_set_b32(rustsecp256k1_v0_4_0_fe *r, const unsigned char *a) {
static int rustsecp256k1_v0_4_1_fe_set_b32(rustsecp256k1_v0_4_1_fe *r, const unsigned char *a) {
int ret;
r->n[0] = (uint64_t)a[31]
| ((uint64_t)a[30] << 8)
@ -323,7 +324,7 @@ static int rustsecp256k1_v0_4_0_fe_set_b32(rustsecp256k1_v0_4_0_fe *r, const uns
r->magnitude = 1;
if (ret) {
r->normalized = 1;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
} else {
r->normalized = 0;
}
@ -332,10 +333,10 @@ static int rustsecp256k1_v0_4_0_fe_set_b32(rustsecp256k1_v0_4_0_fe *r, const uns
}
/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
static void rustsecp256k1_v0_4_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_4_0_fe *a) {
static void rustsecp256k1_v0_4_1_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_4_1_fe *a) {
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
rustsecp256k1_v0_4_0_fe_verify(a);
rustsecp256k1_v0_4_1_fe_verify(a);
#endif
r[0] = (a->n[4] >> 40) & 0xFF;
r[1] = (a->n[4] >> 32) & 0xFF;
@ -371,10 +372,10 @@ static void rustsecp256k1_v0_4_0_fe_get_b32(unsigned char *r, const rustsecp256k
r[31] = a->n[0] & 0xFF;
}
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_negate(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, int m) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_negate(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a, int m) {
#ifdef VERIFY
VERIFY_CHECK(a->magnitude <= m);
rustsecp256k1_v0_4_0_fe_verify(a);
rustsecp256k1_v0_4_1_fe_verify(a);
#endif
r->n[0] = 0xFFFFEFFFFFC2FULL * 2 * (m + 1) - a->n[0];
r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[1];
@ -384,11 +385,11 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_negate(rustsecp256k1_v0_4_0
#ifdef VERIFY
r->magnitude = m + 1;
r->normalized = 0;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_mul_int(rustsecp256k1_v0_4_0_fe *r, int a) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_mul_int(rustsecp256k1_v0_4_1_fe *r, int a) {
r->n[0] *= a;
r->n[1] *= a;
r->n[2] *= a;
@ -397,13 +398,13 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_mul_int(rustsecp256k1_v0_4_
#ifdef VERIFY
r->magnitude *= a;
r->normalized = 0;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_add(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_add(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a) {
#ifdef VERIFY
rustsecp256k1_v0_4_0_fe_verify(a);
rustsecp256k1_v0_4_1_fe_verify(a);
#endif
r->n[0] += a->n[0];
r->n[1] += a->n[1];
@ -413,41 +414,41 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_add(rustsecp256k1_v0_4_0_fe
#ifdef VERIFY
r->magnitude += a->magnitude;
r->normalized = 0;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
static void rustsecp256k1_v0_4_0_fe_mul(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe * SECP256K1_RESTRICT b) {
static void rustsecp256k1_v0_4_1_fe_mul(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe * SECP256K1_RESTRICT b) {
#ifdef VERIFY
VERIFY_CHECK(a->magnitude <= 8);
VERIFY_CHECK(b->magnitude <= 8);
rustsecp256k1_v0_4_0_fe_verify(a);
rustsecp256k1_v0_4_0_fe_verify(b);
rustsecp256k1_v0_4_1_fe_verify(a);
rustsecp256k1_v0_4_1_fe_verify(b);
VERIFY_CHECK(r != b);
VERIFY_CHECK(a != b);
#endif
rustsecp256k1_v0_4_0_fe_mul_inner(r->n, a->n, b->n);
rustsecp256k1_v0_4_1_fe_mul_inner(r->n, a->n, b->n);
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 0;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
static void rustsecp256k1_v0_4_0_fe_sqr(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) {
static void rustsecp256k1_v0_4_1_fe_sqr(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a) {
#ifdef VERIFY
VERIFY_CHECK(a->magnitude <= 8);
rustsecp256k1_v0_4_0_fe_verify(a);
rustsecp256k1_v0_4_1_fe_verify(a);
#endif
rustsecp256k1_v0_4_0_fe_sqr_inner(r->n, a->n);
rustsecp256k1_v0_4_1_fe_sqr_inner(r->n, a->n);
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 0;
rustsecp256k1_v0_4_0_fe_verify(r);
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_cmov(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, int flag) {
static SECP256K1_INLINE void rustsecp256k1_v0_4_1_fe_cmov(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a, int flag) {
uint64_t mask0, mask1;
VG_CHECK_VERIFY(r->n, sizeof(r->n));
mask0 = flag + ~((uint64_t)0);
@ -465,7 +466,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_cmov(rustsecp256k1_v0_4_0_f
#endif
}
static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_storage_cmov(rustsecp256k1_v0_4_0_fe_storage *r, const rustsecp256k1_v0_4_0_fe_storage *a, int flag) {
static SECP256K1_INLINE void rustsecp256k1_v0_4_1_fe_storage_cmov(rustsecp256k1_v0_4_1_fe_storage *r, const rustsecp256k1_v0_4_1_fe_storage *a, int flag) {
uint64_t mask0, mask1;
VG_CHECK_VERIFY(r->n, sizeof(r->n));
mask0 = flag + ~((uint64_t)0);
@ -476,7 +477,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_storage_cmov(rustsecp256k1_
r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
}
static void rustsecp256k1_v0_4_0_fe_to_storage(rustsecp256k1_v0_4_0_fe_storage *r, const rustsecp256k1_v0_4_0_fe *a) {
static void rustsecp256k1_v0_4_1_fe_to_storage(rustsecp256k1_v0_4_1_fe_storage *r, const rustsecp256k1_v0_4_1_fe *a) {
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
#endif
@ -486,7 +487,7 @@ static void rustsecp256k1_v0_4_0_fe_to_storage(rustsecp256k1_v0_4_0_fe_storage *
r->n[3] = a->n[3] >> 36 | a->n[4] << 16;
}
static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_from_storage(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe_storage *a) {
static SECP256K1_INLINE void rustsecp256k1_v0_4_1_fe_from_storage(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe_storage *a) {
r->n[0] = a->n[0] & 0xFFFFFFFFFFFFFULL;
r->n[1] = a->n[0] >> 52 | ((a->n[1] << 12) & 0xFFFFFFFFFFFFFULL);
r->n[2] = a->n[1] >> 40 | ((a->n[2] << 24) & 0xFFFFFFFFFFFFFULL);
@ -498,4 +499,80 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_fe_from_storage(rustsecp256k1_
#endif
}
static void rustsecp256k1_v0_4_1_fe_from_signed62(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_modinv64_signed62 *a) {
const uint64_t M52 = UINT64_MAX >> 12;
const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4];
/* The output from rustsecp256k1_v0_4_1_modinv64{_var} should be normalized to range [0,modulus), and
* have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4).
*/
VERIFY_CHECK(a0 >> 62 == 0);
VERIFY_CHECK(a1 >> 62 == 0);
VERIFY_CHECK(a2 >> 62 == 0);
VERIFY_CHECK(a3 >> 62 == 0);
VERIFY_CHECK(a4 >> 8 == 0);
r->n[0] = a0 & M52;
r->n[1] = (a0 >> 52 | a1 << 10) & M52;
r->n[2] = (a1 >> 42 | a2 << 20) & M52;
r->n[3] = (a2 >> 32 | a3 << 30) & M52;
r->n[4] = (a3 >> 22 | a4 << 40);
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
rustsecp256k1_v0_4_1_fe_verify(r);
#endif
}
static void rustsecp256k1_v0_4_1_fe_to_signed62(rustsecp256k1_v0_4_1_modinv64_signed62 *r, const rustsecp256k1_v0_4_1_fe *a) {
const uint64_t M62 = UINT64_MAX >> 2;
const uint64_t a0 = a->n[0], a1 = a->n[1], a2 = a->n[2], a3 = a->n[3], a4 = a->n[4];
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
#endif
r->v[0] = (a0 | a1 << 52) & M62;
r->v[1] = (a1 >> 10 | a2 << 42) & M62;
r->v[2] = (a2 >> 20 | a3 << 32) & M62;
r->v[3] = (a3 >> 30 | a4 << 22) & M62;
r->v[4] = a4 >> 40;
}
static const rustsecp256k1_v0_4_1_modinv64_modinfo rustsecp256k1_v0_4_1_const_modinfo_fe = {
{{-0x1000003D1LL, 0, 0, 0, 256}},
0x27C7F6E22DDACACFLL
};
static void rustsecp256k1_v0_4_1_fe_inv(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *x) {
rustsecp256k1_v0_4_1_fe tmp;
rustsecp256k1_v0_4_1_modinv64_signed62 s;
tmp = *x;
rustsecp256k1_v0_4_1_fe_normalize(&tmp);
rustsecp256k1_v0_4_1_fe_to_signed62(&s, &tmp);
rustsecp256k1_v0_4_1_modinv64(&s, &rustsecp256k1_v0_4_1_const_modinfo_fe);
rustsecp256k1_v0_4_1_fe_from_signed62(r, &s);
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_fe_normalizes_to_zero(r) == rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&tmp));
#endif
}
static void rustsecp256k1_v0_4_1_fe_inv_var(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *x) {
rustsecp256k1_v0_4_1_fe tmp;
rustsecp256k1_v0_4_1_modinv64_signed62 s;
tmp = *x;
rustsecp256k1_v0_4_1_fe_normalize_var(&tmp);
rustsecp256k1_v0_4_1_fe_to_signed62(&s, &tmp);
rustsecp256k1_v0_4_1_modinv64_var(&s, &rustsecp256k1_v0_4_1_const_modinfo_fe);
rustsecp256k1_v0_4_1_fe_from_signed62(r, &s);
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_fe_normalizes_to_zero(r) == rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&tmp));
#endif
}
#endif /* SECP256K1_FIELD_REPR_IMPL_H */

View File

@ -15,7 +15,7 @@
#define VERIFY_BITS(x, n) do { } while(0)
#endif
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
uint128_t c, d;
uint64_t t3, t4, tx, u0;
uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4];
@ -154,7 +154,7 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_mul_inner(uint64_t *r, cons
/* [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
}
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_fe_sqr_inner(uint64_t *r, const uint64_t *a) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_fe_sqr_inner(uint64_t *r, const uint64_t *a) {
uint128_t c, d;
uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4];
int64_t t3, t4, tx, u0;

View File

@ -12,7 +12,6 @@
#endif
#include "util.h"
#include "num.h"
#if defined(SECP256K1_WIDEMUL_INT128)
#include "field_5x52_impl.h"
@ -22,21 +21,21 @@
#error "Please select wide multiplication implementation"
#endif
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_fe_equal(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b) {
rustsecp256k1_v0_4_0_fe na;
rustsecp256k1_v0_4_0_fe_negate(&na, a, 1);
rustsecp256k1_v0_4_0_fe_add(&na, b);
return rustsecp256k1_v0_4_0_fe_normalizes_to_zero(&na);
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_fe_equal(const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe *b) {
rustsecp256k1_v0_4_1_fe na;
rustsecp256k1_v0_4_1_fe_negate(&na, a, 1);
rustsecp256k1_v0_4_1_fe_add(&na, b);
return rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&na);
}
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_fe_equal_var(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b) {
rustsecp256k1_v0_4_0_fe na;
rustsecp256k1_v0_4_0_fe_negate(&na, a, 1);
rustsecp256k1_v0_4_0_fe_add(&na, b);
return rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&na);
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_fe_equal_var(const rustsecp256k1_v0_4_1_fe *a, const rustsecp256k1_v0_4_1_fe *b) {
rustsecp256k1_v0_4_1_fe na;
rustsecp256k1_v0_4_1_fe_negate(&na, a, 1);
rustsecp256k1_v0_4_1_fe_add(&na, b);
return rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&na);
}
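Both equality helpers above reduce comparison to a zero test on a difference:

\[ a \equiv b \pmod{p} \iff b - a \equiv 0 \pmod{p}, \]

so negating one operand (to magnitude 1), adding the other, and asking whether the sum normalizes to zero avoids fully normalizing and comparing both inputs limb by limb.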
static int rustsecp256k1_v0_4_0_fe_sqrt(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) {
static int rustsecp256k1_v0_4_1_fe_sqrt(rustsecp256k1_v0_4_1_fe *r, const rustsecp256k1_v0_4_1_fe *a) {
/** Given that p is congruent to 3 mod 4, we can compute the square root of
* a mod p as the (p+1)/4'th power of a.
*
@ -46,7 +45,7 @@ static int rustsecp256k1_v0_4_0_fe_sqrt(rustsecp256k1_v0_4_0_fe *r, const rustse
* Also because (p+1)/4 is an even number, the computed square root is
* itself always a square (a ** ((p+1)/4) is the square of a ** ((p+1)/8)).
*/
rustsecp256k1_v0_4_0_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
rustsecp256k1_v0_4_1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
int j;
VERIFY_CHECK(r != a);
@ -56,265 +55,86 @@ static int rustsecp256k1_v0_4_0_fe_sqrt(rustsecp256k1_v0_4_0_fe *r, const rustse
* 1, [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
*/
rustsecp256k1_v0_4_0_fe_sqr(&x2, a);
rustsecp256k1_v0_4_0_fe_mul(&x2, &x2, a);
rustsecp256k1_v0_4_1_fe_sqr(&x2, a);
rustsecp256k1_v0_4_1_fe_mul(&x2, &x2, a);
rustsecp256k1_v0_4_0_fe_sqr(&x3, &x2);
rustsecp256k1_v0_4_0_fe_mul(&x3, &x3, a);
rustsecp256k1_v0_4_1_fe_sqr(&x3, &x2);
rustsecp256k1_v0_4_1_fe_mul(&x3, &x3, a);
x6 = x3;
for (j=0; j<3; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&x6, &x6);
rustsecp256k1_v0_4_1_fe_sqr(&x6, &x6);
}
rustsecp256k1_v0_4_0_fe_mul(&x6, &x6, &x3);
rustsecp256k1_v0_4_1_fe_mul(&x6, &x6, &x3);
x9 = x6;
for (j=0; j<3; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&x9, &x9);
rustsecp256k1_v0_4_1_fe_sqr(&x9, &x9);
}
rustsecp256k1_v0_4_0_fe_mul(&x9, &x9, &x3);
rustsecp256k1_v0_4_1_fe_mul(&x9, &x9, &x3);
x11 = x9;
for (j=0; j<2; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&x11, &x11);
rustsecp256k1_v0_4_1_fe_sqr(&x11, &x11);
}
rustsecp256k1_v0_4_0_fe_mul(&x11, &x11, &x2);
rustsecp256k1_v0_4_1_fe_mul(&x11, &x11, &x2);
x22 = x11;
for (j=0; j<11; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&x22, &x22);
rustsecp256k1_v0_4_1_fe_sqr(&x22, &x22);
}
rustsecp256k1_v0_4_0_fe_mul(&x22, &x22, &x11);
rustsecp256k1_v0_4_1_fe_mul(&x22, &x22, &x11);
x44 = x22;
for (j=0; j<22; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&x44, &x44);
rustsecp256k1_v0_4_1_fe_sqr(&x44, &x44);
}
rustsecp256k1_v0_4_0_fe_mul(&x44, &x44, &x22);
rustsecp256k1_v0_4_1_fe_mul(&x44, &x44, &x22);
x88 = x44;
for (j=0; j<44; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&x88, &x88);
rustsecp256k1_v0_4_1_fe_sqr(&x88, &x88);
}
rustsecp256k1_v0_4_0_fe_mul(&x88, &x88, &x44);
rustsecp256k1_v0_4_1_fe_mul(&x88, &x88, &x44);
x176 = x88;
for (j=0; j<88; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&x176, &x176);
rustsecp256k1_v0_4_1_fe_sqr(&x176, &x176);
}
rustsecp256k1_v0_4_0_fe_mul(&x176, &x176, &x88);
rustsecp256k1_v0_4_1_fe_mul(&x176, &x176, &x88);
x220 = x176;
for (j=0; j<44; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&x220, &x220);
rustsecp256k1_v0_4_1_fe_sqr(&x220, &x220);
}
rustsecp256k1_v0_4_0_fe_mul(&x220, &x220, &x44);
rustsecp256k1_v0_4_1_fe_mul(&x220, &x220, &x44);
x223 = x220;
for (j=0; j<3; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&x223, &x223);
rustsecp256k1_v0_4_1_fe_sqr(&x223, &x223);
}
rustsecp256k1_v0_4_0_fe_mul(&x223, &x223, &x3);
rustsecp256k1_v0_4_1_fe_mul(&x223, &x223, &x3);
/* The final result is then assembled using a sliding window over the blocks. */
t1 = x223;
for (j=0; j<23; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1);
rustsecp256k1_v0_4_1_fe_sqr(&t1, &t1);
}
rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, &x22);
rustsecp256k1_v0_4_1_fe_mul(&t1, &t1, &x22);
for (j=0; j<6; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1);
rustsecp256k1_v0_4_1_fe_sqr(&t1, &t1);
}
rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, &x2);
rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1);
rustsecp256k1_v0_4_0_fe_sqr(r, &t1);
rustsecp256k1_v0_4_1_fe_mul(&t1, &t1, &x2);
rustsecp256k1_v0_4_1_fe_sqr(&t1, &t1);
rustsecp256k1_v0_4_1_fe_sqr(r, &t1);
/* Check that a square root was actually calculated */
rustsecp256k1_v0_4_0_fe_sqr(&t1, r);
return rustsecp256k1_v0_4_0_fe_equal(&t1, a);
rustsecp256k1_v0_4_1_fe_sqr(&t1, r);
return rustsecp256k1_v0_4_1_fe_equal(&t1, a);
}
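The exponent used above follows from the observation in the function's opening comment: with \( p \equiv 3 \pmod 4 \),

\[ \bigl(a^{(p+1)/4}\bigr)^2 \;=\; a^{(p+1)/2} \;=\; a \cdot a^{(p-1)/2} \;=\; \pm a, \]

since \( a^{(p-1)/2} \) is the Legendre symbol of a nonzero a. The final fe_sqr/fe_equal pair therefore doubles as the quadratic-residue test: it returns 1 exactly when a genuine square root was produced, and otherwise the computed value is a square root of \(-a\).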
static void rustsecp256k1_v0_4_0_fe_inv(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) {
rustsecp256k1_v0_4_0_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
int j;
/** The binary representation of (p - 2) has 5 blocks of 1s, with lengths in
* { 1, 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
* [1], [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
*/
rustsecp256k1_v0_4_0_fe_sqr(&x2, a);
rustsecp256k1_v0_4_0_fe_mul(&x2, &x2, a);
rustsecp256k1_v0_4_0_fe_sqr(&x3, &x2);
rustsecp256k1_v0_4_0_fe_mul(&x3, &x3, a);
x6 = x3;
for (j=0; j<3; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&x6, &x6);
}
rustsecp256k1_v0_4_0_fe_mul(&x6, &x6, &x3);
x9 = x6;
for (j=0; j<3; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&x9, &x9);
}
rustsecp256k1_v0_4_0_fe_mul(&x9, &x9, &x3);
x11 = x9;
for (j=0; j<2; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&x11, &x11);
}
rustsecp256k1_v0_4_0_fe_mul(&x11, &x11, &x2);
x22 = x11;
for (j=0; j<11; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&x22, &x22);
}
rustsecp256k1_v0_4_0_fe_mul(&x22, &x22, &x11);
x44 = x22;
for (j=0; j<22; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&x44, &x44);
}
rustsecp256k1_v0_4_0_fe_mul(&x44, &x44, &x22);
x88 = x44;
for (j=0; j<44; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&x88, &x88);
}
rustsecp256k1_v0_4_0_fe_mul(&x88, &x88, &x44);
x176 = x88;
for (j=0; j<88; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&x176, &x176);
}
rustsecp256k1_v0_4_0_fe_mul(&x176, &x176, &x88);
x220 = x176;
for (j=0; j<44; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&x220, &x220);
}
rustsecp256k1_v0_4_0_fe_mul(&x220, &x220, &x44);
x223 = x220;
for (j=0; j<3; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&x223, &x223);
}
rustsecp256k1_v0_4_0_fe_mul(&x223, &x223, &x3);
/* The final result is then assembled using a sliding window over the blocks. */
t1 = x223;
for (j=0; j<23; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1);
}
rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, &x22);
for (j=0; j<5; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1);
}
rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, a);
for (j=0; j<3; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1);
}
rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, &x2);
for (j=0; j<2; j++) {
rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1);
}
rustsecp256k1_v0_4_0_fe_mul(r, a, &t1);
}
static void rustsecp256k1_v0_4_0_fe_inv_var(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) {
#if defined(USE_FIELD_INV_BUILTIN)
rustsecp256k1_v0_4_0_fe_inv(r, a);
#elif defined(USE_FIELD_INV_NUM)
rustsecp256k1_v0_4_0_num n, m;
static const rustsecp256k1_v0_4_0_fe negone = SECP256K1_FE_CONST(
0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL,
0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xFFFFFC2EUL
);
/* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
static const unsigned char prime[32] = {
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
};
unsigned char b[32];
int res;
rustsecp256k1_v0_4_0_fe c = *a;
rustsecp256k1_v0_4_0_fe_normalize_var(&c);
rustsecp256k1_v0_4_0_fe_get_b32(b, &c);
rustsecp256k1_v0_4_0_num_set_bin(&n, b, 32);
rustsecp256k1_v0_4_0_num_set_bin(&m, prime, 32);
rustsecp256k1_v0_4_0_num_mod_inverse(&n, &n, &m);
rustsecp256k1_v0_4_0_num_get_bin(b, 32, &n);
res = rustsecp256k1_v0_4_0_fe_set_b32(r, b);
(void)res;
VERIFY_CHECK(res);
/* Verify the result is the (unique) valid inverse using non-GMP code. */
rustsecp256k1_v0_4_0_fe_mul(&c, &c, r);
rustsecp256k1_v0_4_0_fe_add(&c, &negone);
CHECK(rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&c));
#else
#error "Please select field inverse implementation"
#endif
}
static void rustsecp256k1_v0_4_0_fe_inv_all_var(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, size_t len) {
rustsecp256k1_v0_4_0_fe u;
size_t i;
if (len < 1) {
return;
}
VERIFY_CHECK((r + len <= a) || (a + len <= r));
r[0] = a[0];
i = 0;
while (++i < len) {
rustsecp256k1_v0_4_0_fe_mul(&r[i], &r[i - 1], &a[i]);
}
rustsecp256k1_v0_4_0_fe_inv_var(&u, &r[--i]);
while (i > 0) {
size_t j = i--;
rustsecp256k1_v0_4_0_fe_mul(&r[j], &r[i], &u);
rustsecp256k1_v0_4_0_fe_mul(&u, &u, &a[j]);
}
r[0] = u;
}
static int rustsecp256k1_v0_4_0_fe_is_quad_var(const rustsecp256k1_v0_4_0_fe *a) {
#ifndef USE_NUM_NONE
unsigned char b[32];
rustsecp256k1_v0_4_0_num n;
rustsecp256k1_v0_4_0_num m;
/* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
static const unsigned char prime[32] = {
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
};
rustsecp256k1_v0_4_0_fe c = *a;
rustsecp256k1_v0_4_0_fe_normalize_var(&c);
rustsecp256k1_v0_4_0_fe_get_b32(b, &c);
rustsecp256k1_v0_4_0_num_set_bin(&n, b, 32);
rustsecp256k1_v0_4_0_num_set_bin(&m, prime, 32);
return rustsecp256k1_v0_4_0_num_jacobi(&n, &m) >= 0;
#else
rustsecp256k1_v0_4_0_fe r;
return rustsecp256k1_v0_4_0_fe_sqrt(&r, a);
#endif
}
static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_fe_one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1);
static const rustsecp256k1_v0_4_1_fe rustsecp256k1_v0_4_1_fe_one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1);
#endif /* SECP256K1_FIELD_IMPL_H */

View File

@ -4,15 +4,21 @@
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
// Autotools creates libsecp256k1-config.h, of which ECMULT_GEN_PREC_BITS is needed.
// ifndef guard so downstream users can define their own if they do not use autotools.
/* Autotools creates libsecp256k1-config.h, of which ECMULT_GEN_PREC_BITS is needed.
ifndef guard so downstream users can define their own if they do not use autotools. */
#if !defined(ECMULT_GEN_PREC_BITS)
#include "libsecp256k1-config.h"
#endif
#define USE_BASIC_CONFIG 1
#include "basic-config.h"
#include "include/secp256k1.h"
/* We can't require the precomputed tables when creating them. */
#undef USE_ECMULT_STATIC_PRECOMPUTATION
/* In principle we could use external ASM, but this yields only a minor speedup in
build time and it's very complicated. In particular when cross-compiling, we'd
need to build the external ASM for the build and the host machine. */
#undef USE_EXTERNAL_ASM
#include "../include/secp256k1.h"
#include "assumptions.h"
#include "util.h"
#include "field_impl.h"
@ -26,13 +32,13 @@ static void default_error_callback_fn(const char* str, void* data) {
abort();
}
static const rustsecp256k1_v0_4_0_callback default_error_callback = {
static const rustsecp256k1_v0_4_1_callback default_error_callback = {
default_error_callback_fn,
NULL
};
int main(int argc, char **argv) {
rustsecp256k1_v0_4_0_ecmult_gen_context ctx;
rustsecp256k1_v0_4_1_ecmult_gen_context ctx;
void *prealloc, *base;
int inner;
int outer;
@ -47,19 +53,19 @@ int main(int argc, char **argv) {
return -1;
}
fprintf(fp, "#ifndef _SECP256K1_ECMULT_STATIC_CONTEXT_\n");
fprintf(fp, "#define _SECP256K1_ECMULT_STATIC_CONTEXT_\n");
fprintf(fp, "#ifndef SECP256K1_ECMULT_STATIC_CONTEXT_H\n");
fprintf(fp, "#define SECP256K1_ECMULT_STATIC_CONTEXT_H\n");
fprintf(fp, "#include \"src/group.h\"\n");
fprintf(fp, "#define SC SECP256K1_GE_STORAGE_CONST\n");
fprintf(fp, "#if ECMULT_GEN_PREC_N != %d || ECMULT_GEN_PREC_G != %d\n", ECMULT_GEN_PREC_N, ECMULT_GEN_PREC_G);
fprintf(fp, " #error configuration mismatch, invalid ECMULT_GEN_PREC_N, ECMULT_GEN_PREC_G. Try deleting ecmult_static_context.h before the build.\n");
fprintf(fp, "#endif\n");
fprintf(fp, "static const rustsecp256k1_v0_4_0_ge_storage rustsecp256k1_v0_4_0_ecmult_static_context[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G] = {\n");
fprintf(fp, "static const rustsecp256k1_v0_4_1_ge_storage rustsecp256k1_v0_4_1_ecmult_static_context[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G] = {\n");
base = checked_malloc(&default_error_callback, SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE);
prealloc = base;
rustsecp256k1_v0_4_0_ecmult_gen_context_init(&ctx);
rustsecp256k1_v0_4_0_ecmult_gen_context_build(&ctx, &prealloc);
rustsecp256k1_v0_4_1_ecmult_gen_context_init(&ctx);
rustsecp256k1_v0_4_1_ecmult_gen_context_build(&ctx, &prealloc);
for(outer = 0; outer != ECMULT_GEN_PREC_N; outer++) {
fprintf(fp,"{\n");
for(inner = 0; inner != ECMULT_GEN_PREC_G; inner++) {
@ -77,7 +83,7 @@ int main(int argc, char **argv) {
}
}
fprintf(fp,"};\n");
rustsecp256k1_v0_4_0_ecmult_gen_context_clear(&ctx);
rustsecp256k1_v0_4_1_ecmult_gen_context_clear(&ctx);
free(base);
fprintf(fp, "#undef SC\n");

View File

@ -7,135 +7,128 @@
#ifndef SECP256K1_GROUP_H
#define SECP256K1_GROUP_H
#include "num.h"
#include "field.h"
/** A group element of the secp256k1 curve, in affine coordinates. */
typedef struct {
rustsecp256k1_v0_4_0_fe x;
rustsecp256k1_v0_4_0_fe y;
rustsecp256k1_v0_4_1_fe x;
rustsecp256k1_v0_4_1_fe y;
int infinity; /* whether this represents the point at infinity */
} rustsecp256k1_v0_4_0_ge;
} rustsecp256k1_v0_4_1_ge;
#define SECP256K1_GE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), 0}
#define SECP256K1_GE_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1}
/** A group element of the secp256k1 curve, in jacobian coordinates. */
typedef struct {
rustsecp256k1_v0_4_0_fe x; /* actual X: x/z^2 */
rustsecp256k1_v0_4_0_fe y; /* actual Y: y/z^3 */
rustsecp256k1_v0_4_0_fe z;
rustsecp256k1_v0_4_1_fe x; /* actual X: x/z^2 */
rustsecp256k1_v0_4_1_fe y; /* actual Y: y/z^3 */
rustsecp256k1_v0_4_1_fe z;
int infinity; /* whether this represents the point at infinity */
} rustsecp256k1_v0_4_0_gej;
} rustsecp256k1_v0_4_1_gej;
#define SECP256K1_GEJ_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1), 0}
#define SECP256K1_GEJ_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1}
typedef struct {
rustsecp256k1_v0_4_0_fe_storage x;
rustsecp256k1_v0_4_0_fe_storage y;
} rustsecp256k1_v0_4_0_ge_storage;
rustsecp256k1_v0_4_1_fe_storage x;
rustsecp256k1_v0_4_1_fe_storage y;
} rustsecp256k1_v0_4_1_ge_storage;
#define SECP256K1_GE_STORAGE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_STORAGE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_STORAGE_CONST((i),(j),(k),(l),(m),(n),(o),(p))}
#define SECP256K1_GE_STORAGE_CONST_GET(t) SECP256K1_FE_STORAGE_CONST_GET(t.x), SECP256K1_FE_STORAGE_CONST_GET(t.y)
/** Set a group element equal to the point with given X and Y coordinates */
static void rustsecp256k1_v0_4_0_ge_set_xy(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_fe *x, const rustsecp256k1_v0_4_0_fe *y);
/** Set a group element (affine) equal to the point with the given X coordinate
* and a Y coordinate that is a quadratic residue modulo p. The return value
* is true iff a point with the given X coordinate exists.
*/
static int rustsecp256k1_v0_4_0_ge_set_xquad(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_fe *x);
static void rustsecp256k1_v0_4_1_ge_set_xy(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_fe *x, const rustsecp256k1_v0_4_1_fe *y);
/** Set a group element (affine) equal to the point with the given X coordinate, and given oddness
* for Y. Return value indicates whether the result is valid. */
static int rustsecp256k1_v0_4_0_ge_set_xo_var(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_fe *x, int odd);
static int rustsecp256k1_v0_4_1_ge_set_xo_var(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_fe *x, int odd);
/** Check whether a group element is the point at infinity. */
static int rustsecp256k1_v0_4_0_ge_is_infinity(const rustsecp256k1_v0_4_0_ge *a);
static int rustsecp256k1_v0_4_1_ge_is_infinity(const rustsecp256k1_v0_4_1_ge *a);
/** Check whether a group element is valid (i.e., on the curve). */
static int rustsecp256k1_v0_4_0_ge_is_valid_var(const rustsecp256k1_v0_4_0_ge *a);
static int rustsecp256k1_v0_4_1_ge_is_valid_var(const rustsecp256k1_v0_4_1_ge *a);
/** Set r equal to the inverse of a (i.e., mirrored around the X axis) */
static void rustsecp256k1_v0_4_0_ge_neg(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_ge *a);
static void rustsecp256k1_v0_4_1_ge_neg(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_ge *a);
/** Set a group element equal to another which is given in jacobian coordinates */
static void rustsecp256k1_v0_4_0_ge_set_gej(rustsecp256k1_v0_4_0_ge *r, rustsecp256k1_v0_4_0_gej *a);
/** Set a group element equal to another which is given in jacobian coordinates. Constant time. */
static void rustsecp256k1_v0_4_1_ge_set_gej(rustsecp256k1_v0_4_1_ge *r, rustsecp256k1_v0_4_1_gej *a);
/** Set a group element equal to another which is given in jacobian coordinates. */
static void rustsecp256k1_v0_4_1_ge_set_gej_var(rustsecp256k1_v0_4_1_ge *r, rustsecp256k1_v0_4_1_gej *a);
/** Set a batch of group elements equal to the inputs given in jacobian coordinates */
static void rustsecp256k1_v0_4_0_ge_set_all_gej_var(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_gej *a, size_t len);
static void rustsecp256k1_v0_4_1_ge_set_all_gej_var(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_gej *a, size_t len);
/** Bring a batch of inputs given in jacobian coordinates (with known z-ratios) to
* the same global z "denominator". zr must contain the known z-ratios such
* that mul(a[i].z, zr[i+1]) == a[i+1].z. zr[0] is ignored. The x and y
* coordinates of the result are stored in r, the common z coordinate is
* stored in globalz. */
static void rustsecp256k1_v0_4_0_ge_globalz_set_table_gej(size_t len, rustsecp256k1_v0_4_0_ge *r, rustsecp256k1_v0_4_0_fe *globalz, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_fe *zr);
static void rustsecp256k1_v0_4_1_ge_globalz_set_table_gej(size_t len, rustsecp256k1_v0_4_1_ge *r, rustsecp256k1_v0_4_1_fe *globalz, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_fe *zr);
/** Set a group element (affine) equal to the point at infinity. */
static void rustsecp256k1_v0_4_0_ge_set_infinity(rustsecp256k1_v0_4_0_ge *r);
static void rustsecp256k1_v0_4_1_ge_set_infinity(rustsecp256k1_v0_4_1_ge *r);
/** Set a group element (jacobian) equal to the point at infinity. */
static void rustsecp256k1_v0_4_0_gej_set_infinity(rustsecp256k1_v0_4_0_gej *r);
static void rustsecp256k1_v0_4_1_gej_set_infinity(rustsecp256k1_v0_4_1_gej *r);
/** Set a group element (jacobian) equal to another which is given in affine coordinates. */
static void rustsecp256k1_v0_4_0_gej_set_ge(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_ge *a);
static void rustsecp256k1_v0_4_1_gej_set_ge(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_ge *a);
/** Compare the X coordinate of a group element (jacobian). */
static int rustsecp256k1_v0_4_0_gej_eq_x_var(const rustsecp256k1_v0_4_0_fe *x, const rustsecp256k1_v0_4_0_gej *a);
static int rustsecp256k1_v0_4_1_gej_eq_x_var(const rustsecp256k1_v0_4_1_fe *x, const rustsecp256k1_v0_4_1_gej *a);
/** Set r equal to the inverse of a (i.e., mirrored around the X axis) */
static void rustsecp256k1_v0_4_0_gej_neg(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a);
static void rustsecp256k1_v0_4_1_gej_neg(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a);
/** Check whether a group element is the point at infinity. */
static int rustsecp256k1_v0_4_0_gej_is_infinity(const rustsecp256k1_v0_4_0_gej *a);
/** Check whether a group element's y coordinate is a quadratic residue. */
static int rustsecp256k1_v0_4_0_gej_has_quad_y_var(const rustsecp256k1_v0_4_0_gej *a);
static int rustsecp256k1_v0_4_1_gej_is_infinity(const rustsecp256k1_v0_4_1_gej *a);
/** Set r equal to the double of a. Constant time. */
static void rustsecp256k1_v0_4_0_gej_double(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a);
static void rustsecp256k1_v0_4_1_gej_double(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a);
/** Set r equal to the double of a. If rzr is non-NULL this sets *rzr such that r->z == a->z * *rzr (where infinity means an implicit z = 0). */
static void rustsecp256k1_v0_4_0_gej_double_var(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, rustsecp256k1_v0_4_0_fe *rzr);
static void rustsecp256k1_v0_4_1_gej_double_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, rustsecp256k1_v0_4_1_fe *rzr);
/** Set r equal to the sum of a and b. If rzr is non-NULL this sets *rzr such that r->z == a->z * *rzr (a cannot be infinity in that case). */
static void rustsecp256k1_v0_4_0_gej_add_var(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_gej *b, rustsecp256k1_v0_4_0_fe *rzr);
static void rustsecp256k1_v0_4_1_gej_add_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_gej *b, rustsecp256k1_v0_4_1_fe *rzr);
/** Set r equal to the sum of a and b (with b given in affine coordinates, and not infinity). */
static void rustsecp256k1_v0_4_0_gej_add_ge(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_ge *b);
static void rustsecp256k1_v0_4_1_gej_add_ge(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_ge *b);
/** Set r equal to the sum of a and b (with b given in affine coordinates). This is more efficient
than rustsecp256k1_v0_4_0_gej_add_var. It is identical to rustsecp256k1_v0_4_0_gej_add_ge but without constant-time
than rustsecp256k1_v0_4_1_gej_add_var. It is identical to rustsecp256k1_v0_4_1_gej_add_ge but without constant-time
guarantee, and b is allowed to be infinity. If rzr is non-NULL this sets *rzr such that r->z == a->z * *rzr (a cannot be infinity in that case). */
static void rustsecp256k1_v0_4_0_gej_add_ge_var(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_ge *b, rustsecp256k1_v0_4_0_fe *rzr);
static void rustsecp256k1_v0_4_1_gej_add_ge_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_ge *b, rustsecp256k1_v0_4_1_fe *rzr);
/** Set r equal to the sum of a and b (with the inverse of b's Z coordinate passed as bzinv). */
static void rustsecp256k1_v0_4_0_gej_add_zinv_var(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_ge *b, const rustsecp256k1_v0_4_0_fe *bzinv);
static void rustsecp256k1_v0_4_1_gej_add_zinv_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_ge *b, const rustsecp256k1_v0_4_1_fe *bzinv);
/** Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast. */
static void rustsecp256k1_v0_4_0_ge_mul_lambda(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_ge *a);
static void rustsecp256k1_v0_4_1_ge_mul_lambda(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_ge *a);
/** Clear a rustsecp256k1_v0_4_0_gej to prevent leaking sensitive information. */
static void rustsecp256k1_v0_4_0_gej_clear(rustsecp256k1_v0_4_0_gej *r);
/** Clear a rustsecp256k1_v0_4_1_gej to prevent leaking sensitive information. */
static void rustsecp256k1_v0_4_1_gej_clear(rustsecp256k1_v0_4_1_gej *r);
/** Clear a rustsecp256k1_v0_4_0_ge to prevent leaking sensitive information. */
static void rustsecp256k1_v0_4_0_ge_clear(rustsecp256k1_v0_4_0_ge *r);
/** Clear a rustsecp256k1_v0_4_1_ge to prevent leaking sensitive information. */
static void rustsecp256k1_v0_4_1_ge_clear(rustsecp256k1_v0_4_1_ge *r);
/** Convert a group element to the storage type. */
static void rustsecp256k1_v0_4_0_ge_to_storage(rustsecp256k1_v0_4_0_ge_storage *r, const rustsecp256k1_v0_4_0_ge *a);
static void rustsecp256k1_v0_4_1_ge_to_storage(rustsecp256k1_v0_4_1_ge_storage *r, const rustsecp256k1_v0_4_1_ge *a);
/** Convert a group element back from the storage type. */
static void rustsecp256k1_v0_4_0_ge_from_storage(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_ge_storage *a);
static void rustsecp256k1_v0_4_1_ge_from_storage(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_ge_storage *a);
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/
static void rustsecp256k1_v0_4_0_ge_storage_cmov(rustsecp256k1_v0_4_0_ge_storage *r, const rustsecp256k1_v0_4_0_ge_storage *a, int flag);
static void rustsecp256k1_v0_4_1_ge_storage_cmov(rustsecp256k1_v0_4_1_ge_storage *r, const rustsecp256k1_v0_4_1_ge_storage *a, int flag);
/** Rescale a jacobian point by b which must be non-zero. Constant-time. */
static void rustsecp256k1_v0_4_0_gej_rescale(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_fe *b);
static void rustsecp256k1_v0_4_1_gej_rescale(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_fe *b);
/** Determine if a point (which is assumed to be on the curve) is in the correct (sub)group of the curve.
*
@ -146,6 +139,6 @@ static void rustsecp256k1_v0_4_0_gej_rescale(rustsecp256k1_v0_4_0_gej *r, const
* (very) small subgroup, and that subgroup is what is used for all cryptographic operations. In that mode, this
* function checks whether a point that is on the curve is in fact also in that subgroup.
*/
static int rustsecp256k1_v0_4_0_ge_is_in_correct_subgroup(const rustsecp256k1_v0_4_0_ge* ge);
static int rustsecp256k1_v0_4_1_ge_is_in_correct_subgroup(const rustsecp256k1_v0_4_1_ge* ge);
#endif /* SECP256K1_GROUP_H */
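For reference, the affine/Jacobian correspondence behind the two structs declared above: a Jacobian triple (X, Y, Z) with Z != 0 represents the affine point

\[ x = \frac{X}{Z^2}, \qquad y = \frac{Y}{Z^3}, \]

so rescaling (X, Y, Z) -> (s^2 X, s^3 Y, s Z) by any nonzero s (which is what rustsecp256k1_v0_4_1_gej_rescale does) leaves the represented point unchanged. Keeping points in this form lets additions and doublings avoid a modular inversion per group operation; the inversion is deferred to rustsecp256k1_v0_4_1_ge_set_gej(_var), or batched in rustsecp256k1_v0_4_1_ge_set_all_gej_var, when an affine result is actually needed.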

View File

@ -7,7 +7,6 @@
#ifndef SECP256K1_GROUP_IMPL_H
#define SECP256K1_GROUP_IMPL_H
#include "num.h"
#include "field.h"
#include "group.h"
@ -22,24 +21,24 @@
*/
#if defined(EXHAUSTIVE_TEST_ORDER)
# if EXHAUSTIVE_TEST_ORDER == 13
static const rustsecp256k1_v0_4_0_ge rustsecp256k1_v0_4_0_ge_const_g = SECP256K1_GE_CONST(
static const rustsecp256k1_v0_4_1_ge rustsecp256k1_v0_4_1_ge_const_g = SECP256K1_GE_CONST(
0xc3459c3d, 0x35326167, 0xcd86cce8, 0x07a2417f,
0x5b8bd567, 0xde8538ee, 0x0d507b0c, 0xd128f5bb,
0x8e467fec, 0xcd30000a, 0x6cc1184e, 0x25d382c2,
0xa2f4494e, 0x2fbe9abc, 0x8b64abac, 0xd005fb24
);
static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_fe_const_b = SECP256K1_FE_CONST(
static const rustsecp256k1_v0_4_1_fe rustsecp256k1_v0_4_1_fe_const_b = SECP256K1_FE_CONST(
0x3d3486b2, 0x159a9ca5, 0xc75638be, 0xb23a69bc,
0x946a45ab, 0x24801247, 0xb4ed2b8e, 0x26b6a417
);
# elif EXHAUSTIVE_TEST_ORDER == 199
static const rustsecp256k1_v0_4_0_ge rustsecp256k1_v0_4_0_ge_const_g = SECP256K1_GE_CONST(
static const rustsecp256k1_v0_4_1_ge rustsecp256k1_v0_4_1_ge_const_g = SECP256K1_GE_CONST(
0x226e653f, 0xc8df7744, 0x9bacbf12, 0x7d1dcbf9,
0x87f05b2a, 0xe7edbd28, 0x1f564575, 0xc48dcf18,
0xa13872c2, 0xe933bb17, 0x5d9ffd5b, 0xb5b6e10c,
0x57fe3c00, 0xbaaaa15a, 0xe003ec3e, 0x9c269bae
);
static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_fe_const_b = SECP256K1_FE_CONST(
static const rustsecp256k1_v0_4_1_fe rustsecp256k1_v0_4_1_fe_const_b = SECP256K1_FE_CONST(
0x2cca28fa, 0xfc614b80, 0x2a3db42b, 0x00ba00b1,
0xbea8d943, 0xdace9ab2, 0x9536daea, 0x0074defb
);
@ -50,83 +49,84 @@ static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_fe_const_b = SECP256K1
/** Generator for secp256k1, value 'g' defined in
* "Standards for Efficient Cryptography" (SEC2) 2.7.1.
*/
static const rustsecp256k1_v0_4_0_ge rustsecp256k1_v0_4_0_ge_const_g = SECP256K1_GE_CONST(
static const rustsecp256k1_v0_4_1_ge rustsecp256k1_v0_4_1_ge_const_g = SECP256K1_GE_CONST(
0x79BE667EUL, 0xF9DCBBACUL, 0x55A06295UL, 0xCE870B07UL,
0x029BFCDBUL, 0x2DCE28D9UL, 0x59F2815BUL, 0x16F81798UL,
0x483ADA77UL, 0x26A3C465UL, 0x5DA4FBFCUL, 0x0E1108A8UL,
0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL
);
static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_fe_const_b = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 7);
static const rustsecp256k1_v0_4_1_fe rustsecp256k1_v0_4_1_fe_const_b = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 7);
#endif
static void rustsecp256k1_v0_4_0_ge_set_gej_zinv(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_fe *zi) {
rustsecp256k1_v0_4_0_fe zi2;
rustsecp256k1_v0_4_0_fe zi3;
rustsecp256k1_v0_4_0_fe_sqr(&zi2, zi);
rustsecp256k1_v0_4_0_fe_mul(&zi3, &zi2, zi);
rustsecp256k1_v0_4_0_fe_mul(&r->x, &a->x, &zi2);
rustsecp256k1_v0_4_0_fe_mul(&r->y, &a->y, &zi3);
static void rustsecp256k1_v0_4_1_ge_set_gej_zinv(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_fe *zi) {
rustsecp256k1_v0_4_1_fe zi2;
rustsecp256k1_v0_4_1_fe zi3;
rustsecp256k1_v0_4_1_fe_sqr(&zi2, zi);
rustsecp256k1_v0_4_1_fe_mul(&zi3, &zi2, zi);
rustsecp256k1_v0_4_1_fe_mul(&r->x, &a->x, &zi2);
rustsecp256k1_v0_4_1_fe_mul(&r->y, &a->y, &zi3);
r->infinity = a->infinity;
}
static void rustsecp256k1_v0_4_0_ge_set_xy(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_fe *x, const rustsecp256k1_v0_4_0_fe *y) {
static void rustsecp256k1_v0_4_1_ge_set_xy(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_fe *x, const rustsecp256k1_v0_4_1_fe *y) {
r->infinity = 0;
r->x = *x;
r->y = *y;
}
static int rustsecp256k1_v0_4_0_ge_is_infinity(const rustsecp256k1_v0_4_0_ge *a) {
static int rustsecp256k1_v0_4_1_ge_is_infinity(const rustsecp256k1_v0_4_1_ge *a) {
return a->infinity;
}
static void rustsecp256k1_v0_4_0_ge_neg(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_ge *a) {
static void rustsecp256k1_v0_4_1_ge_neg(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_ge *a) {
*r = *a;
rustsecp256k1_v0_4_0_fe_normalize_weak(&r->y);
rustsecp256k1_v0_4_0_fe_negate(&r->y, &r->y, 1);
rustsecp256k1_v0_4_1_fe_normalize_weak(&r->y);
rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->y, 1);
}
static void rustsecp256k1_v0_4_0_ge_set_gej(rustsecp256k1_v0_4_0_ge *r, rustsecp256k1_v0_4_0_gej *a) {
rustsecp256k1_v0_4_0_fe z2, z3;
static void rustsecp256k1_v0_4_1_ge_set_gej(rustsecp256k1_v0_4_1_ge *r, rustsecp256k1_v0_4_1_gej *a) {
rustsecp256k1_v0_4_1_fe z2, z3;
r->infinity = a->infinity;
rustsecp256k1_v0_4_0_fe_inv(&a->z, &a->z);
rustsecp256k1_v0_4_0_fe_sqr(&z2, &a->z);
rustsecp256k1_v0_4_0_fe_mul(&z3, &a->z, &z2);
rustsecp256k1_v0_4_0_fe_mul(&a->x, &a->x, &z2);
rustsecp256k1_v0_4_0_fe_mul(&a->y, &a->y, &z3);
rustsecp256k1_v0_4_0_fe_set_int(&a->z, 1);
rustsecp256k1_v0_4_1_fe_inv(&a->z, &a->z);
rustsecp256k1_v0_4_1_fe_sqr(&z2, &a->z);
rustsecp256k1_v0_4_1_fe_mul(&z3, &a->z, &z2);
rustsecp256k1_v0_4_1_fe_mul(&a->x, &a->x, &z2);
rustsecp256k1_v0_4_1_fe_mul(&a->y, &a->y, &z3);
rustsecp256k1_v0_4_1_fe_set_int(&a->z, 1);
r->x = a->x;
r->y = a->y;
}
static void rustsecp256k1_v0_4_0_ge_set_gej_var(rustsecp256k1_v0_4_0_ge *r, rustsecp256k1_v0_4_0_gej *a) {
rustsecp256k1_v0_4_0_fe z2, z3;
r->infinity = a->infinity;
static void rustsecp256k1_v0_4_1_ge_set_gej_var(rustsecp256k1_v0_4_1_ge *r, rustsecp256k1_v0_4_1_gej *a) {
rustsecp256k1_v0_4_1_fe z2, z3;
if (a->infinity) {
rustsecp256k1_v0_4_1_ge_set_infinity(r);
return;
}
rustsecp256k1_v0_4_0_fe_inv_var(&a->z, &a->z);
rustsecp256k1_v0_4_0_fe_sqr(&z2, &a->z);
rustsecp256k1_v0_4_0_fe_mul(&z3, &a->z, &z2);
rustsecp256k1_v0_4_0_fe_mul(&a->x, &a->x, &z2);
rustsecp256k1_v0_4_0_fe_mul(&a->y, &a->y, &z3);
rustsecp256k1_v0_4_0_fe_set_int(&a->z, 1);
r->x = a->x;
r->y = a->y;
rustsecp256k1_v0_4_1_fe_inv_var(&a->z, &a->z);
rustsecp256k1_v0_4_1_fe_sqr(&z2, &a->z);
rustsecp256k1_v0_4_1_fe_mul(&z3, &a->z, &z2);
rustsecp256k1_v0_4_1_fe_mul(&a->x, &a->x, &z2);
rustsecp256k1_v0_4_1_fe_mul(&a->y, &a->y, &z3);
rustsecp256k1_v0_4_1_fe_set_int(&a->z, 1);
rustsecp256k1_v0_4_1_ge_set_xy(r, &a->x, &a->y);
}
static void rustsecp256k1_v0_4_0_ge_set_all_gej_var(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_gej *a, size_t len) {
rustsecp256k1_v0_4_0_fe u;
static void rustsecp256k1_v0_4_1_ge_set_all_gej_var(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_gej *a, size_t len) {
rustsecp256k1_v0_4_1_fe u;
size_t i;
size_t last_i = SIZE_MAX;
for (i = 0; i < len; i++) {
if (!a[i].infinity) {
if (a[i].infinity) {
rustsecp256k1_v0_4_1_ge_set_infinity(&r[i]);
} else {
/* Use destination's x coordinates as scratch space */
if (last_i == SIZE_MAX) {
r[i].x = a[i].z;
} else {
rustsecp256k1_v0_4_0_fe_mul(&r[i].x, &r[last_i].x, &a[i].z);
rustsecp256k1_v0_4_1_fe_mul(&r[i].x, &r[last_i].x, &a[i].z);
}
last_i = i;
}
@ -134,14 +134,14 @@ static void rustsecp256k1_v0_4_0_ge_set_all_gej_var(rustsecp256k1_v0_4_0_ge *r,
if (last_i == SIZE_MAX) {
return;
}
rustsecp256k1_v0_4_0_fe_inv_var(&u, &r[last_i].x);
rustsecp256k1_v0_4_1_fe_inv_var(&u, &r[last_i].x);
i = last_i;
while (i > 0) {
i--;
if (!a[i].infinity) {
rustsecp256k1_v0_4_0_fe_mul(&r[last_i].x, &r[i].x, &u);
rustsecp256k1_v0_4_0_fe_mul(&u, &u, &a[last_i].z);
rustsecp256k1_v0_4_1_fe_mul(&r[last_i].x, &r[i].x, &u);
rustsecp256k1_v0_4_1_fe_mul(&u, &u, &a[last_i].z);
last_i = i;
}
}
@ -149,23 +149,22 @@ static void rustsecp256k1_v0_4_0_ge_set_all_gej_var(rustsecp256k1_v0_4_0_ge *r,
r[last_i].x = u;
for (i = 0; i < len; i++) {
r[i].infinity = a[i].infinity;
if (!a[i].infinity) {
rustsecp256k1_v0_4_0_ge_set_gej_zinv(&r[i], &a[i], &r[i].x);
rustsecp256k1_v0_4_1_ge_set_gej_zinv(&r[i], &a[i], &r[i].x);
}
}
}
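rustsecp256k1_v0_4_1_ge_set_all_gej_var above normalizes a whole batch of points with a single field inversion: it accumulates running products of the z values (using the output x slots as scratch), inverts the final product once, then walks backwards peeling off one inverse per element (Montgomery's batch-inversion trick). A hedged, self-contained sketch of the same idea on small integers modulo a toy prime; modinv here uses Fermat's little theorem rather than the library's field code:

#include <stdint.h>
#include <stdio.h>

#define P 1000003u /* toy prime modulus */

static uint64_t modmul(uint64_t a, uint64_t b) { return (a * b) % P; }

static uint64_t modpow(uint64_t b, uint64_t e) {
    uint64_t r = 1;
    b %= P;
    while (e) {
        if (e & 1) r = modmul(r, b);
        b = modmul(b, b);
        e >>= 1;
    }
    return r;
}

/* Fermat inversion: a^(P-2) mod P, valid because P is prime and a != 0. */
static uint64_t modinv(uint64_t a) { return modpow(a, P - 2); }

/* Invert all of z[0..n-1] using a single modular inversion. */
static void batch_inverse(uint64_t *out, const uint64_t *z, size_t n) {
    uint64_t acc = 1, inv;
    size_t i;
    for (i = 0; i < n; i++) {
        out[i] = acc;                 /* out[i] = z[0]*...*z[i-1] */
        acc = modmul(acc, z[i]);
    }
    inv = modinv(acc);                /* the only inversion */
    for (i = n; i-- > 0; ) {
        out[i] = modmul(out[i], inv); /* = 1/z[i] */
        inv = modmul(inv, z[i]);      /* now the inverse of the shorter prefix */
    }
}

int main(void) {
    uint64_t z[4] = {3, 7, 11, 123456}, inv[4];
    size_t i;
    batch_inverse(inv, z, 4);
    for (i = 0; i < 4; i++) {
        printf("1/%llu mod %u = %llu (check: %llu)\n",
               (unsigned long long)z[i], P, (unsigned long long)inv[i],
               (unsigned long long)modmul(z[i], inv[i]));
    }
    return 0;
}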
static void rustsecp256k1_v0_4_0_ge_globalz_set_table_gej(size_t len, rustsecp256k1_v0_4_0_ge *r, rustsecp256k1_v0_4_0_fe *globalz, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_fe *zr) {
static void rustsecp256k1_v0_4_1_ge_globalz_set_table_gej(size_t len, rustsecp256k1_v0_4_1_ge *r, rustsecp256k1_v0_4_1_fe *globalz, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_fe *zr) {
size_t i = len - 1;
rustsecp256k1_v0_4_0_fe zs;
rustsecp256k1_v0_4_1_fe zs;
if (len > 0) {
/* The z of the final point gives us the "global Z" for the table. */
r[i].x = a[i].x;
r[i].y = a[i].y;
/* Ensure all y values are in weak normal form for fast negation of points */
rustsecp256k1_v0_4_0_fe_normalize_weak(&r[i].y);
rustsecp256k1_v0_4_1_fe_normalize_weak(&r[i].y);
*globalz = a[i].z;
r[i].infinity = 0;
zs = zr[i];
@ -173,104 +172,100 @@ static void rustsecp256k1_v0_4_0_ge_globalz_set_table_gej(size_t len, rustsecp25
/* Work our way backwards, using the z-ratios to scale the x/y values. */
while (i > 0) {
if (i != len - 1) {
rustsecp256k1_v0_4_0_fe_mul(&zs, &zs, &zr[i]);
rustsecp256k1_v0_4_1_fe_mul(&zs, &zs, &zr[i]);
}
i--;
rustsecp256k1_v0_4_0_ge_set_gej_zinv(&r[i], &a[i], &zs);
rustsecp256k1_v0_4_1_ge_set_gej_zinv(&r[i], &a[i], &zs);
}
}
}
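In the function above, the ratios satisfy a[i-1].z * zr[i] == a[i].z, so the running product zs built while walking backwards equals a[len-1].z / a[j].z when entry j is processed; feeding it to rustsecp256k1_v0_4_1_ge_set_gej_zinv rescales entry j so that every table entry shares the single denominator globalz = a[len-1].z. Only multiplications by known ratios are needed, and no field inversion is performed.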
static void rustsecp256k1_v0_4_0_gej_set_infinity(rustsecp256k1_v0_4_0_gej *r) {
static void rustsecp256k1_v0_4_1_gej_set_infinity(rustsecp256k1_v0_4_1_gej *r) {
r->infinity = 1;
rustsecp256k1_v0_4_0_fe_clear(&r->x);
rustsecp256k1_v0_4_0_fe_clear(&r->y);
rustsecp256k1_v0_4_0_fe_clear(&r->z);
rustsecp256k1_v0_4_1_fe_clear(&r->x);
rustsecp256k1_v0_4_1_fe_clear(&r->y);
rustsecp256k1_v0_4_1_fe_clear(&r->z);
}
static void rustsecp256k1_v0_4_0_ge_set_infinity(rustsecp256k1_v0_4_0_ge *r) {
static void rustsecp256k1_v0_4_1_ge_set_infinity(rustsecp256k1_v0_4_1_ge *r) {
r->infinity = 1;
rustsecp256k1_v0_4_0_fe_clear(&r->x);
rustsecp256k1_v0_4_0_fe_clear(&r->y);
rustsecp256k1_v0_4_1_fe_clear(&r->x);
rustsecp256k1_v0_4_1_fe_clear(&r->y);
}
static void rustsecp256k1_v0_4_0_gej_clear(rustsecp256k1_v0_4_0_gej *r) {
static void rustsecp256k1_v0_4_1_gej_clear(rustsecp256k1_v0_4_1_gej *r) {
r->infinity = 0;
rustsecp256k1_v0_4_0_fe_clear(&r->x);
rustsecp256k1_v0_4_0_fe_clear(&r->y);
rustsecp256k1_v0_4_0_fe_clear(&r->z);
rustsecp256k1_v0_4_1_fe_clear(&r->x);
rustsecp256k1_v0_4_1_fe_clear(&r->y);
rustsecp256k1_v0_4_1_fe_clear(&r->z);
}
static void rustsecp256k1_v0_4_0_ge_clear(rustsecp256k1_v0_4_0_ge *r) {
static void rustsecp256k1_v0_4_1_ge_clear(rustsecp256k1_v0_4_1_ge *r) {
r->infinity = 0;
rustsecp256k1_v0_4_0_fe_clear(&r->x);
rustsecp256k1_v0_4_0_fe_clear(&r->y);
rustsecp256k1_v0_4_1_fe_clear(&r->x);
rustsecp256k1_v0_4_1_fe_clear(&r->y);
}
static int rustsecp256k1_v0_4_0_ge_set_xquad(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_fe *x) {
rustsecp256k1_v0_4_0_fe x2, x3;
static int rustsecp256k1_v0_4_1_ge_set_xo_var(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_fe *x, int odd) {
rustsecp256k1_v0_4_1_fe x2, x3;
r->x = *x;
rustsecp256k1_v0_4_0_fe_sqr(&x2, x);
rustsecp256k1_v0_4_0_fe_mul(&x3, x, &x2);
rustsecp256k1_v0_4_1_fe_sqr(&x2, x);
rustsecp256k1_v0_4_1_fe_mul(&x3, x, &x2);
r->infinity = 0;
rustsecp256k1_v0_4_0_fe_add(&x3, &rustsecp256k1_v0_4_0_fe_const_b);
return rustsecp256k1_v0_4_0_fe_sqrt(&r->y, &x3);
}
static int rustsecp256k1_v0_4_0_ge_set_xo_var(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_fe *x, int odd) {
if (!rustsecp256k1_v0_4_0_ge_set_xquad(r, x)) {
rustsecp256k1_v0_4_1_fe_add(&x3, &rustsecp256k1_v0_4_1_fe_const_b);
if (!rustsecp256k1_v0_4_1_fe_sqrt(&r->y, &x3)) {
return 0;
}
rustsecp256k1_v0_4_0_fe_normalize_var(&r->y);
if (rustsecp256k1_v0_4_0_fe_is_odd(&r->y) != odd) {
rustsecp256k1_v0_4_0_fe_negate(&r->y, &r->y, 1);
rustsecp256k1_v0_4_1_fe_normalize_var(&r->y);
if (rustsecp256k1_v0_4_1_fe_is_odd(&r->y) != odd) {
rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->y, 1);
}
return 1;
}
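rustsecp256k1_v0_4_1_ge_set_xo_var now folds in the work of the removed ge_set_xquad: it solves the curve equation for a candidate Y and then fixes its parity. Roughly,

\[ y = \sqrt{x^3 + 7} \pmod{p}, \]

with y negated afterwards if its parity does not match the requested oddness; if x^3 + 7 has no square root, no curve point with that X coordinate exists and the function returns 0. This is essentially the decompression step used when parsing a 33-byte compressed public key, whose leading byte encodes the oddness of Y.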
static void rustsecp256k1_v0_4_0_gej_set_ge(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_ge *a) {
static void rustsecp256k1_v0_4_1_gej_set_ge(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_ge *a) {
r->infinity = a->infinity;
r->x = a->x;
r->y = a->y;
rustsecp256k1_v0_4_0_fe_set_int(&r->z, 1);
rustsecp256k1_v0_4_1_fe_set_int(&r->z, 1);
}
static int rustsecp256k1_v0_4_0_gej_eq_x_var(const rustsecp256k1_v0_4_0_fe *x, const rustsecp256k1_v0_4_0_gej *a) {
rustsecp256k1_v0_4_0_fe r, r2;
static int rustsecp256k1_v0_4_1_gej_eq_x_var(const rustsecp256k1_v0_4_1_fe *x, const rustsecp256k1_v0_4_1_gej *a) {
rustsecp256k1_v0_4_1_fe r, r2;
VERIFY_CHECK(!a->infinity);
rustsecp256k1_v0_4_0_fe_sqr(&r, &a->z); rustsecp256k1_v0_4_0_fe_mul(&r, &r, x);
r2 = a->x; rustsecp256k1_v0_4_0_fe_normalize_weak(&r2);
return rustsecp256k1_v0_4_0_fe_equal_var(&r, &r2);
rustsecp256k1_v0_4_1_fe_sqr(&r, &a->z); rustsecp256k1_v0_4_1_fe_mul(&r, &r, x);
r2 = a->x; rustsecp256k1_v0_4_1_fe_normalize_weak(&r2);
return rustsecp256k1_v0_4_1_fe_equal_var(&r, &r2);
}
static void rustsecp256k1_v0_4_0_gej_neg(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a) {
static void rustsecp256k1_v0_4_1_gej_neg(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a) {
r->infinity = a->infinity;
r->x = a->x;
r->y = a->y;
r->z = a->z;
rustsecp256k1_v0_4_0_fe_normalize_weak(&r->y);
rustsecp256k1_v0_4_0_fe_negate(&r->y, &r->y, 1);
rustsecp256k1_v0_4_1_fe_normalize_weak(&r->y);
rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->y, 1);
}
static int rustsecp256k1_v0_4_0_gej_is_infinity(const rustsecp256k1_v0_4_0_gej *a) {
static int rustsecp256k1_v0_4_1_gej_is_infinity(const rustsecp256k1_v0_4_1_gej *a) {
return a->infinity;
}
static int rustsecp256k1_v0_4_0_ge_is_valid_var(const rustsecp256k1_v0_4_0_ge *a) {
rustsecp256k1_v0_4_0_fe y2, x3;
static int rustsecp256k1_v0_4_1_ge_is_valid_var(const rustsecp256k1_v0_4_1_ge *a) {
rustsecp256k1_v0_4_1_fe y2, x3;
if (a->infinity) {
return 0;
}
/* y^2 = x^3 + 7 */
rustsecp256k1_v0_4_0_fe_sqr(&y2, &a->y);
rustsecp256k1_v0_4_0_fe_sqr(&x3, &a->x); rustsecp256k1_v0_4_0_fe_mul(&x3, &x3, &a->x);
rustsecp256k1_v0_4_0_fe_add(&x3, &rustsecp256k1_v0_4_0_fe_const_b);
rustsecp256k1_v0_4_0_fe_normalize_weak(&x3);
return rustsecp256k1_v0_4_0_fe_equal_var(&y2, &x3);
rustsecp256k1_v0_4_1_fe_sqr(&y2, &a->y);
rustsecp256k1_v0_4_1_fe_sqr(&x3, &a->x); rustsecp256k1_v0_4_1_fe_mul(&x3, &x3, &a->x);
rustsecp256k1_v0_4_1_fe_add(&x3, &rustsecp256k1_v0_4_1_fe_const_b);
rustsecp256k1_v0_4_1_fe_normalize_weak(&x3);
return rustsecp256k1_v0_4_1_fe_equal_var(&y2, &x3);
}
static SECP256K1_INLINE void rustsecp256k1_v0_4_0_gej_double(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a) {
static SECP256K1_INLINE void rustsecp256k1_v0_4_1_gej_double(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a) {
/* Operations: 3 mul, 4 sqr, 0 normalize, 12 mul_int/add/negate.
*
* Note that there is an implementation described at
@ -278,33 +273,33 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_gej_double(rustsecp256k1_v0_4_
* which trades a multiply for a square, but in practice this is actually slower,
* mainly because it requires more normalizations.
*/
rustsecp256k1_v0_4_0_fe t1,t2,t3,t4;
rustsecp256k1_v0_4_1_fe t1,t2,t3,t4;
r->infinity = a->infinity;
rustsecp256k1_v0_4_0_fe_mul(&r->z, &a->z, &a->y);
rustsecp256k1_v0_4_0_fe_mul_int(&r->z, 2); /* Z' = 2*Y*Z (2) */
rustsecp256k1_v0_4_0_fe_sqr(&t1, &a->x);
rustsecp256k1_v0_4_0_fe_mul_int(&t1, 3); /* T1 = 3*X^2 (3) */
rustsecp256k1_v0_4_0_fe_sqr(&t2, &t1); /* T2 = 9*X^4 (1) */
rustsecp256k1_v0_4_0_fe_sqr(&t3, &a->y);
rustsecp256k1_v0_4_0_fe_mul_int(&t3, 2); /* T3 = 2*Y^2 (2) */
rustsecp256k1_v0_4_0_fe_sqr(&t4, &t3);
rustsecp256k1_v0_4_0_fe_mul_int(&t4, 2); /* T4 = 8*Y^4 (2) */
rustsecp256k1_v0_4_0_fe_mul(&t3, &t3, &a->x); /* T3 = 2*X*Y^2 (1) */
rustsecp256k1_v0_4_1_fe_mul(&r->z, &a->z, &a->y);
rustsecp256k1_v0_4_1_fe_mul_int(&r->z, 2); /* Z' = 2*Y*Z (2) */
rustsecp256k1_v0_4_1_fe_sqr(&t1, &a->x);
rustsecp256k1_v0_4_1_fe_mul_int(&t1, 3); /* T1 = 3*X^2 (3) */
rustsecp256k1_v0_4_1_fe_sqr(&t2, &t1); /* T2 = 9*X^4 (1) */
rustsecp256k1_v0_4_1_fe_sqr(&t3, &a->y);
rustsecp256k1_v0_4_1_fe_mul_int(&t3, 2); /* T3 = 2*Y^2 (2) */
rustsecp256k1_v0_4_1_fe_sqr(&t4, &t3);
rustsecp256k1_v0_4_1_fe_mul_int(&t4, 2); /* T4 = 8*Y^4 (2) */
rustsecp256k1_v0_4_1_fe_mul(&t3, &t3, &a->x); /* T3 = 2*X*Y^2 (1) */
r->x = t3;
rustsecp256k1_v0_4_0_fe_mul_int(&r->x, 4); /* X' = 8*X*Y^2 (4) */
rustsecp256k1_v0_4_0_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */
rustsecp256k1_v0_4_0_fe_add(&r->x, &t2); /* X' = 9*X^4 - 8*X*Y^2 (6) */
rustsecp256k1_v0_4_0_fe_negate(&t2, &t2, 1); /* T2 = -9*X^4 (2) */
rustsecp256k1_v0_4_0_fe_mul_int(&t3, 6); /* T3 = 12*X*Y^2 (6) */
rustsecp256k1_v0_4_0_fe_add(&t3, &t2); /* T3 = 12*X*Y^2 - 9*X^4 (8) */
rustsecp256k1_v0_4_0_fe_mul(&r->y, &t1, &t3); /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */
rustsecp256k1_v0_4_0_fe_negate(&t2, &t4, 2); /* T2 = -8*Y^4 (3) */
rustsecp256k1_v0_4_0_fe_add(&r->y, &t2); /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */
rustsecp256k1_v0_4_1_fe_mul_int(&r->x, 4); /* X' = 8*X*Y^2 (4) */
rustsecp256k1_v0_4_1_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */
rustsecp256k1_v0_4_1_fe_add(&r->x, &t2); /* X' = 9*X^4 - 8*X*Y^2 (6) */
rustsecp256k1_v0_4_1_fe_negate(&t2, &t2, 1); /* T2 = -9*X^4 (2) */
rustsecp256k1_v0_4_1_fe_mul_int(&t3, 6); /* T3 = 12*X*Y^2 (6) */
rustsecp256k1_v0_4_1_fe_add(&t3, &t2); /* T3 = 12*X*Y^2 - 9*X^4 (8) */
rustsecp256k1_v0_4_1_fe_mul(&r->y, &t1, &t3); /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */
rustsecp256k1_v0_4_1_fe_negate(&t2, &t4, 2); /* T2 = -8*Y^4 (3) */
rustsecp256k1_v0_4_1_fe_add(&r->y, &t2); /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */
}
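The doubling above implements the formulas annotated step by step in the comments; written out, for a Jacobian point (X, Y, Z) on y^2 = x^3 + 7 the result is

\[
\begin{aligned}
Z' &= 2\,Y\,Z,\\
X' &= 9X^4 - 8XY^2,\\
Y' &= 36X^3Y^2 - 27X^6 - 8Y^4.
\end{aligned}
\]

The parenthesized numbers in the comments track field-element magnitudes (how far a value may be from fully normalized) for the normalization bookkeeping, not mathematical values.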
static void rustsecp256k1_v0_4_0_gej_double_var(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, rustsecp256k1_v0_4_0_fe *rzr) {
static void rustsecp256k1_v0_4_1_gej_double_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, rustsecp256k1_v0_4_1_fe *rzr) {
/** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity,
* Q must equal -Q, i.e. Q.y == -(Q.y), i.e. Q.y is 0. For a point on y^2 = x^3 + 7 to have
* y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p.
@ -316,25 +311,25 @@ static void rustsecp256k1_v0_4_0_gej_double_var(rustsecp256k1_v0_4_0_gej *r, con
* point will be gibberish (z = 0 but infinity = 0).
*/
if (a->infinity) {
r->infinity = 1;
rustsecp256k1_v0_4_1_gej_set_infinity(r);
if (rzr != NULL) {
rustsecp256k1_v0_4_0_fe_set_int(rzr, 1);
rustsecp256k1_v0_4_1_fe_set_int(rzr, 1);
}
return;
}
if (rzr != NULL) {
*rzr = a->y;
rustsecp256k1_v0_4_0_fe_normalize_weak(rzr);
rustsecp256k1_v0_4_0_fe_mul_int(rzr, 2);
rustsecp256k1_v0_4_1_fe_normalize_weak(rzr);
rustsecp256k1_v0_4_1_fe_mul_int(rzr, 2);
}
rustsecp256k1_v0_4_0_gej_double(r, a);
rustsecp256k1_v0_4_1_gej_double(r, a);
}
static void rustsecp256k1_v0_4_0_gej_add_var(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_gej *b, rustsecp256k1_v0_4_0_fe *rzr) {
static void rustsecp256k1_v0_4_1_gej_add_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_gej *b, rustsecp256k1_v0_4_1_fe *rzr) {
/* Operations: 12 mul, 4 sqr, 2 normalize, 12 mul_int/add/negate */
rustsecp256k1_v0_4_0_fe z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
rustsecp256k1_v0_4_1_fe z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
if (a->infinity) {
VERIFY_CHECK(rzr == NULL);
@ -344,112 +339,112 @@ static void rustsecp256k1_v0_4_0_gej_add_var(rustsecp256k1_v0_4_0_gej *r, const
if (b->infinity) {
if (rzr != NULL) {
rustsecp256k1_v0_4_0_fe_set_int(rzr, 1);
rustsecp256k1_v0_4_1_fe_set_int(rzr, 1);
}
*r = *a;
return;
}
r->infinity = 0;
rustsecp256k1_v0_4_0_fe_sqr(&z22, &b->z);
rustsecp256k1_v0_4_0_fe_sqr(&z12, &a->z);
rustsecp256k1_v0_4_0_fe_mul(&u1, &a->x, &z22);
rustsecp256k1_v0_4_0_fe_mul(&u2, &b->x, &z12);
rustsecp256k1_v0_4_0_fe_mul(&s1, &a->y, &z22); rustsecp256k1_v0_4_0_fe_mul(&s1, &s1, &b->z);
rustsecp256k1_v0_4_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_4_0_fe_mul(&s2, &s2, &a->z);
rustsecp256k1_v0_4_0_fe_negate(&h, &u1, 1); rustsecp256k1_v0_4_0_fe_add(&h, &u2);
rustsecp256k1_v0_4_0_fe_negate(&i, &s1, 1); rustsecp256k1_v0_4_0_fe_add(&i, &s2);
if (rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&h)) {
if (rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&i)) {
rustsecp256k1_v0_4_0_gej_double_var(r, a, rzr);
rustsecp256k1_v0_4_1_fe_sqr(&z22, &b->z);
rustsecp256k1_v0_4_1_fe_sqr(&z12, &a->z);
rustsecp256k1_v0_4_1_fe_mul(&u1, &a->x, &z22);
rustsecp256k1_v0_4_1_fe_mul(&u2, &b->x, &z12);
rustsecp256k1_v0_4_1_fe_mul(&s1, &a->y, &z22); rustsecp256k1_v0_4_1_fe_mul(&s1, &s1, &b->z);
rustsecp256k1_v0_4_1_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_4_1_fe_mul(&s2, &s2, &a->z);
rustsecp256k1_v0_4_1_fe_negate(&h, &u1, 1); rustsecp256k1_v0_4_1_fe_add(&h, &u2);
rustsecp256k1_v0_4_1_fe_negate(&i, &s1, 1); rustsecp256k1_v0_4_1_fe_add(&i, &s2);
if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&h)) {
if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&i)) {
rustsecp256k1_v0_4_1_gej_double_var(r, a, rzr);
} else {
if (rzr != NULL) {
rustsecp256k1_v0_4_0_fe_set_int(rzr, 0);
rustsecp256k1_v0_4_1_fe_set_int(rzr, 0);
}
rustsecp256k1_v0_4_0_gej_set_infinity(r);
rustsecp256k1_v0_4_1_gej_set_infinity(r);
}
return;
}
rustsecp256k1_v0_4_0_fe_sqr(&i2, &i);
rustsecp256k1_v0_4_0_fe_sqr(&h2, &h);
rustsecp256k1_v0_4_0_fe_mul(&h3, &h, &h2);
rustsecp256k1_v0_4_0_fe_mul(&h, &h, &b->z);
rustsecp256k1_v0_4_1_fe_sqr(&i2, &i);
rustsecp256k1_v0_4_1_fe_sqr(&h2, &h);
rustsecp256k1_v0_4_1_fe_mul(&h3, &h, &h2);
rustsecp256k1_v0_4_1_fe_mul(&h, &h, &b->z);
if (rzr != NULL) {
*rzr = h;
}
rustsecp256k1_v0_4_0_fe_mul(&r->z, &a->z, &h);
rustsecp256k1_v0_4_0_fe_mul(&t, &u1, &h2);
r->x = t; rustsecp256k1_v0_4_0_fe_mul_int(&r->x, 2); rustsecp256k1_v0_4_0_fe_add(&r->x, &h3); rustsecp256k1_v0_4_0_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_4_0_fe_add(&r->x, &i2);
rustsecp256k1_v0_4_0_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_4_0_fe_add(&r->y, &t); rustsecp256k1_v0_4_0_fe_mul(&r->y, &r->y, &i);
rustsecp256k1_v0_4_0_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_4_0_fe_negate(&h3, &h3, 1);
rustsecp256k1_v0_4_0_fe_add(&r->y, &h3);
rustsecp256k1_v0_4_1_fe_mul(&r->z, &a->z, &h);
rustsecp256k1_v0_4_1_fe_mul(&t, &u1, &h2);
r->x = t; rustsecp256k1_v0_4_1_fe_mul_int(&r->x, 2); rustsecp256k1_v0_4_1_fe_add(&r->x, &h3); rustsecp256k1_v0_4_1_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_4_1_fe_add(&r->x, &i2);
rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_4_1_fe_add(&r->y, &t); rustsecp256k1_v0_4_1_fe_mul(&r->y, &r->y, &i);
rustsecp256k1_v0_4_1_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_4_1_fe_negate(&h3, &h3, 1);
rustsecp256k1_v0_4_1_fe_add(&r->y, &h3);
}
static void rustsecp256k1_v0_4_0_gej_add_ge_var(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_ge *b, rustsecp256k1_v0_4_0_fe *rzr) {
static void rustsecp256k1_v0_4_1_gej_add_ge_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_ge *b, rustsecp256k1_v0_4_1_fe *rzr) {
/* 8 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */
rustsecp256k1_v0_4_0_fe z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
rustsecp256k1_v0_4_1_fe z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
if (a->infinity) {
VERIFY_CHECK(rzr == NULL);
rustsecp256k1_v0_4_0_gej_set_ge(r, b);
rustsecp256k1_v0_4_1_gej_set_ge(r, b);
return;
}
if (b->infinity) {
if (rzr != NULL) {
rustsecp256k1_v0_4_0_fe_set_int(rzr, 1);
rustsecp256k1_v0_4_1_fe_set_int(rzr, 1);
}
*r = *a;
return;
}
r->infinity = 0;
rustsecp256k1_v0_4_0_fe_sqr(&z12, &a->z);
u1 = a->x; rustsecp256k1_v0_4_0_fe_normalize_weak(&u1);
rustsecp256k1_v0_4_0_fe_mul(&u2, &b->x, &z12);
s1 = a->y; rustsecp256k1_v0_4_0_fe_normalize_weak(&s1);
rustsecp256k1_v0_4_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_4_0_fe_mul(&s2, &s2, &a->z);
rustsecp256k1_v0_4_0_fe_negate(&h, &u1, 1); rustsecp256k1_v0_4_0_fe_add(&h, &u2);
rustsecp256k1_v0_4_0_fe_negate(&i, &s1, 1); rustsecp256k1_v0_4_0_fe_add(&i, &s2);
if (rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&h)) {
if (rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&i)) {
rustsecp256k1_v0_4_0_gej_double_var(r, a, rzr);
rustsecp256k1_v0_4_1_fe_sqr(&z12, &a->z);
u1 = a->x; rustsecp256k1_v0_4_1_fe_normalize_weak(&u1);
rustsecp256k1_v0_4_1_fe_mul(&u2, &b->x, &z12);
s1 = a->y; rustsecp256k1_v0_4_1_fe_normalize_weak(&s1);
rustsecp256k1_v0_4_1_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_4_1_fe_mul(&s2, &s2, &a->z);
rustsecp256k1_v0_4_1_fe_negate(&h, &u1, 1); rustsecp256k1_v0_4_1_fe_add(&h, &u2);
rustsecp256k1_v0_4_1_fe_negate(&i, &s1, 1); rustsecp256k1_v0_4_1_fe_add(&i, &s2);
if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&h)) {
if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&i)) {
rustsecp256k1_v0_4_1_gej_double_var(r, a, rzr);
} else {
if (rzr != NULL) {
rustsecp256k1_v0_4_0_fe_set_int(rzr, 0);
rustsecp256k1_v0_4_1_fe_set_int(rzr, 0);
}
rustsecp256k1_v0_4_0_gej_set_infinity(r);
rustsecp256k1_v0_4_1_gej_set_infinity(r);
}
return;
}
rustsecp256k1_v0_4_0_fe_sqr(&i2, &i);
rustsecp256k1_v0_4_0_fe_sqr(&h2, &h);
rustsecp256k1_v0_4_0_fe_mul(&h3, &h, &h2);
rustsecp256k1_v0_4_1_fe_sqr(&i2, &i);
rustsecp256k1_v0_4_1_fe_sqr(&h2, &h);
rustsecp256k1_v0_4_1_fe_mul(&h3, &h, &h2);
if (rzr != NULL) {
*rzr = h;
}
rustsecp256k1_v0_4_0_fe_mul(&r->z, &a->z, &h);
rustsecp256k1_v0_4_0_fe_mul(&t, &u1, &h2);
r->x = t; rustsecp256k1_v0_4_0_fe_mul_int(&r->x, 2); rustsecp256k1_v0_4_0_fe_add(&r->x, &h3); rustsecp256k1_v0_4_0_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_4_0_fe_add(&r->x, &i2);
rustsecp256k1_v0_4_0_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_4_0_fe_add(&r->y, &t); rustsecp256k1_v0_4_0_fe_mul(&r->y, &r->y, &i);
rustsecp256k1_v0_4_0_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_4_0_fe_negate(&h3, &h3, 1);
rustsecp256k1_v0_4_0_fe_add(&r->y, &h3);
rustsecp256k1_v0_4_1_fe_mul(&r->z, &a->z, &h);
rustsecp256k1_v0_4_1_fe_mul(&t, &u1, &h2);
r->x = t; rustsecp256k1_v0_4_1_fe_mul_int(&r->x, 2); rustsecp256k1_v0_4_1_fe_add(&r->x, &h3); rustsecp256k1_v0_4_1_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_4_1_fe_add(&r->x, &i2);
rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_4_1_fe_add(&r->y, &t); rustsecp256k1_v0_4_1_fe_mul(&r->y, &r->y, &i);
rustsecp256k1_v0_4_1_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_4_1_fe_negate(&h3, &h3, 1);
rustsecp256k1_v0_4_1_fe_add(&r->y, &h3);
}
static void rustsecp256k1_v0_4_0_gej_add_zinv_var(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_ge *b, const rustsecp256k1_v0_4_0_fe *bzinv) {
static void rustsecp256k1_v0_4_1_gej_add_zinv_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_ge *b, const rustsecp256k1_v0_4_1_fe *bzinv) {
/* 9 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */
rustsecp256k1_v0_4_0_fe az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
rustsecp256k1_v0_4_1_fe az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
if (b->infinity) {
*r = *a;
return;
}
if (a->infinity) {
rustsecp256k1_v0_4_0_fe bzinv2, bzinv3;
rustsecp256k1_v0_4_1_fe bzinv2, bzinv3;
r->infinity = b->infinity;
rustsecp256k1_v0_4_0_fe_sqr(&bzinv2, bzinv);
rustsecp256k1_v0_4_0_fe_mul(&bzinv3, &bzinv2, bzinv);
rustsecp256k1_v0_4_0_fe_mul(&r->x, &b->x, &bzinv2);
rustsecp256k1_v0_4_0_fe_mul(&r->y, &b->y, &bzinv3);
rustsecp256k1_v0_4_0_fe_set_int(&r->z, 1);
rustsecp256k1_v0_4_1_fe_sqr(&bzinv2, bzinv);
rustsecp256k1_v0_4_1_fe_mul(&bzinv3, &bzinv2, bzinv);
rustsecp256k1_v0_4_1_fe_mul(&r->x, &b->x, &bzinv2);
rustsecp256k1_v0_4_1_fe_mul(&r->y, &b->y, &bzinv3);
rustsecp256k1_v0_4_1_fe_set_int(&r->z, 1);
return;
}
r->infinity = 0;
@ -462,40 +457,40 @@ static void rustsecp256k1_v0_4_0_gej_add_zinv_var(rustsecp256k1_v0_4_0_gej *r, c
* The variable az below holds the modified Z coordinate for a, which is used
* for the computation of rx and ry, but not for rz.
*/
rustsecp256k1_v0_4_0_fe_mul(&az, &a->z, bzinv);
rustsecp256k1_v0_4_1_fe_mul(&az, &a->z, bzinv);
rustsecp256k1_v0_4_0_fe_sqr(&z12, &az);
u1 = a->x; rustsecp256k1_v0_4_0_fe_normalize_weak(&u1);
rustsecp256k1_v0_4_0_fe_mul(&u2, &b->x, &z12);
s1 = a->y; rustsecp256k1_v0_4_0_fe_normalize_weak(&s1);
rustsecp256k1_v0_4_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_4_0_fe_mul(&s2, &s2, &az);
rustsecp256k1_v0_4_0_fe_negate(&h, &u1, 1); rustsecp256k1_v0_4_0_fe_add(&h, &u2);
rustsecp256k1_v0_4_0_fe_negate(&i, &s1, 1); rustsecp256k1_v0_4_0_fe_add(&i, &s2);
if (rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&h)) {
if (rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&i)) {
rustsecp256k1_v0_4_0_gej_double_var(r, a, NULL);
rustsecp256k1_v0_4_1_fe_sqr(&z12, &az);
u1 = a->x; rustsecp256k1_v0_4_1_fe_normalize_weak(&u1);
rustsecp256k1_v0_4_1_fe_mul(&u2, &b->x, &z12);
s1 = a->y; rustsecp256k1_v0_4_1_fe_normalize_weak(&s1);
rustsecp256k1_v0_4_1_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_4_1_fe_mul(&s2, &s2, &az);
rustsecp256k1_v0_4_1_fe_negate(&h, &u1, 1); rustsecp256k1_v0_4_1_fe_add(&h, &u2);
rustsecp256k1_v0_4_1_fe_negate(&i, &s1, 1); rustsecp256k1_v0_4_1_fe_add(&i, &s2);
if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&h)) {
if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&i)) {
rustsecp256k1_v0_4_1_gej_double_var(r, a, NULL);
} else {
rustsecp256k1_v0_4_0_gej_set_infinity(r);
rustsecp256k1_v0_4_1_gej_set_infinity(r);
}
return;
}
rustsecp256k1_v0_4_0_fe_sqr(&i2, &i);
rustsecp256k1_v0_4_0_fe_sqr(&h2, &h);
rustsecp256k1_v0_4_0_fe_mul(&h3, &h, &h2);
r->z = a->z; rustsecp256k1_v0_4_0_fe_mul(&r->z, &r->z, &h);
rustsecp256k1_v0_4_0_fe_mul(&t, &u1, &h2);
r->x = t; rustsecp256k1_v0_4_0_fe_mul_int(&r->x, 2); rustsecp256k1_v0_4_0_fe_add(&r->x, &h3); rustsecp256k1_v0_4_0_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_4_0_fe_add(&r->x, &i2);
rustsecp256k1_v0_4_0_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_4_0_fe_add(&r->y, &t); rustsecp256k1_v0_4_0_fe_mul(&r->y, &r->y, &i);
rustsecp256k1_v0_4_0_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_4_0_fe_negate(&h3, &h3, 1);
rustsecp256k1_v0_4_0_fe_add(&r->y, &h3);
rustsecp256k1_v0_4_1_fe_sqr(&i2, &i);
rustsecp256k1_v0_4_1_fe_sqr(&h2, &h);
rustsecp256k1_v0_4_1_fe_mul(&h3, &h, &h2);
r->z = a->z; rustsecp256k1_v0_4_1_fe_mul(&r->z, &r->z, &h);
rustsecp256k1_v0_4_1_fe_mul(&t, &u1, &h2);
r->x = t; rustsecp256k1_v0_4_1_fe_mul_int(&r->x, 2); rustsecp256k1_v0_4_1_fe_add(&r->x, &h3); rustsecp256k1_v0_4_1_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_4_1_fe_add(&r->x, &i2);
rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_4_1_fe_add(&r->y, &t); rustsecp256k1_v0_4_1_fe_mul(&r->y, &r->y, &i);
rustsecp256k1_v0_4_1_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_4_1_fe_negate(&h3, &h3, 1);
rustsecp256k1_v0_4_1_fe_add(&r->y, &h3);
}
static void rustsecp256k1_v0_4_0_gej_add_ge(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_gej *a, const rustsecp256k1_v0_4_0_ge *b) {
static void rustsecp256k1_v0_4_1_gej_add_ge(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_ge *b) {
/* Operations: 7 mul, 5 sqr, 4 normalize, 21 mul_int/add/negate/cmov */
static const rustsecp256k1_v0_4_0_fe fe_1 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1);
rustsecp256k1_v0_4_0_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr;
rustsecp256k1_v0_4_0_fe m_alt, rr_alt;
static const rustsecp256k1_v0_4_1_fe fe_1 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1);
rustsecp256k1_v0_4_1_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr;
rustsecp256k1_v0_4_1_fe m_alt, rr_alt;
int infinity, degenerate;
VERIFY_CHECK(!b->infinity);
VERIFY_CHECK(a->infinity == 0 || a->infinity == 1);
@ -550,139 +545,125 @@ static void rustsecp256k1_v0_4_0_gej_add_ge(rustsecp256k1_v0_4_0_gej *r, const r
* so this covers everything.
*/
rustsecp256k1_v0_4_0_fe_sqr(&zz, &a->z); /* z = Z1^2 */
u1 = a->x; rustsecp256k1_v0_4_0_fe_normalize_weak(&u1); /* u1 = U1 = X1*Z2^2 (1) */
rustsecp256k1_v0_4_0_fe_mul(&u2, &b->x, &zz); /* u2 = U2 = X2*Z1^2 (1) */
s1 = a->y; rustsecp256k1_v0_4_0_fe_normalize_weak(&s1); /* s1 = S1 = Y1*Z2^3 (1) */
rustsecp256k1_v0_4_0_fe_mul(&s2, &b->y, &zz); /* s2 = Y2*Z1^2 (1) */
rustsecp256k1_v0_4_0_fe_mul(&s2, &s2, &a->z); /* s2 = S2 = Y2*Z1^3 (1) */
t = u1; rustsecp256k1_v0_4_0_fe_add(&t, &u2); /* t = T = U1+U2 (2) */
m = s1; rustsecp256k1_v0_4_0_fe_add(&m, &s2); /* m = M = S1+S2 (2) */
rustsecp256k1_v0_4_0_fe_sqr(&rr, &t); /* rr = T^2 (1) */
rustsecp256k1_v0_4_0_fe_negate(&m_alt, &u2, 1); /* Malt = -X2*Z1^2 */
rustsecp256k1_v0_4_0_fe_mul(&tt, &u1, &m_alt); /* tt = -U1*U2 (2) */
rustsecp256k1_v0_4_0_fe_add(&rr, &tt); /* rr = R = T^2-U1*U2 (3) */
rustsecp256k1_v0_4_1_fe_sqr(&zz, &a->z); /* z = Z1^2 */
u1 = a->x; rustsecp256k1_v0_4_1_fe_normalize_weak(&u1); /* u1 = U1 = X1*Z2^2 (1) */
rustsecp256k1_v0_4_1_fe_mul(&u2, &b->x, &zz); /* u2 = U2 = X2*Z1^2 (1) */
s1 = a->y; rustsecp256k1_v0_4_1_fe_normalize_weak(&s1); /* s1 = S1 = Y1*Z2^3 (1) */
rustsecp256k1_v0_4_1_fe_mul(&s2, &b->y, &zz); /* s2 = Y2*Z1^2 (1) */
rustsecp256k1_v0_4_1_fe_mul(&s2, &s2, &a->z); /* s2 = S2 = Y2*Z1^3 (1) */
t = u1; rustsecp256k1_v0_4_1_fe_add(&t, &u2); /* t = T = U1+U2 (2) */
m = s1; rustsecp256k1_v0_4_1_fe_add(&m, &s2); /* m = M = S1+S2 (2) */
rustsecp256k1_v0_4_1_fe_sqr(&rr, &t); /* rr = T^2 (1) */
rustsecp256k1_v0_4_1_fe_negate(&m_alt, &u2, 1); /* Malt = -X2*Z1^2 */
rustsecp256k1_v0_4_1_fe_mul(&tt, &u1, &m_alt); /* tt = -U1*U2 (2) */
rustsecp256k1_v0_4_1_fe_add(&rr, &tt); /* rr = R = T^2-U1*U2 (3) */
/** If lambda = R/M = 0/0 we have a problem (except in the "trivial"
* case that Z = z1z2 = 0, and this is special-cased later on). */
degenerate = rustsecp256k1_v0_4_0_fe_normalizes_to_zero(&m) &
rustsecp256k1_v0_4_0_fe_normalizes_to_zero(&rr);
degenerate = rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&m) &
rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&rr);
/* This only occurs when y1 == -y2 and x1^3 == x2^3, but x1 != x2.
* This means either x1 == beta*x2 or beta*x1 == x2, where beta is
* a nontrivial cube root of one. In either case, an alternate
* non-indeterminate expression for lambda is (y1 - y2)/(x1 - x2),
* so we set R/M equal to this. */
rr_alt = s1;
rustsecp256k1_v0_4_0_fe_mul_int(&rr_alt, 2); /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */
rustsecp256k1_v0_4_0_fe_add(&m_alt, &u1); /* Malt = X1*Z2^2 - X2*Z1^2 */
rustsecp256k1_v0_4_1_fe_mul_int(&rr_alt, 2); /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */
rustsecp256k1_v0_4_1_fe_add(&m_alt, &u1); /* Malt = X1*Z2^2 - X2*Z1^2 */
rustsecp256k1_v0_4_0_fe_cmov(&rr_alt, &rr, !degenerate);
rustsecp256k1_v0_4_0_fe_cmov(&m_alt, &m, !degenerate);
rustsecp256k1_v0_4_1_fe_cmov(&rr_alt, &rr, !degenerate);
rustsecp256k1_v0_4_1_fe_cmov(&m_alt, &m, !degenerate);
/* Now Ralt / Malt = lambda and is guaranteed not to be 0/0.
* From here on out Ralt and Malt represent the numerator
* and denominator of lambda; R and M represent the explicit
* expressions x1^2 + x2^2 + x1x2 and y1 + y2. */
rustsecp256k1_v0_4_0_fe_sqr(&n, &m_alt); /* n = Malt^2 (1) */
rustsecp256k1_v0_4_0_fe_mul(&q, &n, &t); /* q = Q = T*Malt^2 (1) */
rustsecp256k1_v0_4_1_fe_sqr(&n, &m_alt); /* n = Malt^2 (1) */
rustsecp256k1_v0_4_1_fe_mul(&q, &n, &t); /* q = Q = T*Malt^2 (1) */
/* These two lines use the observation that either M == Malt or M == 0,
* so M^3 * Malt is either Malt^4 (which is computed by squaring), or
* zero (which is "computed" by cmov). So the cost is one squaring
* versus two multiplications. */
rustsecp256k1_v0_4_0_fe_sqr(&n, &n);
rustsecp256k1_v0_4_0_fe_cmov(&n, &m, degenerate); /* n = M^3 * Malt (2) */
rustsecp256k1_v0_4_0_fe_sqr(&t, &rr_alt); /* t = Ralt^2 (1) */
rustsecp256k1_v0_4_0_fe_mul(&r->z, &a->z, &m_alt); /* r->z = Malt*Z (1) */
infinity = rustsecp256k1_v0_4_0_fe_normalizes_to_zero(&r->z) * (1 - a->infinity);
rustsecp256k1_v0_4_0_fe_mul_int(&r->z, 2); /* r->z = Z3 = 2*Malt*Z (2) */
rustsecp256k1_v0_4_0_fe_negate(&q, &q, 1); /* q = -Q (2) */
rustsecp256k1_v0_4_0_fe_add(&t, &q); /* t = Ralt^2-Q (3) */
rustsecp256k1_v0_4_0_fe_normalize_weak(&t);
rustsecp256k1_v0_4_1_fe_sqr(&n, &n);
rustsecp256k1_v0_4_1_fe_cmov(&n, &m, degenerate); /* n = M^3 * Malt (2) */
rustsecp256k1_v0_4_1_fe_sqr(&t, &rr_alt); /* t = Ralt^2 (1) */
rustsecp256k1_v0_4_1_fe_mul(&r->z, &a->z, &m_alt); /* r->z = Malt*Z (1) */
infinity = rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&r->z) & ~a->infinity;
rustsecp256k1_v0_4_1_fe_mul_int(&r->z, 2); /* r->z = Z3 = 2*Malt*Z (2) */
rustsecp256k1_v0_4_1_fe_negate(&q, &q, 1); /* q = -Q (2) */
rustsecp256k1_v0_4_1_fe_add(&t, &q); /* t = Ralt^2-Q (3) */
rustsecp256k1_v0_4_1_fe_normalize_weak(&t);
r->x = t; /* r->x = Ralt^2-Q (1) */
rustsecp256k1_v0_4_0_fe_mul_int(&t, 2); /* t = 2*x3 (2) */
rustsecp256k1_v0_4_0_fe_add(&t, &q); /* t = 2*x3 - Q: (4) */
rustsecp256k1_v0_4_0_fe_mul(&t, &t, &rr_alt); /* t = Ralt*(2*x3 - Q) (1) */
rustsecp256k1_v0_4_0_fe_add(&t, &n); /* t = Ralt*(2*x3 - Q) + M^3*Malt (3) */
rustsecp256k1_v0_4_0_fe_negate(&r->y, &t, 3); /* r->y = Ralt*(Q - 2x3) - M^3*Malt (4) */
rustsecp256k1_v0_4_0_fe_normalize_weak(&r->y);
rustsecp256k1_v0_4_0_fe_mul_int(&r->x, 4); /* r->x = X3 = 4*(Ralt^2-Q) */
rustsecp256k1_v0_4_0_fe_mul_int(&r->y, 4); /* r->y = Y3 = 4*Ralt*(Q - 2x3) - 4*M^3*Malt (4) */
rustsecp256k1_v0_4_1_fe_mul_int(&t, 2); /* t = 2*x3 (2) */
rustsecp256k1_v0_4_1_fe_add(&t, &q); /* t = 2*x3 - Q: (4) */
rustsecp256k1_v0_4_1_fe_mul(&t, &t, &rr_alt); /* t = Ralt*(2*x3 - Q) (1) */
rustsecp256k1_v0_4_1_fe_add(&t, &n); /* t = Ralt*(2*x3 - Q) + M^3*Malt (3) */
rustsecp256k1_v0_4_1_fe_negate(&r->y, &t, 3); /* r->y = Ralt*(Q - 2x3) - M^3*Malt (4) */
rustsecp256k1_v0_4_1_fe_normalize_weak(&r->y);
rustsecp256k1_v0_4_1_fe_mul_int(&r->x, 4); /* r->x = X3 = 4*(Ralt^2-Q) */
rustsecp256k1_v0_4_1_fe_mul_int(&r->y, 4); /* r->y = Y3 = 4*Ralt*(Q - 2x3) - 4*M^3*Malt (4) */
/** In case a->infinity == 1, replace r with (b->x, b->y, 1). */
rustsecp256k1_v0_4_0_fe_cmov(&r->x, &b->x, a->infinity);
rustsecp256k1_v0_4_0_fe_cmov(&r->y, &b->y, a->infinity);
rustsecp256k1_v0_4_0_fe_cmov(&r->z, &fe_1, a->infinity);
rustsecp256k1_v0_4_1_fe_cmov(&r->x, &b->x, a->infinity);
rustsecp256k1_v0_4_1_fe_cmov(&r->y, &b->y, a->infinity);
rustsecp256k1_v0_4_1_fe_cmov(&r->z, &fe_1, a->infinity);
r->infinity = infinity;
}
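Because rustsecp256k1_v0_4_1_gej_add_ge must run in constant time, it cannot branch on the degenerate case described in the comments; it computes both candidate slopes and selects one with fe_cmov. In the notation of the comments (U1, U2 the scaled X coordinates, S1, S2 the scaled Y coordinates):

\[
T = U_1 + U_2, \quad M = S_1 + S_2, \quad R = T^2 - U_1 U_2, \quad \lambda = R/M,
\]

and when R and M are both zero (y1 = -y2 with x1 != x2) the alternate slope \( \lambda = (S_1 - S_2)/(U_1 - U_2) \) is well defined; since S2 = -S1 in that case, the code materializes it as rr_alt = 2*S1 and m_alt = U1 - U2, then cmovs in the non-degenerate numerator and denominator whenever the inputs are not degenerate.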
static void rustsecp256k1_v0_4_0_gej_rescale(rustsecp256k1_v0_4_0_gej *r, const rustsecp256k1_v0_4_0_fe *s) {
static void rustsecp256k1_v0_4_1_gej_rescale(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_fe *s) {
/* Operations: 4 mul, 1 sqr */
rustsecp256k1_v0_4_0_fe zz;
VERIFY_CHECK(!rustsecp256k1_v0_4_0_fe_is_zero(s));
rustsecp256k1_v0_4_0_fe_sqr(&zz, s);
rustsecp256k1_v0_4_0_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */
rustsecp256k1_v0_4_0_fe_mul(&r->y, &r->y, &zz);
rustsecp256k1_v0_4_0_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */
rustsecp256k1_v0_4_0_fe_mul(&r->z, &r->z, s); /* r->z *= s */
rustsecp256k1_v0_4_1_fe zz;
VERIFY_CHECK(!rustsecp256k1_v0_4_1_fe_is_zero(s));
rustsecp256k1_v0_4_1_fe_sqr(&zz, s);
rustsecp256k1_v0_4_1_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */
rustsecp256k1_v0_4_1_fe_mul(&r->y, &r->y, &zz);
rustsecp256k1_v0_4_1_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */
rustsecp256k1_v0_4_1_fe_mul(&r->z, &r->z, s); /* r->z *= s */
}
static void rustsecp256k1_v0_4_0_ge_to_storage(rustsecp256k1_v0_4_0_ge_storage *r, const rustsecp256k1_v0_4_0_ge *a) {
rustsecp256k1_v0_4_0_fe x, y;
static void rustsecp256k1_v0_4_1_ge_to_storage(rustsecp256k1_v0_4_1_ge_storage *r, const rustsecp256k1_v0_4_1_ge *a) {
rustsecp256k1_v0_4_1_fe x, y;
VERIFY_CHECK(!a->infinity);
x = a->x;
rustsecp256k1_v0_4_0_fe_normalize(&x);
rustsecp256k1_v0_4_1_fe_normalize(&x);
y = a->y;
rustsecp256k1_v0_4_0_fe_normalize(&y);
rustsecp256k1_v0_4_0_fe_to_storage(&r->x, &x);
rustsecp256k1_v0_4_0_fe_to_storage(&r->y, &y);
rustsecp256k1_v0_4_1_fe_normalize(&y);
rustsecp256k1_v0_4_1_fe_to_storage(&r->x, &x);
rustsecp256k1_v0_4_1_fe_to_storage(&r->y, &y);
}
static void rustsecp256k1_v0_4_0_ge_from_storage(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_ge_storage *a) {
rustsecp256k1_v0_4_0_fe_from_storage(&r->x, &a->x);
rustsecp256k1_v0_4_0_fe_from_storage(&r->y, &a->y);
static void rustsecp256k1_v0_4_1_ge_from_storage(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_ge_storage *a) {
rustsecp256k1_v0_4_1_fe_from_storage(&r->x, &a->x);
rustsecp256k1_v0_4_1_fe_from_storage(&r->y, &a->y);
r->infinity = 0;
}
static SECP256K1_INLINE void rustsecp256k1_v0_4_0_ge_storage_cmov(rustsecp256k1_v0_4_0_ge_storage *r, const rustsecp256k1_v0_4_0_ge_storage *a, int flag) {
rustsecp256k1_v0_4_0_fe_storage_cmov(&r->x, &a->x, flag);
rustsecp256k1_v0_4_0_fe_storage_cmov(&r->y, &a->y, flag);
static SECP256K1_INLINE void rustsecp256k1_v0_4_1_ge_storage_cmov(rustsecp256k1_v0_4_1_ge_storage *r, const rustsecp256k1_v0_4_1_ge_storage *a, int flag) {
rustsecp256k1_v0_4_1_fe_storage_cmov(&r->x, &a->x, flag);
rustsecp256k1_v0_4_1_fe_storage_cmov(&r->y, &a->y, flag);
}
static void rustsecp256k1_v0_4_0_ge_mul_lambda(rustsecp256k1_v0_4_0_ge *r, const rustsecp256k1_v0_4_0_ge *a) {
static const rustsecp256k1_v0_4_0_fe beta = SECP256K1_FE_CONST(
static void rustsecp256k1_v0_4_1_ge_mul_lambda(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_ge *a) {
static const rustsecp256k1_v0_4_1_fe beta = SECP256K1_FE_CONST(
0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul,
0x9cf04975ul, 0x12f58995ul, 0xc1396c28ul, 0x719501eeul
);
*r = *a;
rustsecp256k1_v0_4_0_fe_mul(&r->x, &r->x, &beta);
rustsecp256k1_v0_4_1_fe_mul(&r->x, &r->x, &beta);
}
static int rustsecp256k1_v0_4_0_gej_has_quad_y_var(const rustsecp256k1_v0_4_0_gej *a) {
rustsecp256k1_v0_4_0_fe yz;
if (a->infinity) {
return 0;
}
/* We rely on the fact that the Jacobi symbol of 1 / a->z^3 is the same as
* that of a->z. Thus a->y / a->z^3 is a quadratic residue iff a->y * a->z
is */
rustsecp256k1_v0_4_0_fe_mul(&yz, &a->y, &a->z);
return rustsecp256k1_v0_4_0_fe_is_quad_var(&yz);
}
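The removed function above avoided a field inversion by relying on multiplicativity of the Jacobi symbol: since its values lie in {-1, +1}, for nonzero y and z we have \( \chi(y/z^3) = \chi(y)\,\chi(z)^{-3} = \chi(y)\,\chi(z) \), so y/z^3 is a quadratic residue exactly when y*z is, which is the quantity the removed code tested.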
static int rustsecp256k1_v0_4_0_ge_is_in_correct_subgroup(const rustsecp256k1_v0_4_0_ge* ge) {
static int rustsecp256k1_v0_4_1_ge_is_in_correct_subgroup(const rustsecp256k1_v0_4_1_ge* ge) {
#ifdef EXHAUSTIVE_TEST_ORDER
rustsecp256k1_v0_4_0_gej out;
rustsecp256k1_v0_4_1_gej out;
int i;
/* A very simple EC multiplication ladder that avoids a dependency on ecmult. */
rustsecp256k1_v0_4_0_gej_set_infinity(&out);
rustsecp256k1_v0_4_1_gej_set_infinity(&out);
for (i = 0; i < 32; ++i) {
rustsecp256k1_v0_4_0_gej_double_var(&out, &out, NULL);
rustsecp256k1_v0_4_1_gej_double_var(&out, &out, NULL);
if ((((uint32_t)EXHAUSTIVE_TEST_ORDER) >> (31 - i)) & 1) {
rustsecp256k1_v0_4_0_gej_add_ge_var(&out, &out, ge, NULL);
rustsecp256k1_v0_4_1_gej_add_ge_var(&out, &out, ge, NULL);
}
}
return rustsecp256k1_v0_4_0_gej_is_infinity(&out);
return rustsecp256k1_v0_4_1_gej_is_infinity(&out);
#else
(void)ge;
/* The real secp256k1 group has cofactor 1, so the subgroup is the entire curve. */

View File

@ -14,28 +14,28 @@ typedef struct {
uint32_t s[8];
uint32_t buf[16]; /* In big endian */
size_t bytes;
} rustsecp256k1_v0_4_0_sha256;
} rustsecp256k1_v0_4_1_sha256;
static void rustsecp256k1_v0_4_0_sha256_initialize(rustsecp256k1_v0_4_0_sha256 *hash);
static void rustsecp256k1_v0_4_0_sha256_write(rustsecp256k1_v0_4_0_sha256 *hash, const unsigned char *data, size_t size);
static void rustsecp256k1_v0_4_0_sha256_finalize(rustsecp256k1_v0_4_0_sha256 *hash, unsigned char *out32);
static void rustsecp256k1_v0_4_1_sha256_initialize(rustsecp256k1_v0_4_1_sha256 *hash);
static void rustsecp256k1_v0_4_1_sha256_write(rustsecp256k1_v0_4_1_sha256 *hash, const unsigned char *data, size_t size);
static void rustsecp256k1_v0_4_1_sha256_finalize(rustsecp256k1_v0_4_1_sha256 *hash, unsigned char *out32);
typedef struct {
rustsecp256k1_v0_4_0_sha256 inner, outer;
} rustsecp256k1_v0_4_0_hmac_sha256;
rustsecp256k1_v0_4_1_sha256 inner, outer;
} rustsecp256k1_v0_4_1_hmac_sha256;
static void rustsecp256k1_v0_4_0_hmac_sha256_initialize(rustsecp256k1_v0_4_0_hmac_sha256 *hash, const unsigned char *key, size_t size);
static void rustsecp256k1_v0_4_0_hmac_sha256_write(rustsecp256k1_v0_4_0_hmac_sha256 *hash, const unsigned char *data, size_t size);
static void rustsecp256k1_v0_4_0_hmac_sha256_finalize(rustsecp256k1_v0_4_0_hmac_sha256 *hash, unsigned char *out32);
static void rustsecp256k1_v0_4_1_hmac_sha256_initialize(rustsecp256k1_v0_4_1_hmac_sha256 *hash, const unsigned char *key, size_t size);
static void rustsecp256k1_v0_4_1_hmac_sha256_write(rustsecp256k1_v0_4_1_hmac_sha256 *hash, const unsigned char *data, size_t size);
static void rustsecp256k1_v0_4_1_hmac_sha256_finalize(rustsecp256k1_v0_4_1_hmac_sha256 *hash, unsigned char *out32);
typedef struct {
unsigned char v[32];
unsigned char k[32];
int retry;
} rustsecp256k1_v0_4_0_rfc6979_hmac_sha256;
} rustsecp256k1_v0_4_1_rfc6979_hmac_sha256;
static void rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen);
static void rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen);
static void rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 *rng);
static void rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen);
static void rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen);
static void rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 *rng);
#endif /* SECP256K1_HASH_H */
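/* Illustration (not part of the vendored library): a minimal sketch of how the
 * three interfaces declared above are typically driven -- streaming SHA-256,
 * keyed HMAC-SHA256, and the RFC6979 HMAC-DRBG used for nonce generation.
 * These are internal static helpers, so this sketch assumes it is compiled in
 * a translation unit that also includes hash_impl.h; it is not public API. */
static void example_hash_usage(void) {
    unsigned char digest[32], mac[32], nonce[32];
    static const unsigned char msg[] = "example message";
    static const unsigned char key[] = "example key";

    /* Streaming SHA-256: initialize, feed data in any number of chunks, finalize. */
    rustsecp256k1_v0_4_1_sha256 sha;
    rustsecp256k1_v0_4_1_sha256_initialize(&sha);
    rustsecp256k1_v0_4_1_sha256_write(&sha, msg, sizeof(msg) - 1);
    rustsecp256k1_v0_4_1_sha256_finalize(&sha, digest);

    /* HMAC-SHA256: same pattern, but keyed at initialization time. */
    rustsecp256k1_v0_4_1_hmac_sha256 hmac;
    rustsecp256k1_v0_4_1_hmac_sha256_initialize(&hmac, key, sizeof(key) - 1);
    rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, msg, sizeof(msg) - 1);
    rustsecp256k1_v0_4_1_hmac_sha256_finalize(&hmac, mac);

    /* RFC6979 HMAC-DRBG: seed once, then pull as many output blocks as needed. */
    rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 rng;
    rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_initialize(&rng, key, sizeof(key) - 1);
    rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(&rng, nonce, sizeof(nonce));
    rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_finalize(&rng);

    (void)digest; (void)mac; (void)nonce;
}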

View File

@ -34,7 +34,7 @@
#define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
#endif
static void rustsecp256k1_v0_4_0_sha256_initialize(rustsecp256k1_v0_4_0_sha256 *hash) {
static void rustsecp256k1_v0_4_1_sha256_initialize(rustsecp256k1_v0_4_1_sha256 *hash) {
hash->s[0] = 0x6a09e667ul;
hash->s[1] = 0xbb67ae85ul;
hash->s[2] = 0x3c6ef372ul;
@ -47,7 +47,7 @@ static void rustsecp256k1_v0_4_0_sha256_initialize(rustsecp256k1_v0_4_0_sha256 *
}
/** Perform one SHA-256 transformation, processing 16 big endian 32-bit words. */
static void rustsecp256k1_v0_4_0_sha256_transform(uint32_t* s, const uint32_t* chunk) {
static void rustsecp256k1_v0_4_1_sha256_transform(uint32_t* s, const uint32_t* chunk) {
uint32_t a = s[0], b = s[1], c = s[2], d = s[3], e = s[4], f = s[5], g = s[6], h = s[7];
uint32_t w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15;
@ -129,7 +129,7 @@ static void rustsecp256k1_v0_4_0_sha256_transform(uint32_t* s, const uint32_t* c
s[7] += h;
}
static void rustsecp256k1_v0_4_0_sha256_write(rustsecp256k1_v0_4_0_sha256 *hash, const unsigned char *data, size_t len) {
static void rustsecp256k1_v0_4_1_sha256_write(rustsecp256k1_v0_4_1_sha256 *hash, const unsigned char *data, size_t len) {
size_t bufsize = hash->bytes & 0x3F;
hash->bytes += len;
VERIFY_CHECK(hash->bytes >= len);
@ -139,7 +139,7 @@ static void rustsecp256k1_v0_4_0_sha256_write(rustsecp256k1_v0_4_0_sha256 *hash,
memcpy(((unsigned char*)hash->buf) + bufsize, data, chunk_len);
data += chunk_len;
len -= chunk_len;
rustsecp256k1_v0_4_0_sha256_transform(hash->s, hash->buf);
rustsecp256k1_v0_4_1_sha256_transform(hash->s, hash->buf);
bufsize = 0;
}
if (len) {
@ -148,15 +148,15 @@ static void rustsecp256k1_v0_4_0_sha256_write(rustsecp256k1_v0_4_0_sha256 *hash,
}
}
static void rustsecp256k1_v0_4_0_sha256_finalize(rustsecp256k1_v0_4_0_sha256 *hash, unsigned char *out32) {
static void rustsecp256k1_v0_4_1_sha256_finalize(rustsecp256k1_v0_4_1_sha256 *hash, unsigned char *out32) {
static const unsigned char pad[64] = {0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
uint32_t sizedesc[2];
uint32_t out[8];
int i = 0;
sizedesc[0] = BE32(hash->bytes >> 29);
sizedesc[1] = BE32(hash->bytes << 3);
rustsecp256k1_v0_4_0_sha256_write(hash, pad, 1 + ((119 - (hash->bytes % 64)) % 64));
rustsecp256k1_v0_4_0_sha256_write(hash, (const unsigned char*)sizedesc, 8);
rustsecp256k1_v0_4_1_sha256_write(hash, pad, 1 + ((119 - (hash->bytes % 64)) % 64));
rustsecp256k1_v0_4_1_sha256_write(hash, (const unsigned char*)sizedesc, 8);
for (i = 0; i < 8; i++) {
out[i] = BE32(hash->s[i]);
hash->s[i] = 0;
@ -166,60 +166,60 @@ static void rustsecp256k1_v0_4_0_sha256_finalize(rustsecp256k1_v0_4_0_sha256 *ha
/* Initializes a sha256 struct and writes the 64 byte string
* SHA256(tag)||SHA256(tag) into it. */
static void rustsecp256k1_v0_4_0_sha256_initialize_tagged(rustsecp256k1_v0_4_0_sha256 *hash, const unsigned char *tag, size_t taglen) {
static void rustsecp256k1_v0_4_1_sha256_initialize_tagged(rustsecp256k1_v0_4_1_sha256 *hash, const unsigned char *tag, size_t taglen) {
unsigned char buf[32];
rustsecp256k1_v0_4_0_sha256_initialize(hash);
rustsecp256k1_v0_4_0_sha256_write(hash, tag, taglen);
rustsecp256k1_v0_4_0_sha256_finalize(hash, buf);
rustsecp256k1_v0_4_1_sha256_initialize(hash);
rustsecp256k1_v0_4_1_sha256_write(hash, tag, taglen);
rustsecp256k1_v0_4_1_sha256_finalize(hash, buf);
rustsecp256k1_v0_4_0_sha256_initialize(hash);
rustsecp256k1_v0_4_0_sha256_write(hash, buf, 32);
rustsecp256k1_v0_4_0_sha256_write(hash, buf, 32);
rustsecp256k1_v0_4_1_sha256_initialize(hash);
rustsecp256k1_v0_4_1_sha256_write(hash, buf, 32);
rustsecp256k1_v0_4_1_sha256_write(hash, buf, 32);
}
static void rustsecp256k1_v0_4_0_hmac_sha256_initialize(rustsecp256k1_v0_4_0_hmac_sha256 *hash, const unsigned char *key, size_t keylen) {
static void rustsecp256k1_v0_4_1_hmac_sha256_initialize(rustsecp256k1_v0_4_1_hmac_sha256 *hash, const unsigned char *key, size_t keylen) {
size_t n;
unsigned char rkey[64];
if (keylen <= sizeof(rkey)) {
memcpy(rkey, key, keylen);
memset(rkey + keylen, 0, sizeof(rkey) - keylen);
} else {
rustsecp256k1_v0_4_0_sha256 sha256;
rustsecp256k1_v0_4_0_sha256_initialize(&sha256);
rustsecp256k1_v0_4_0_sha256_write(&sha256, key, keylen);
rustsecp256k1_v0_4_0_sha256_finalize(&sha256, rkey);
rustsecp256k1_v0_4_1_sha256 sha256;
rustsecp256k1_v0_4_1_sha256_initialize(&sha256);
rustsecp256k1_v0_4_1_sha256_write(&sha256, key, keylen);
rustsecp256k1_v0_4_1_sha256_finalize(&sha256, rkey);
memset(rkey + 32, 0, 32);
}
rustsecp256k1_v0_4_0_sha256_initialize(&hash->outer);
rustsecp256k1_v0_4_1_sha256_initialize(&hash->outer);
for (n = 0; n < sizeof(rkey); n++) {
rkey[n] ^= 0x5c;
}
rustsecp256k1_v0_4_0_sha256_write(&hash->outer, rkey, sizeof(rkey));
rustsecp256k1_v0_4_1_sha256_write(&hash->outer, rkey, sizeof(rkey));
rustsecp256k1_v0_4_0_sha256_initialize(&hash->inner);
rustsecp256k1_v0_4_1_sha256_initialize(&hash->inner);
for (n = 0; n < sizeof(rkey); n++) {
rkey[n] ^= 0x5c ^ 0x36;
}
rustsecp256k1_v0_4_0_sha256_write(&hash->inner, rkey, sizeof(rkey));
rustsecp256k1_v0_4_1_sha256_write(&hash->inner, rkey, sizeof(rkey));
memset(rkey, 0, sizeof(rkey));
}
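/* Illustration (not library code): a standalone check of the padding trick used
 * above. The first XOR pass leaves rkey holding key XOR 0x5c (the HMAC opad,
 * used for the outer hash); the second pass XORs with 0x5c ^ 0x36, which
 * cancels the 0x5c and leaves key XOR 0x36 (the ipad, used for the inner
 * hash), exactly as RFC 2104 requires. Compile separately to verify. */
#include <assert.h>
#include <stdio.h>

int main(void) {
    unsigned int b;
    for (b = 0; b < 256; b++) {
        unsigned char k = (unsigned char)b;
        unsigned char opad_key = k ^ 0x5c;                  /* first pass */
        unsigned char ipad_key = opad_key ^ (0x5c ^ 0x36);  /* second pass */
        assert(ipad_key == (k ^ 0x36));                     /* RFC 2104 ipad */
    }
    printf("XOR with 0x5c, then with 0x5c^0x36, yields the 0x36 ipad masking for every byte\n");
    return 0;
}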
static void rustsecp256k1_v0_4_0_hmac_sha256_write(rustsecp256k1_v0_4_0_hmac_sha256 *hash, const unsigned char *data, size_t size) {
rustsecp256k1_v0_4_0_sha256_write(&hash->inner, data, size);
static void rustsecp256k1_v0_4_1_hmac_sha256_write(rustsecp256k1_v0_4_1_hmac_sha256 *hash, const unsigned char *data, size_t size) {
rustsecp256k1_v0_4_1_sha256_write(&hash->inner, data, size);
}
static void rustsecp256k1_v0_4_0_hmac_sha256_finalize(rustsecp256k1_v0_4_0_hmac_sha256 *hash, unsigned char *out32) {
static void rustsecp256k1_v0_4_1_hmac_sha256_finalize(rustsecp256k1_v0_4_1_hmac_sha256 *hash, unsigned char *out32) {
unsigned char temp[32];
rustsecp256k1_v0_4_0_sha256_finalize(&hash->inner, temp);
rustsecp256k1_v0_4_0_sha256_write(&hash->outer, temp, 32);
rustsecp256k1_v0_4_1_sha256_finalize(&hash->inner, temp);
rustsecp256k1_v0_4_1_sha256_write(&hash->outer, temp, 32);
memset(temp, 0, 32);
rustsecp256k1_v0_4_0_sha256_finalize(&hash->outer, out32);
rustsecp256k1_v0_4_1_sha256_finalize(&hash->outer, out32);
}
static void rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen) {
rustsecp256k1_v0_4_0_hmac_sha256 hmac;
static void rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen) {
rustsecp256k1_v0_4_1_hmac_sha256 hmac;
static const unsigned char zero[1] = {0x00};
static const unsigned char one[1] = {0x01};
@ -227,47 +227,47 @@ static void rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0
memset(rng->k, 0x00, 32); /* RFC6979 3.2.c. */
/* RFC6979 3.2.d. */
rustsecp256k1_v0_4_0_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, zero, 1);
rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, key, keylen);
rustsecp256k1_v0_4_0_hmac_sha256_finalize(&hmac, rng->k);
rustsecp256k1_v0_4_0_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_4_0_hmac_sha256_finalize(&hmac, rng->v);
rustsecp256k1_v0_4_1_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, zero, 1);
rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, key, keylen);
rustsecp256k1_v0_4_1_hmac_sha256_finalize(&hmac, rng->k);
rustsecp256k1_v0_4_1_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_4_1_hmac_sha256_finalize(&hmac, rng->v);
/* RFC6979 3.2.f. */
rustsecp256k1_v0_4_0_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, one, 1);
rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, key, keylen);
rustsecp256k1_v0_4_0_hmac_sha256_finalize(&hmac, rng->k);
rustsecp256k1_v0_4_0_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_4_0_hmac_sha256_finalize(&hmac, rng->v);
rustsecp256k1_v0_4_1_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, one, 1);
rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, key, keylen);
rustsecp256k1_v0_4_1_hmac_sha256_finalize(&hmac, rng->k);
rustsecp256k1_v0_4_1_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_4_1_hmac_sha256_finalize(&hmac, rng->v);
rng->retry = 0;
}
static void rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen) {
static void rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen) {
/* RFC6979 3.2.h. */
static const unsigned char zero[1] = {0x00};
if (rng->retry) {
rustsecp256k1_v0_4_0_hmac_sha256 hmac;
rustsecp256k1_v0_4_0_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, zero, 1);
rustsecp256k1_v0_4_0_hmac_sha256_finalize(&hmac, rng->k);
rustsecp256k1_v0_4_0_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_4_0_hmac_sha256_finalize(&hmac, rng->v);
rustsecp256k1_v0_4_1_hmac_sha256 hmac;
rustsecp256k1_v0_4_1_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, zero, 1);
rustsecp256k1_v0_4_1_hmac_sha256_finalize(&hmac, rng->k);
rustsecp256k1_v0_4_1_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_4_1_hmac_sha256_finalize(&hmac, rng->v);
}
while (outlen > 0) {
rustsecp256k1_v0_4_0_hmac_sha256 hmac;
rustsecp256k1_v0_4_1_hmac_sha256 hmac;
int now = outlen;
rustsecp256k1_v0_4_0_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_4_0_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_4_0_hmac_sha256_finalize(&hmac, rng->v);
rustsecp256k1_v0_4_1_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_4_1_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_4_1_hmac_sha256_finalize(&hmac, rng->v);
if (now > 32) {
now = 32;
}
@ -279,7 +279,7 @@ static void rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_4
rng->retry = 1;
}
static void rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 *rng) {
static void rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 *rng) {
memset(rng->k, 0, 32);
memset(rng->v, 0, 32);
rng->retry = 0;

View File

@ -0,0 +1,42 @@
/***********************************************************************
* Copyright (c) 2020 Peter Dettman *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_MODINV32_H
#define SECP256K1_MODINV32_H
#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
#endif
#include "util.h"
/* A signed 30-bit limb representation of integers.
*
* Its value is sum(v[i] * 2^(30*i), i=0..8). */
typedef struct {
int32_t v[9];
} rustsecp256k1_v0_4_1_modinv32_signed30;
typedef struct {
/* The modulus in signed30 notation, must be odd and in [3, 2^256]. */
rustsecp256k1_v0_4_1_modinv32_signed30 modulus;
/* modulus^{-1} mod 2^30 */
uint32_t modulus_inv30;
} rustsecp256k1_v0_4_1_modinv32_modinfo;
/* Replace x with its modular inverse mod modinfo->modulus. x must be in range [0, modulus).
* If x is zero, the result will be zero as well. If not, the inverse must exist (i.e., the gcd of
* x and modulus must be 1). These rules are automatically satisfied if the modulus is prime.
*
* On output, all of x's limbs will be in [0, 2^30).
*/
static void rustsecp256k1_v0_4_1_modinv32_var(rustsecp256k1_v0_4_1_modinv32_signed30 *x, const rustsecp256k1_v0_4_1_modinv32_modinfo *modinfo);
/* Same as rustsecp256k1_v0_4_1_modinv32_var, but constant time in x (not in the modulus). */
static void rustsecp256k1_v0_4_1_modinv32(rustsecp256k1_v0_4_1_modinv32_signed30 *x, const rustsecp256k1_v0_4_1_modinv32_modinfo *modinfo);
#endif /* SECP256K1_MODINV32_H */
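/* Illustration (not library code): a standalone sketch of the signed30
 * representation declared above. An integer is stored as nine int32_t limbs,
 * each nominally in [0,2^30), with value sum(v[i] * 2^(30*i), i=0..8). Here a
 * small 64-bit value is split into the two lowest limbs and re-evaluated to
 * confirm the encoding round-trips. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint64_t x = (UINT64_C(1) << 35) + 7;  /* example value */
    int32_t v[9] = {0};
    uint64_t back;

    v[0] = (int32_t)(x & 0x3FFFFFFF);          /* limb 0: bits 0..29 */
    v[1] = (int32_t)((x >> 30) & 0x3FFFFFFF);  /* limb 1: bits 30..59 */

    back = (uint64_t)v[0] + ((uint64_t)v[1] << 30);  /* sum(v[i] * 2^(30*i)) */
    assert(back == x);
    printf("limbs: v[0]=%d v[1]=%d reassemble to %llu\n",
           v[0], v[1], (unsigned long long)back);
    return 0;
}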

View File

@ -0,0 +1,587 @@
/***********************************************************************
* Copyright (c) 2020 Peter Dettman *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_MODINV32_IMPL_H
#define SECP256K1_MODINV32_IMPL_H
#include "modinv32.h"
#include "util.h"
#include <stdlib.h>
/* This file implements modular inversion based on the paper "Fast constant-time gcd computation and
* modular inversion" by Daniel J. Bernstein and Bo-Yin Yang.
*
* For an explanation of the algorithm, see doc/safegcd_implementation.md. This file contains an
* implementation for N=30, using 30-bit signed limbs represented as int32_t.
*/
#ifdef VERIFY
static const rustsecp256k1_v0_4_1_modinv32_signed30 SECP256K1_SIGNED30_ONE = {{1}};
/* Compute a*factor and put it in r. All but the top limb in r will be in range [0,2^30). */
static void rustsecp256k1_v0_4_1_modinv32_mul_30(rustsecp256k1_v0_4_1_modinv32_signed30 *r, const rustsecp256k1_v0_4_1_modinv32_signed30 *a, int alen, int32_t factor) {
const int32_t M30 = (int32_t)(UINT32_MAX >> 2);
int64_t c = 0;
int i;
for (i = 0; i < 8; ++i) {
if (i < alen) c += (int64_t)a->v[i] * factor;
r->v[i] = (int32_t)c & M30; c >>= 30;
}
if (8 < alen) c += (int64_t)a->v[8] * factor;
VERIFY_CHECK(c == (int32_t)c);
r->v[8] = (int32_t)c;
}
/* Return -1 for a<b*factor, 0 for a==b*factor, 1 for a>b*factor. A consists of alen limbs; b has 9. */
static int rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(const rustsecp256k1_v0_4_1_modinv32_signed30 *a, int alen, const rustsecp256k1_v0_4_1_modinv32_signed30 *b, int32_t factor) {
int i;
rustsecp256k1_v0_4_1_modinv32_signed30 am, bm;
rustsecp256k1_v0_4_1_modinv32_mul_30(&am, a, alen, 1); /* Normalize all but the top limb of a. */
rustsecp256k1_v0_4_1_modinv32_mul_30(&bm, b, 9, factor);
for (i = 0; i < 8; ++i) {
/* Verify that all but the top limb of a and b are normalized. */
VERIFY_CHECK(am.v[i] >> 30 == 0);
VERIFY_CHECK(bm.v[i] >> 30 == 0);
}
for (i = 8; i >= 0; --i) {
if (am.v[i] < bm.v[i]) return -1;
if (am.v[i] > bm.v[i]) return 1;
}
return 0;
}
#endif
/* Take as input a signed30 number in range (-2*modulus,modulus), and add a multiple of the modulus
* to it to bring it to range [0,modulus). If sign < 0, the input will also be negated in the
* process. The input must have limbs in range (-2^30,2^30). The output will have limbs in range
* [0,2^30). */
static void rustsecp256k1_v0_4_1_modinv32_normalize_30(rustsecp256k1_v0_4_1_modinv32_signed30 *r, int32_t sign, const rustsecp256k1_v0_4_1_modinv32_modinfo *modinfo) {
const int32_t M30 = (int32_t)(UINT32_MAX >> 2);
int32_t r0 = r->v[0], r1 = r->v[1], r2 = r->v[2], r3 = r->v[3], r4 = r->v[4],
r5 = r->v[5], r6 = r->v[6], r7 = r->v[7], r8 = r->v[8];
int32_t cond_add, cond_negate;
#ifdef VERIFY
/* Verify that all limbs are in range (-2^30,2^30). */
int i;
for (i = 0; i < 9; ++i) {
VERIFY_CHECK(r->v[i] >= -M30);
VERIFY_CHECK(r->v[i] <= M30);
}
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, -2) > 0); /* r > -2*modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */
#endif
/* In a first step, add the modulus if the input is negative, and then negate if requested.
* This brings r from range (-2*modulus,modulus) to range (-modulus,modulus). As all input
* limbs are in range (-2^30,2^30), this cannot overflow an int32_t. Note that the right
* shifts below are signed sign-extending shifts (see assumptions.h for tests that that is
* indeed the behavior of the right shift operator). */
cond_add = r8 >> 31;
r0 += modinfo->modulus.v[0] & cond_add;
r1 += modinfo->modulus.v[1] & cond_add;
r2 += modinfo->modulus.v[2] & cond_add;
r3 += modinfo->modulus.v[3] & cond_add;
r4 += modinfo->modulus.v[4] & cond_add;
r5 += modinfo->modulus.v[5] & cond_add;
r6 += modinfo->modulus.v[6] & cond_add;
r7 += modinfo->modulus.v[7] & cond_add;
r8 += modinfo->modulus.v[8] & cond_add;
cond_negate = sign >> 31;
r0 = (r0 ^ cond_negate) - cond_negate;
r1 = (r1 ^ cond_negate) - cond_negate;
r2 = (r2 ^ cond_negate) - cond_negate;
r3 = (r3 ^ cond_negate) - cond_negate;
r4 = (r4 ^ cond_negate) - cond_negate;
r5 = (r5 ^ cond_negate) - cond_negate;
r6 = (r6 ^ cond_negate) - cond_negate;
r7 = (r7 ^ cond_negate) - cond_negate;
r8 = (r8 ^ cond_negate) - cond_negate;
/* Propagate the top bits, to bring limbs back to range (-2^30,2^30). */
r1 += r0 >> 30; r0 &= M30;
r2 += r1 >> 30; r1 &= M30;
r3 += r2 >> 30; r2 &= M30;
r4 += r3 >> 30; r3 &= M30;
r5 += r4 >> 30; r4 &= M30;
r6 += r5 >> 30; r5 &= M30;
r7 += r6 >> 30; r6 &= M30;
r8 += r7 >> 30; r7 &= M30;
/* In a second step add the modulus again if the result is still negative, bringing r to range
* [0,modulus). */
cond_add = r8 >> 31;
r0 += modinfo->modulus.v[0] & cond_add;
r1 += modinfo->modulus.v[1] & cond_add;
r2 += modinfo->modulus.v[2] & cond_add;
r3 += modinfo->modulus.v[3] & cond_add;
r4 += modinfo->modulus.v[4] & cond_add;
r5 += modinfo->modulus.v[5] & cond_add;
r6 += modinfo->modulus.v[6] & cond_add;
r7 += modinfo->modulus.v[7] & cond_add;
r8 += modinfo->modulus.v[8] & cond_add;
/* And propagate again. */
r1 += r0 >> 30; r0 &= M30;
r2 += r1 >> 30; r1 &= M30;
r3 += r2 >> 30; r2 &= M30;
r4 += r3 >> 30; r3 &= M30;
r5 += r4 >> 30; r4 &= M30;
r6 += r5 >> 30; r5 &= M30;
r7 += r6 >> 30; r6 &= M30;
r8 += r7 >> 30; r7 &= M30;
r->v[0] = r0;
r->v[1] = r1;
r->v[2] = r2;
r->v[3] = r3;
r->v[4] = r4;
r->v[5] = r5;
r->v[6] = r6;
r->v[7] = r7;
r->v[8] = r8;
#ifdef VERIFY
VERIFY_CHECK(r0 >> 30 == 0);
VERIFY_CHECK(r1 >> 30 == 0);
VERIFY_CHECK(r2 >> 30 == 0);
VERIFY_CHECK(r3 >> 30 == 0);
VERIFY_CHECK(r4 >> 30 == 0);
VERIFY_CHECK(r5 >> 30 == 0);
VERIFY_CHECK(r6 >> 30 == 0);
VERIFY_CHECK(r7 >> 30 == 0);
VERIFY_CHECK(r8 >> 30 == 0);
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 0) >= 0); /* r >= 0 */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */
#endif
}
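/* Illustration (not library code): a standalone sketch of the two branch-free
 * idioms used in the normalization above. An arithmetic right shift of the
 * sign word produces an all-ones mask exactly when the value is negative (the
 * library checks this sign-extension behavior in assumptions.h); `x & mask`
 * then selects a conditional addend, and `(x ^ mask) - mask` is a conditional
 * negation. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    int32_t samples[5] = {-123456789, -1, 0, 1, 987654321};
    int i;
    for (i = 0; i < 5; i++) {
        int32_t x = samples[i];
        int32_t mask = x >> 31;              /* -1 if x < 0, else 0 (sign-extending shift) */
        int32_t addend = 1000 & mask;        /* "add 1000 only if negative" */
        int32_t negated = (x ^ mask) - mask; /* "negate only if mask is set" */
        assert(addend == (x < 0 ? 1000 : 0));
        assert(negated == (x < 0 ? -x : x));
    }
    printf("mask-based conditional add and negate agree with the branching versions\n");
    return 0;
}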
/* Data type for transition matrices (see section 3 of explanation).
*
* t = [ u v ]
* [ q r ]
*/
typedef struct {
int32_t u, v, q, r;
} rustsecp256k1_v0_4_1_modinv32_trans2x2;
/* Compute the transition matrix and zeta for 30 divsteps.
*
* Input: zeta: initial zeta
* f0: bottom limb of initial f
* g0: bottom limb of initial g
* Output: t: transition matrix
* Return: final zeta
*
* Implements the divsteps_n_matrix function from the explanation.
*/
static int32_t rustsecp256k1_v0_4_1_modinv32_divsteps_30(int32_t zeta, uint32_t f0, uint32_t g0, rustsecp256k1_v0_4_1_modinv32_trans2x2 *t) {
/* u,v,q,r are the elements of the transformation matrix being built up,
* starting with the identity matrix. Semantically they are signed integers
* in range [-2^30,2^30], but here represented as unsigned mod 2^32. This
* permits left shifting (which is UB for negative numbers). The range
* being inside [-2^31,2^31) means that casting to signed works correctly.
*/
uint32_t u = 1, v = 0, q = 0, r = 1;
uint32_t c1, c2, f = f0, g = g0, x, y, z;
int i;
for (i = 0; i < 30; ++i) {
VERIFY_CHECK((f & 1) == 1); /* f must always be odd */
VERIFY_CHECK((u * f0 + v * g0) == f << i);
VERIFY_CHECK((q * f0 + r * g0) == g << i);
/* Compute conditional masks for (zeta < 0) and for (g & 1). */
c1 = zeta >> 31;
c2 = -(g & 1);
/* Compute x,y,z, conditionally negated versions of f,u,v. */
x = (f ^ c1) - c1;
y = (u ^ c1) - c1;
z = (v ^ c1) - c1;
/* Conditionally add x,y,z to g,q,r. */
g += x & c2;
q += y & c2;
r += z & c2;
/* In what follows, c1 is a condition mask for (zeta < 0) and (g & 1). */
c1 &= c2;
/* Conditionally change zeta into -zeta-2 or zeta-1. */
zeta = (zeta ^ c1) - 1;
/* Conditionally add g,q,r to f,u,v. */
f += g & c1;
u += q & c1;
v += r & c1;
/* Shifts */
g >>= 1;
u <<= 1;
v <<= 1;
/* Bounds on zeta that follow from the bounds on iteration count (max 20*30 divsteps). */
VERIFY_CHECK(zeta >= -601 && zeta <= 601);
}
/* Return data in t and return value. */
t->u = (int32_t)u;
t->v = (int32_t)v;
t->q = (int32_t)q;
t->r = (int32_t)r;
/* The determinant of t must be a power of two. This guarantees that multiplication with t
* does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
* will be divided out again). As each divstep's individual matrix has determinant 2, the
* aggregate of 30 of them will have determinant 2^30. */
VERIFY_CHECK((int64_t)t->u * t->r - (int64_t)t->v * t->q == ((int64_t)1) << 30);
return zeta;
}
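/* Illustration (not library code): the plain Bernstein-Yang divstep that the
 * batched, branch-free code above implements. The library uses the shifted
 * variable zeta = -(delta+1/2) and packs 30 steps into a 2x2 transition
 * matrix; this reference sketch keeps the explicit delta and branches. With f
 * odd, iterating the step drives g to 0, at which point |f| is gcd(f0, g0). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    int64_t delta = 1, f = 21, g = 12;  /* f must be odd; gcd(21, 12) == 3 */
    int steps = 0;
    while (g != 0 && steps < 1000) {
        if (delta > 0 && (g & 1)) {
            int64_t tmp = f;
            delta = 1 - delta;
            f = g;
            g = (g - tmp) / 2;  /* g - f is even, so this division is exact */
        } else if (g & 1) {
            delta = 1 + delta;
            g = (g + f) / 2;    /* g + f is even */
        } else {
            delta = 1 + delta;
            g = g / 2;
        }
        steps++;
    }
    assert(g == 0);
    printf("after %d divsteps: |f| = %lld (= gcd(21, 12))\n",
           steps, (long long)(f < 0 ? -f : f));
    return 0;
}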
/* Compute the transition matrix and eta for 30 divsteps (variable time).
*
* Input: eta: initial eta
* f0: bottom limb of initial f
* g0: bottom limb of initial g
* Output: t: transition matrix
* Return: final eta
*
* Implements the divsteps_n_matrix_var function from the explanation.
*/
static int32_t rustsecp256k1_v0_4_1_modinv32_divsteps_30_var(int32_t eta, uint32_t f0, uint32_t g0, rustsecp256k1_v0_4_1_modinv32_trans2x2 *t) {
/* inv256[i] = -(2*i+1)^-1 (mod 256) */
static const uint8_t inv256[128] = {
0xFF, 0x55, 0x33, 0x49, 0xC7, 0x5D, 0x3B, 0x11, 0x0F, 0xE5, 0xC3, 0x59,
0xD7, 0xED, 0xCB, 0x21, 0x1F, 0x75, 0x53, 0x69, 0xE7, 0x7D, 0x5B, 0x31,
0x2F, 0x05, 0xE3, 0x79, 0xF7, 0x0D, 0xEB, 0x41, 0x3F, 0x95, 0x73, 0x89,
0x07, 0x9D, 0x7B, 0x51, 0x4F, 0x25, 0x03, 0x99, 0x17, 0x2D, 0x0B, 0x61,
0x5F, 0xB5, 0x93, 0xA9, 0x27, 0xBD, 0x9B, 0x71, 0x6F, 0x45, 0x23, 0xB9,
0x37, 0x4D, 0x2B, 0x81, 0x7F, 0xD5, 0xB3, 0xC9, 0x47, 0xDD, 0xBB, 0x91,
0x8F, 0x65, 0x43, 0xD9, 0x57, 0x6D, 0x4B, 0xA1, 0x9F, 0xF5, 0xD3, 0xE9,
0x67, 0xFD, 0xDB, 0xB1, 0xAF, 0x85, 0x63, 0xF9, 0x77, 0x8D, 0x6B, 0xC1,
0xBF, 0x15, 0xF3, 0x09, 0x87, 0x1D, 0xFB, 0xD1, 0xCF, 0xA5, 0x83, 0x19,
0x97, 0xAD, 0x8B, 0xE1, 0xDF, 0x35, 0x13, 0x29, 0xA7, 0x3D, 0x1B, 0xF1,
0xEF, 0xC5, 0xA3, 0x39, 0xB7, 0xCD, 0xAB, 0x01
};
/* Transformation matrix; see comments in rustsecp256k1_v0_4_1_modinv32_divsteps_30. */
uint32_t u = 1, v = 0, q = 0, r = 1;
uint32_t f = f0, g = g0, m;
uint16_t w;
int i = 30, limit, zeros;
for (;;) {
/* Use a sentinel bit to count zeros only up to i. */
zeros = rustsecp256k1_v0_4_1_ctz32_var(g | (UINT32_MAX << i));
/* Perform zeros divsteps at once; they all just divide g by two. */
g >>= zeros;
u <<= zeros;
v <<= zeros;
eta -= zeros;
i -= zeros;
/* We're done once we've done 30 divsteps. */
if (i == 0) break;
VERIFY_CHECK((f & 1) == 1);
VERIFY_CHECK((g & 1) == 1);
VERIFY_CHECK((u * f0 + v * g0) == f << (30 - i));
VERIFY_CHECK((q * f0 + r * g0) == g << (30 - i));
/* Bounds on eta that follow from the bounds on iteration count (max 25*30 divsteps). */
VERIFY_CHECK(eta >= -751 && eta <= 751);
/* If eta is negative, negate it and replace f,g with g,-f. */
if (eta < 0) {
uint32_t tmp;
eta = -eta;
tmp = f; f = g; g = -tmp;
tmp = u; u = q; q = -tmp;
tmp = v; v = r; r = -tmp;
}
/* eta is now >= 0. In what follows we're going to cancel out the bottom bits of g. No more
* than i can be cancelled out (as we'd be done before that point), and no more than eta+1
* can be done as its sign will flip once that happens. */
limit = ((int)eta + 1) > i ? i : ((int)eta + 1);
/* m is a mask for the bottom min(limit, 8) bits (our table only supports 8 bits). */
VERIFY_CHECK(limit > 0 && limit <= 30);
m = (UINT32_MAX >> (32 - limit)) & 255U;
/* Find what multiple of f must be added to g to cancel its bottom min(limit, 8) bits. */
w = (g * inv256[(f >> 1) & 127]) & m;
/* Do so. */
g += f * w;
q += u * w;
r += v * w;
VERIFY_CHECK((g & m) == 0);
}
/* Return data in t and return value. */
t->u = (int32_t)u;
t->v = (int32_t)v;
t->q = (int32_t)q;
t->r = (int32_t)r;
/* The determinant of t must be a power of two. This guarantees that multiplication with t
* does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
* will be divided out again). As each divstep's individual matrix has determinant 2, the
* aggregate of 30 of them will have determinant 2^30. */
VERIFY_CHECK((int64_t)t->u * t->r - (int64_t)t->v * t->q == ((int64_t)1) << 30);
return eta;
}
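/* Illustration (not library code): a standalone sketch of the sentinel trick
 * used above. OR-ing g with (UINT32_MAX << i) forces bit i and everything
 * above it to one, so a trailing-zero count of the result is min(ctz(g), i):
 * the number of divsteps that can be batched without exceeding the i steps
 * that remain. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simple portable trailing-zero count; x must be nonzero. */
static int ctz32(uint32_t x) {
    int n = 0;
    while ((x & 1) == 0) { x >>= 1; n++; }
    return n;
}

int main(void) {
    uint32_t g = 0x140u;  /* binary 101000000 -> 6 trailing zero bits */
    int i;
    for (i = 1; i <= 10; i++) {
        int capped = ctz32(g | (UINT32_MAX << i));   /* sentinel at bit i */
        int expected = ctz32(g) < i ? ctz32(g) : i;  /* min(ctz(g), i) */
        assert(capped == expected);
    }
    printf("ctz(g | (UINT32_MAX << i)) == min(ctz(g), i) over the tested range\n");
    return 0;
}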
/* Compute (t/2^30) * [d, e] mod modulus, where t is a transition matrix for 30 divsteps.
*
* On input and output, d and e are in range (-2*modulus,modulus). All output limbs will be in range
* (-2^30,2^30).
*
* This implements the update_de function from the explanation.
*/
static void rustsecp256k1_v0_4_1_modinv32_update_de_30(rustsecp256k1_v0_4_1_modinv32_signed30 *d, rustsecp256k1_v0_4_1_modinv32_signed30 *e, const rustsecp256k1_v0_4_1_modinv32_trans2x2 *t, const rustsecp256k1_v0_4_1_modinv32_modinfo* modinfo) {
const int32_t M30 = (int32_t)(UINT32_MAX >> 2);
const int32_t u = t->u, v = t->v, q = t->q, r = t->r;
int32_t di, ei, md, me, sd, se;
int64_t cd, ce;
int i;
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */
VERIFY_CHECK((labs(u) + labs(v)) >= 0); /* |u|+|v| doesn't overflow */
VERIFY_CHECK((labs(q) + labs(r)) >= 0); /* |q|+|r| doesn't overflow */
VERIFY_CHECK((labs(u) + labs(v)) <= M30 + 1); /* |u|+|v| <= 2^30 */
VERIFY_CHECK((labs(q) + labs(r)) <= M30 + 1); /* |q|+|r| <= 2^30 */
#endif
/* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
sd = d->v[8] >> 31;
se = e->v[8] >> 31;
md = (u & sd) + (v & se);
me = (q & sd) + (r & se);
/* Begin computing t*[d,e]. */
di = d->v[0];
ei = e->v[0];
cd = (int64_t)u * di + (int64_t)v * ei;
ce = (int64_t)q * di + (int64_t)r * ei;
/* Correct md,me so that t*[d,e]+modulus*[md,me] has 30 zero bottom bits. */
md -= (modinfo->modulus_inv30 * (uint32_t)cd + md) & M30;
me -= (modinfo->modulus_inv30 * (uint32_t)ce + me) & M30;
/* Update the beginning of computation for t*[d,e]+modulus*[md,me] now md,me are known. */
cd += (int64_t)modinfo->modulus.v[0] * md;
ce += (int64_t)modinfo->modulus.v[0] * me;
/* Verify that the low 30 bits of the computation are indeed zero, and then throw them away. */
VERIFY_CHECK(((int32_t)cd & M30) == 0); cd >>= 30;
VERIFY_CHECK(((int32_t)ce & M30) == 0); ce >>= 30;
/* Now iteratively compute limb i=1..8 of t*[d,e]+modulus*[md,me], and store them in output
* limb i-1 (shifting down by 30 bits). */
for (i = 1; i < 9; ++i) {
di = d->v[i];
ei = e->v[i];
cd += (int64_t)u * di + (int64_t)v * ei;
ce += (int64_t)q * di + (int64_t)r * ei;
cd += (int64_t)modinfo->modulus.v[i] * md;
ce += (int64_t)modinfo->modulus.v[i] * me;
d->v[i - 1] = (int32_t)cd & M30; cd >>= 30;
e->v[i - 1] = (int32_t)ce & M30; ce >>= 30;
}
/* What remains is limb 9 of t*[d,e]+modulus*[md,me]; store it as output limb 8. */
d->v[8] = (int32_t)cd;
e->v[8] = (int32_t)ce;
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */
#endif
}
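/* Illustration (not library code): a standalone sketch of the correction step
 * used above. Because modulus_inv30 is the inverse of the (odd) modulus
 * modulo 2^30, picking md = -(modulus_inv30 * cd) mod 2^30 makes
 * cd + modulus*md divisible by 2^30, so the low limb can be shifted away
 * without losing information. (The real code also folds a sign correction into
 * md; this sketch starts from md = 0 and uses example values.) */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    const uint32_t M30 = UINT32_MAX >> 2;  /* 2^30 - 1 */
    uint32_t modulus = 0x3D1ABB55u;        /* any odd value below 2^30 works here */
    uint32_t cd = 0x12345678u;             /* example low limb of t*[d,e] */
    uint32_t inv, md;
    uint64_t corrected;
    int i;

    /* Newton/Hensel iteration: each pass doubles the number of correct low bits,
     * starting from inv == modulus, which is already correct mod 8 for odd modulus. */
    inv = modulus;
    for (i = 0; i < 4; i++) inv *= 2 - modulus * inv;  /* now correct mod 2^32 */
    assert(((modulus * inv) & M30) == 1);

    md = (uint32_t)(0u - inv * cd) & M30;  /* md = -(inv * cd) mod 2^30 */
    corrected = (uint64_t)cd + (uint64_t)modulus * md;
    assert((corrected & M30) == 0);        /* bottom 30 bits cancelled */
    printf("cd + modulus*md = 0x%llx, divisible by 2^30\n",
           (unsigned long long)corrected);
    return 0;
}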
/* Compute (t/2^30) * [f, g], where t is a transition matrix for 30 divsteps.
*
* This implements the update_fg function from the explanation.
*/
static void rustsecp256k1_v0_4_1_modinv32_update_fg_30(rustsecp256k1_v0_4_1_modinv32_signed30 *f, rustsecp256k1_v0_4_1_modinv32_signed30 *g, const rustsecp256k1_v0_4_1_modinv32_trans2x2 *t) {
const int32_t M30 = (int32_t)(UINT32_MAX >> 2);
const int32_t u = t->u, v = t->v, q = t->q, r = t->r;
int32_t fi, gi;
int64_t cf, cg;
int i;
/* Start computing t*[f,g]. */
fi = f->v[0];
gi = g->v[0];
cf = (int64_t)u * fi + (int64_t)v * gi;
cg = (int64_t)q * fi + (int64_t)r * gi;
/* Verify that the bottom 30 bits of the result are zero, and then throw them away. */
VERIFY_CHECK(((int32_t)cf & M30) == 0); cf >>= 30;
VERIFY_CHECK(((int32_t)cg & M30) == 0); cg >>= 30;
/* Now iteratively compute limb i=1..8 of t*[f,g], and store them in output limb i-1 (shifting
* down by 30 bits). */
for (i = 1; i < 9; ++i) {
fi = f->v[i];
gi = g->v[i];
cf += (int64_t)u * fi + (int64_t)v * gi;
cg += (int64_t)q * fi + (int64_t)r * gi;
f->v[i - 1] = (int32_t)cf & M30; cf >>= 30;
g->v[i - 1] = (int32_t)cg & M30; cg >>= 30;
}
/* What remains is limb 9 of t*[f,g]; store it as output limb 8. */
f->v[8] = (int32_t)cf;
g->v[8] = (int32_t)cg;
}
/* Compute (t/2^30) * [f, g], where t is a transition matrix for 30 divsteps.
*
* Version that operates on a variable number of limbs in f and g.
*
* This implements the update_fg function from the explanation in modinv64_impl.h.
*/
static void rustsecp256k1_v0_4_1_modinv32_update_fg_30_var(int len, rustsecp256k1_v0_4_1_modinv32_signed30 *f, rustsecp256k1_v0_4_1_modinv32_signed30 *g, const rustsecp256k1_v0_4_1_modinv32_trans2x2 *t) {
const int32_t M30 = (int32_t)(UINT32_MAX >> 2);
const int32_t u = t->u, v = t->v, q = t->q, r = t->r;
int32_t fi, gi;
int64_t cf, cg;
int i;
VERIFY_CHECK(len > 0);
/* Start computing t*[f,g]. */
fi = f->v[0];
gi = g->v[0];
cf = (int64_t)u * fi + (int64_t)v * gi;
cg = (int64_t)q * fi + (int64_t)r * gi;
/* Verify that the bottom 30 bits of the result are zero, and then throw them away. */
VERIFY_CHECK(((int32_t)cf & M30) == 0); cf >>= 30;
VERIFY_CHECK(((int32_t)cg & M30) == 0); cg >>= 30;
/* Now iteratively compute limb i=1..len of t*[f,g], and store them in output limb i-1 (shifting
* down by 30 bits). */
for (i = 1; i < len; ++i) {
fi = f->v[i];
gi = g->v[i];
cf += (int64_t)u * fi + (int64_t)v * gi;
cg += (int64_t)q * fi + (int64_t)r * gi;
f->v[i - 1] = (int32_t)cf & M30; cf >>= 30;
g->v[i - 1] = (int32_t)cg & M30; cg >>= 30;
}
/* What remains is limb (len) of t*[f,g]; store it as output limb (len-1). */
f->v[len - 1] = (int32_t)cf;
g->v[len - 1] = (int32_t)cg;
}
/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (constant time in x). */
static void rustsecp256k1_v0_4_1_modinv32(rustsecp256k1_v0_4_1_modinv32_signed30 *x, const rustsecp256k1_v0_4_1_modinv32_modinfo *modinfo) {
/* Start with d=0, e=1, f=modulus, g=x, zeta=-1. */
rustsecp256k1_v0_4_1_modinv32_signed30 d = {{0}};
rustsecp256k1_v0_4_1_modinv32_signed30 e = {{1}};
rustsecp256k1_v0_4_1_modinv32_signed30 f = modinfo->modulus;
rustsecp256k1_v0_4_1_modinv32_signed30 g = *x;
int i;
int32_t zeta = -1; /* zeta = -(delta+1/2); delta is initially 1/2. */
/* Do 20 iterations of 30 divsteps each = 600 divsteps. 590 suffices for 256-bit inputs. */
for (i = 0; i < 20; ++i) {
/* Compute transition matrix and new zeta after 30 divsteps. */
rustsecp256k1_v0_4_1_modinv32_trans2x2 t;
zeta = rustsecp256k1_v0_4_1_modinv32_divsteps_30(zeta, f.v[0], g.v[0], &t);
/* Update d,e using that transition matrix. */
rustsecp256k1_v0_4_1_modinv32_update_de_30(&d, &e, &t, modinfo);
/* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
rustsecp256k1_v0_4_1_modinv32_update_fg_30(&f, &g, &t);
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
}
/* At this point sufficient iterations have been performed that g must have reached 0
* and (if g was not originally 0) f must now equal +/- GCD of the initial f, g
* values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
/* g == 0 */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, 9, &SECP256K1_SIGNED30_ONE, 0) == 0);
/* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, -1) == 0 ||
rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, 1) == 0 ||
(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) == 0 ||
rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) == 0)));
#endif
/* Optionally negate d, normalize to [0,modulus), and return it. */
rustsecp256k1_v0_4_1_modinv32_normalize_30(&d, f.v[8], modinfo);
*x = d;
}
/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (variable time). */
static void rustsecp256k1_v0_4_1_modinv32_var(rustsecp256k1_v0_4_1_modinv32_signed30 *x, const rustsecp256k1_v0_4_1_modinv32_modinfo *modinfo) {
/* Start with d=0, e=1, f=modulus, g=x, eta=-1. */
rustsecp256k1_v0_4_1_modinv32_signed30 d = {{0, 0, 0, 0, 0, 0, 0, 0, 0}};
rustsecp256k1_v0_4_1_modinv32_signed30 e = {{1, 0, 0, 0, 0, 0, 0, 0, 0}};
rustsecp256k1_v0_4_1_modinv32_signed30 f = modinfo->modulus;
rustsecp256k1_v0_4_1_modinv32_signed30 g = *x;
#ifdef VERIFY
int i = 0;
#endif
int j, len = 9;
int32_t eta = -1; /* eta = -delta; delta is initially 1 (faster for the variable-time code) */
int32_t cond, fn, gn;
/* Do iterations of 30 divsteps each until g=0. */
while (1) {
/* Compute transition matrix and new eta after 30 divsteps. */
rustsecp256k1_v0_4_1_modinv32_trans2x2 t;
eta = rustsecp256k1_v0_4_1_modinv32_divsteps_30_var(eta, f.v[0], g.v[0], &t);
/* Update d,e using that transition matrix. */
rustsecp256k1_v0_4_1_modinv32_update_de_30(&d, &e, &t, modinfo);
/* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
rustsecp256k1_v0_4_1_modinv32_update_fg_30_var(len, &f, &g, &t);
/* If the bottom limb of g is 0, there is a chance g=0. */
if (g.v[0] == 0) {
cond = 0;
/* Check if all other limbs are also 0. */
for (j = 1; j < len; ++j) {
cond |= g.v[j];
}
/* If so, we're done. */
if (cond == 0) break;
}
/* Determine if len>1 and limb (len-1) of both f and g is 0 or -1. */
fn = f.v[len - 1];
gn = g.v[len - 1];
cond = ((int32_t)len - 2) >> 31;
cond |= fn ^ (fn >> 31);
cond |= gn ^ (gn >> 31);
/* If so, reduce length, propagating the sign of f and g's top limb into the one below. */
if (cond == 0) {
f.v[len - 2] |= (uint32_t)fn << 30;
g.v[len - 2] |= (uint32_t)gn << 30;
--len;
}
#ifdef VERIFY
VERIFY_CHECK(++i < 25); /* We should never need more than 25*30 = 750 divsteps */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
}
/* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of
* the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
/* g == 0 */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&g, len, &SECP256K1_SIGNED30_ONE, 0) == 0);
/* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, -1) == 0 ||
rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, 1) == 0 ||
(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
(rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) == 0 ||
rustsecp256k1_v0_4_1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) == 0)));
#endif
/* Optionally negate d, normalize to [0,modulus), and return it. */
rustsecp256k1_v0_4_1_modinv32_normalize_30(&d, f.v[len - 1], modinfo);
*x = d;
}
#endif /* SECP256K1_MODINV32_IMPL_H */

View File

@ -0,0 +1,46 @@
/***********************************************************************
* Copyright (c) 2020 Peter Dettman *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_MODINV64_H
#define SECP256K1_MODINV64_H
#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
#endif
#include "util.h"
#ifndef SECP256K1_WIDEMUL_INT128
#error "modinv64 requires 128-bit wide multiplication support"
#endif
/* A signed 62-bit limb representation of integers.
*
* Its value is sum(v[i] * 2^(62*i), i=0..4). */
typedef struct {
int64_t v[5];
} rustsecp256k1_v0_4_1_modinv64_signed62;
typedef struct {
/* The modulus in signed62 notation, must be odd and in [3, 2^256]. */
rustsecp256k1_v0_4_1_modinv64_signed62 modulus;
/* modulus^{-1} mod 2^62 */
uint64_t modulus_inv62;
} rustsecp256k1_v0_4_1_modinv64_modinfo;
/* Replace x with its modular inverse mod modinfo->modulus. x must be in range [0, modulus).
* If x is zero, the result will be zero as well. If not, the inverse must exist (i.e., the gcd of
* x and modulus must be 1). These rules are automatically satisfied if the modulus is prime.
*
* On output, all of x's limbs will be in [0, 2^62).
*/
static void rustsecp256k1_v0_4_1_modinv64_var(rustsecp256k1_v0_4_1_modinv64_signed62 *x, const rustsecp256k1_v0_4_1_modinv64_modinfo *modinfo);
/* Same as rustsecp256k1_v0_4_1_modinv64_var, but constant time in x (not in the modulus). */
static void rustsecp256k1_v0_4_1_modinv64(rustsecp256k1_v0_4_1_modinv64_signed62 *x, const rustsecp256k1_v0_4_1_modinv64_modinfo *modinfo);
#endif /* SECP256K1_MODINV64_H */

View File

@ -0,0 +1,593 @@
/***********************************************************************
* Copyright (c) 2020 Peter Dettman *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_MODINV64_IMPL_H
#define SECP256K1_MODINV64_IMPL_H
#include "modinv64.h"
#include "util.h"
/* This file implements modular inversion based on the paper "Fast constant-time gcd computation and
* modular inversion" by Daniel J. Bernstein and Bo-Yin Yang.
*
* For an explanation of the algorithm, see doc/safegcd_implementation.md. This file contains an
* implementation for N=62, using 62-bit signed limbs represented as int64_t.
*/
#ifdef VERIFY
/* Helper function to compute the absolute value of an int64_t.
* (we don't use abs/labs/llabs as it depends on the int sizes). */
static int64_t rustsecp256k1_v0_4_1_modinv64_abs(int64_t v) {
VERIFY_CHECK(v > INT64_MIN);
if (v < 0) return -v;
return v;
}
static const rustsecp256k1_v0_4_1_modinv64_signed62 SECP256K1_SIGNED62_ONE = {{1}};
/* Compute a*factor and put it in r. All but the top limb in r will be in range [0,2^62). */
static void rustsecp256k1_v0_4_1_modinv64_mul_62(rustsecp256k1_v0_4_1_modinv64_signed62 *r, const rustsecp256k1_v0_4_1_modinv64_signed62 *a, int alen, int64_t factor) {
const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
int128_t c = 0;
int i;
for (i = 0; i < 4; ++i) {
if (i < alen) c += (int128_t)a->v[i] * factor;
r->v[i] = (int64_t)c & M62; c >>= 62;
}
if (4 < alen) c += (int128_t)a->v[4] * factor;
VERIFY_CHECK(c == (int64_t)c);
r->v[4] = (int64_t)c;
}
/* Return -1 for a<b*factor, 0 for a==b*factor, 1 for a>b*factor. A has alen limbs; b has 5. */
static int rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(const rustsecp256k1_v0_4_1_modinv64_signed62 *a, int alen, const rustsecp256k1_v0_4_1_modinv64_signed62 *b, int64_t factor) {
int i;
rustsecp256k1_v0_4_1_modinv64_signed62 am, bm;
rustsecp256k1_v0_4_1_modinv64_mul_62(&am, a, alen, 1); /* Normalize all but the top limb of a. */
rustsecp256k1_v0_4_1_modinv64_mul_62(&bm, b, 5, factor);
for (i = 0; i < 4; ++i) {
/* Verify that all but the top limb of a and b are normalized. */
VERIFY_CHECK(am.v[i] >> 62 == 0);
VERIFY_CHECK(bm.v[i] >> 62 == 0);
}
for (i = 4; i >= 0; --i) {
if (am.v[i] < bm.v[i]) return -1;
if (am.v[i] > bm.v[i]) return 1;
}
return 0;
}
#endif
/* Take as input a signed62 number in range (-2*modulus,modulus), and add a multiple of the modulus
* to it to bring it to range [0,modulus). If sign < 0, the input will also be negated in the
* process. The input must have limbs in range (-2^62,2^62). The output will have limbs in range
* [0,2^62). */
static void rustsecp256k1_v0_4_1_modinv64_normalize_62(rustsecp256k1_v0_4_1_modinv64_signed62 *r, int64_t sign, const rustsecp256k1_v0_4_1_modinv64_modinfo *modinfo) {
const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
int64_t r0 = r->v[0], r1 = r->v[1], r2 = r->v[2], r3 = r->v[3], r4 = r->v[4];
int64_t cond_add, cond_negate;
#ifdef VERIFY
/* Verify that all limbs are in range (-2^62,2^62). */
int i;
for (i = 0; i < 5; ++i) {
VERIFY_CHECK(r->v[i] >= -M62);
VERIFY_CHECK(r->v[i] <= M62);
}
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, -2) > 0); /* r > -2*modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */
#endif
/* In a first step, add the modulus if the input is negative, and then negate if requested.
* This brings r from range (-2*modulus,modulus) to range (-modulus,modulus). As all input
* limbs are in range (-2^62,2^62), this cannot overflow an int64_t. Note that the right
* shifts below are signed sign-extending shifts (see assumptions.h for tests that that is
* indeed the behavior of the right shift operator). */
cond_add = r4 >> 63;
r0 += modinfo->modulus.v[0] & cond_add;
r1 += modinfo->modulus.v[1] & cond_add;
r2 += modinfo->modulus.v[2] & cond_add;
r3 += modinfo->modulus.v[3] & cond_add;
r4 += modinfo->modulus.v[4] & cond_add;
cond_negate = sign >> 63;
r0 = (r0 ^ cond_negate) - cond_negate;
r1 = (r1 ^ cond_negate) - cond_negate;
r2 = (r2 ^ cond_negate) - cond_negate;
r3 = (r3 ^ cond_negate) - cond_negate;
r4 = (r4 ^ cond_negate) - cond_negate;
/* Propagate the top bits, to bring limbs back to range (-2^62,2^62). */
r1 += r0 >> 62; r0 &= M62;
r2 += r1 >> 62; r1 &= M62;
r3 += r2 >> 62; r2 &= M62;
r4 += r3 >> 62; r3 &= M62;
/* In a second step add the modulus again if the result is still negative, bringing
* r to range [0,modulus). */
cond_add = r4 >> 63;
r0 += modinfo->modulus.v[0] & cond_add;
r1 += modinfo->modulus.v[1] & cond_add;
r2 += modinfo->modulus.v[2] & cond_add;
r3 += modinfo->modulus.v[3] & cond_add;
r4 += modinfo->modulus.v[4] & cond_add;
/* And propagate again. */
r1 += r0 >> 62; r0 &= M62;
r2 += r1 >> 62; r1 &= M62;
r3 += r2 >> 62; r2 &= M62;
r4 += r3 >> 62; r3 &= M62;
r->v[0] = r0;
r->v[1] = r1;
r->v[2] = r2;
r->v[3] = r3;
r->v[4] = r4;
#ifdef VERIFY
VERIFY_CHECK(r0 >> 62 == 0);
VERIFY_CHECK(r1 >> 62 == 0);
VERIFY_CHECK(r2 >> 62 == 0);
VERIFY_CHECK(r3 >> 62 == 0);
VERIFY_CHECK(r4 >> 62 == 0);
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */
#endif
}
/* Data type for transition matrices (see section 3 of explanation).
*
* t = [ u v ]
* [ q r ]
*/
typedef struct {
int64_t u, v, q, r;
} rustsecp256k1_v0_4_1_modinv64_trans2x2;
/* Compute the transition matrix and zeta for 59 divsteps (where zeta=-(delta+1/2)).
* Note that the transformation matrix is scaled by 2^62 and not 2^59.
*
* Input: zeta: initial zeta
* f0: bottom limb of initial f
* g0: bottom limb of initial g
* Output: t: transition matrix
* Return: final zeta
*
* Implements the divsteps_n_matrix function from the explanation.
*/
static int64_t rustsecp256k1_v0_4_1_modinv64_divsteps_59(int64_t zeta, uint64_t f0, uint64_t g0, rustsecp256k1_v0_4_1_modinv64_trans2x2 *t) {
/* u,v,q,r are the elements of the transformation matrix being built up,
* starting with the identity matrix times 8 (because the caller expects
* a result scaled by 2^62). Semantically they are signed integers
* in range [-2^62,2^62], but here represented as unsigned mod 2^64. This
* permits left shifting (which is UB for negative numbers). The range
* being inside [-2^63,2^63) means that casting to signed works correctly.
*/
uint64_t u = 8, v = 0, q = 0, r = 8;
uint64_t c1, c2, f = f0, g = g0, x, y, z;
int i;
for (i = 3; i < 62; ++i) {
VERIFY_CHECK((f & 1) == 1); /* f must always be odd */
VERIFY_CHECK((u * f0 + v * g0) == f << i);
VERIFY_CHECK((q * f0 + r * g0) == g << i);
/* Compute conditional masks for (zeta < 0) and for (g & 1). */
c1 = zeta >> 63;
c2 = -(g & 1);
/* Compute x,y,z, conditionally negated versions of f,u,v. */
x = (f ^ c1) - c1;
y = (u ^ c1) - c1;
z = (v ^ c1) - c1;
/* Conditionally add x,y,z to g,q,r. */
g += x & c2;
q += y & c2;
r += z & c2;
/* In what follows, c1 is a condition mask for (zeta < 0) and (g & 1). */
c1 &= c2;
/* Conditionally change zeta into -zeta-2 or zeta-1. */
zeta = (zeta ^ c1) - 1;
/* Conditionally add g,q,r to f,u,v. */
f += g & c1;
u += q & c1;
v += r & c1;
/* Shifts */
g >>= 1;
u <<= 1;
v <<= 1;
/* Bounds on zeta that follow from the bounds on iteration count (max 10*59 divsteps). */
VERIFY_CHECK(zeta >= -591 && zeta <= 591);
}
/* Return data in t and return value. */
t->u = (int64_t)u;
t->v = (int64_t)v;
t->q = (int64_t)q;
t->r = (int64_t)r;
/* The determinant of t must be a power of two. This guarantees that multiplication with t
* does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
* will be divided out again). As each divstep's individual matrix has determinant 2, the
* aggregate of 59 of them will have determinant 2^59. Multiplying with the initial
* 8*identity (which has determinant 2^6) means the overall outputs has determinant
* 2^65. */
VERIFY_CHECK((int128_t)t->u * t->r - (int128_t)t->v * t->q == ((int128_t)1) << 65);
return zeta;
}
/* Compute the transition matrix and eta for 62 divsteps (variable time, eta=-delta).
*
* Input: eta: initial eta
* f0: bottom limb of initial f
* g0: bottom limb of initial g
* Output: t: transition matrix
* Return: final eta
*
* Implements the divsteps_n_matrix_var function from the explanation.
*/
static int64_t rustsecp256k1_v0_4_1_modinv64_divsteps_62_var(int64_t eta, uint64_t f0, uint64_t g0, rustsecp256k1_v0_4_1_modinv64_trans2x2 *t) {
/* Transformation matrix; see comments in rustsecp256k1_v0_4_1_modinv64_divsteps_62. */
uint64_t u = 1, v = 0, q = 0, r = 1;
uint64_t f = f0, g = g0, m;
uint32_t w;
int i = 62, limit, zeros;
for (;;) {
/* Use a sentinel bit to count zeros only up to i. */
zeros = rustsecp256k1_v0_4_1_ctz64_var(g | (UINT64_MAX << i));
/* Perform zeros divsteps at once; they all just divide g by two. */
g >>= zeros;
u <<= zeros;
v <<= zeros;
eta -= zeros;
i -= zeros;
/* We're done once we've done 62 divsteps. */
if (i == 0) break;
VERIFY_CHECK((f & 1) == 1);
VERIFY_CHECK((g & 1) == 1);
VERIFY_CHECK((u * f0 + v * g0) == f << (62 - i));
VERIFY_CHECK((q * f0 + r * g0) == g << (62 - i));
/* Bounds on eta that follow from the bounds on iteration count (max 12*62 divsteps). */
VERIFY_CHECK(eta >= -745 && eta <= 745);
/* If eta is negative, negate it and replace f,g with g,-f. */
if (eta < 0) {
uint64_t tmp;
eta = -eta;
tmp = f; f = g; g = -tmp;
tmp = u; u = q; q = -tmp;
tmp = v; v = r; r = -tmp;
/* Use a formula to cancel out up to 6 bits of g. Also, no more than i can be cancelled
* out (as we'd be done before that point), and no more than eta+1 can be done as its
* sign will flip again once that happens. */
limit = ((int)eta + 1) > i ? i : ((int)eta + 1);
VERIFY_CHECK(limit > 0 && limit <= 62);
/* m is a mask for the bottom min(limit, 6) bits. */
m = (UINT64_MAX >> (64 - limit)) & 63U;
/* Find what multiple of f must be added to g to cancel its bottom min(limit, 6)
* bits. */
w = (f * g * (f * f - 2)) & m;
} else {
/* In this branch, use a simpler formula that only lets us cancel up to 4 bits of g, as
* eta tends to be smaller here. */
limit = ((int)eta + 1) > i ? i : ((int)eta + 1);
VERIFY_CHECK(limit > 0 && limit <= 62);
/* m is a mask for the bottom min(limit, 4) bits. */
m = (UINT64_MAX >> (64 - limit)) & 15U;
/* Find what multiple of f must be added to g to cancel its bottom min(limit, 4)
* bits. */
w = f + (((f + 1) & 4) << 1);
w = (-w * g) & m;
}
g += f * w;
q += u * w;
r += v * w;
VERIFY_CHECK((g & m) == 0);
}
/* Return data in t and return value. */
t->u = (int64_t)u;
t->v = (int64_t)v;
t->q = (int64_t)q;
t->r = (int64_t)r;
/* The determinant of t must be a power of two. This guarantees that multiplication with t
* does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
* will be divided out again). As each divstep's individual matrix has determinant 2, the
* aggregate of 62 of them will have determinant 2^62. */
VERIFY_CHECK((int128_t)t->u * t->r - (int128_t)t->v * t->q == ((int128_t)1) << 62);
return eta;
}
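/* Illustration (not library code): a standalone check of the identity behind
 * w = (f * g * (f * f - 2)) & m above. For every odd f, f*f mod 8 == 1, so
 * (f*f - 1)^2 mod 64 == 0 and therefore f*f*(f*f - 2) = (f*f - 1)^2 - 1 is
 * congruent to -1 mod 64. Hence f*w is congruent to -g mod 64, i.e. g + f*w
 * has (at least) its 6 bottom bits cancelled. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint32_t f, g;
    for (f = 1; f < 64; f += 2) {  /* all odd residues mod 64 */
        assert(((f * f * (f * f - 2)) & 63u) == 63u);  /* f*f*(f*f-2) == -1 mod 64 */
        for (g = 0; g < 64; g++) {
            uint32_t w = (f * g * (f * f - 2)) & 63u;
            assert(((g + f * w) & 63u) == 0);          /* 6 low bits of g cancelled */
        }
    }
    printf("f*f*(f*f-2) == -1 (mod 64) for all odd f; the 6 bottom bits always cancel\n");
    return 0;
}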
/* Compute (t/2^62) * [d, e] mod modulus, where t is a transition matrix scaled by 2^62.
*
* On input and output, d and e are in range (-2*modulus,modulus). All output limbs will be in range
* (-2^62,2^62).
*
* This implements the update_de function from the explanation.
*/
static void rustsecp256k1_v0_4_1_modinv64_update_de_62(rustsecp256k1_v0_4_1_modinv64_signed62 *d, rustsecp256k1_v0_4_1_modinv64_signed62 *e, const rustsecp256k1_v0_4_1_modinv64_trans2x2 *t, const rustsecp256k1_v0_4_1_modinv64_modinfo* modinfo) {
const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
const int64_t d0 = d->v[0], d1 = d->v[1], d2 = d->v[2], d3 = d->v[3], d4 = d->v[4];
const int64_t e0 = e->v[0], e1 = e->v[1], e2 = e->v[2], e3 = e->v[3], e4 = e->v[4];
const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
int64_t md, me, sd, se;
int128_t cd, ce;
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */
VERIFY_CHECK((rustsecp256k1_v0_4_1_modinv64_abs(u) + rustsecp256k1_v0_4_1_modinv64_abs(v)) >= 0); /* |u|+|v| doesn't overflow */
VERIFY_CHECK((rustsecp256k1_v0_4_1_modinv64_abs(q) + rustsecp256k1_v0_4_1_modinv64_abs(r)) >= 0); /* |q|+|r| doesn't overflow */
VERIFY_CHECK((rustsecp256k1_v0_4_1_modinv64_abs(u) + rustsecp256k1_v0_4_1_modinv64_abs(v)) <= M62 + 1); /* |u|+|v| <= 2^62 */
VERIFY_CHECK((rustsecp256k1_v0_4_1_modinv64_abs(q) + rustsecp256k1_v0_4_1_modinv64_abs(r)) <= M62 + 1); /* |q|+|r| <= 2^62 */
#endif
/* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
sd = d4 >> 63;
se = e4 >> 63;
md = (u & sd) + (v & se);
me = (q & sd) + (r & se);
/* Begin computing t*[d,e]. */
cd = (int128_t)u * d0 + (int128_t)v * e0;
ce = (int128_t)q * d0 + (int128_t)r * e0;
/* Correct md,me so that t*[d,e]+modulus*[md,me] has 62 zero bottom bits. */
md -= (modinfo->modulus_inv62 * (uint64_t)cd + md) & M62;
me -= (modinfo->modulus_inv62 * (uint64_t)ce + me) & M62;
/* Update the beginning of computation for t*[d,e]+modulus*[md,me] now md,me are known. */
cd += (int128_t)modinfo->modulus.v[0] * md;
ce += (int128_t)modinfo->modulus.v[0] * me;
/* Verify that the low 62 bits of the computation are indeed zero, and then throw them away. */
VERIFY_CHECK(((int64_t)cd & M62) == 0); cd >>= 62;
VERIFY_CHECK(((int64_t)ce & M62) == 0); ce >>= 62;
/* Compute limb 1 of t*[d,e]+modulus*[md,me], and store it as output limb 0 (= down shift). */
cd += (int128_t)u * d1 + (int128_t)v * e1;
ce += (int128_t)q * d1 + (int128_t)r * e1;
if (modinfo->modulus.v[1]) { /* Optimize for the case where limb of modulus is zero. */
cd += (int128_t)modinfo->modulus.v[1] * md;
ce += (int128_t)modinfo->modulus.v[1] * me;
}
d->v[0] = (int64_t)cd & M62; cd >>= 62;
e->v[0] = (int64_t)ce & M62; ce >>= 62;
/* Compute limb 2 of t*[d,e]+modulus*[md,me], and store it as output limb 1. */
cd += (int128_t)u * d2 + (int128_t)v * e2;
ce += (int128_t)q * d2 + (int128_t)r * e2;
if (modinfo->modulus.v[2]) { /* Optimize for the case where limb of modulus is zero. */
cd += (int128_t)modinfo->modulus.v[2] * md;
ce += (int128_t)modinfo->modulus.v[2] * me;
}
d->v[1] = (int64_t)cd & M62; cd >>= 62;
e->v[1] = (int64_t)ce & M62; ce >>= 62;
/* Compute limb 3 of t*[d,e]+modulus*[md,me], and store it as output limb 2. */
cd += (int128_t)u * d3 + (int128_t)v * e3;
ce += (int128_t)q * d3 + (int128_t)r * e3;
if (modinfo->modulus.v[3]) { /* Optimize for the case where limb of modulus is zero. */
cd += (int128_t)modinfo->modulus.v[3] * md;
ce += (int128_t)modinfo->modulus.v[3] * me;
}
d->v[2] = (int64_t)cd & M62; cd >>= 62;
e->v[2] = (int64_t)ce & M62; ce >>= 62;
/* Compute limb 4 of t*[d,e]+modulus*[md,me], and store it as output limb 3. */
cd += (int128_t)u * d4 + (int128_t)v * e4;
ce += (int128_t)q * d4 + (int128_t)r * e4;
cd += (int128_t)modinfo->modulus.v[4] * md;
ce += (int128_t)modinfo->modulus.v[4] * me;
d->v[3] = (int64_t)cd & M62; cd >>= 62;
e->v[3] = (int64_t)ce & M62; ce >>= 62;
/* What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4. */
d->v[4] = (int64_t)cd;
e->v[4] = (int64_t)ce;
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */
#endif
}
/* Compute (t/2^62) * [f, g], where t is a transition matrix scaled by 2^62.
*
* This implements the update_fg function from the explanation.
*/
static void rustsecp256k1_v0_4_1_modinv64_update_fg_62(rustsecp256k1_v0_4_1_modinv64_signed62 *f, rustsecp256k1_v0_4_1_modinv64_signed62 *g, const rustsecp256k1_v0_4_1_modinv64_trans2x2 *t) {
const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
const int64_t f0 = f->v[0], f1 = f->v[1], f2 = f->v[2], f3 = f->v[3], f4 = f->v[4];
const int64_t g0 = g->v[0], g1 = g->v[1], g2 = g->v[2], g3 = g->v[3], g4 = g->v[4];
const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
int128_t cf, cg;
/* Start computing t*[f,g]. */
cf = (int128_t)u * f0 + (int128_t)v * g0;
cg = (int128_t)q * f0 + (int128_t)r * g0;
/* Verify that the bottom 62 bits of the result are zero, and then throw them away. */
VERIFY_CHECK(((int64_t)cf & M62) == 0); cf >>= 62;
VERIFY_CHECK(((int64_t)cg & M62) == 0); cg >>= 62;
/* Compute limb 1 of t*[f,g], and store it as output limb 0 (= down shift). */
cf += (int128_t)u * f1 + (int128_t)v * g1;
cg += (int128_t)q * f1 + (int128_t)r * g1;
f->v[0] = (int64_t)cf & M62; cf >>= 62;
g->v[0] = (int64_t)cg & M62; cg >>= 62;
/* Compute limb 2 of t*[f,g], and store it as output limb 1. */
cf += (int128_t)u * f2 + (int128_t)v * g2;
cg += (int128_t)q * f2 + (int128_t)r * g2;
f->v[1] = (int64_t)cf & M62; cf >>= 62;
g->v[1] = (int64_t)cg & M62; cg >>= 62;
/* Compute limb 3 of t*[f,g], and store it as output limb 2. */
cf += (int128_t)u * f3 + (int128_t)v * g3;
cg += (int128_t)q * f3 + (int128_t)r * g3;
f->v[2] = (int64_t)cf & M62; cf >>= 62;
g->v[2] = (int64_t)cg & M62; cg >>= 62;
/* Compute limb 4 of t*[f,g], and store it as output limb 3. */
cf += (int128_t)u * f4 + (int128_t)v * g4;
cg += (int128_t)q * f4 + (int128_t)r * g4;
f->v[3] = (int64_t)cf & M62; cf >>= 62;
g->v[3] = (int64_t)cg & M62; cg >>= 62;
/* What remains is limb 5 of t*[f,g]; store it as output limb 4. */
f->v[4] = (int64_t)cf;
g->v[4] = (int64_t)cg;
}
/* Compute (t/2^62) * [f, g], where t is a transition matrix for 62 divsteps.
*
* Version that operates on a variable number of limbs in f and g.
*
* This implements the update_fg function from the explanation.
*/
static void rustsecp256k1_v0_4_1_modinv64_update_fg_62_var(int len, rustsecp256k1_v0_4_1_modinv64_signed62 *f, rustsecp256k1_v0_4_1_modinv64_signed62 *g, const rustsecp256k1_v0_4_1_modinv64_trans2x2 *t) {
const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
int64_t fi, gi;
int128_t cf, cg;
int i;
VERIFY_CHECK(len > 0);
/* Start computing t*[f,g]. */
fi = f->v[0];
gi = g->v[0];
cf = (int128_t)u * fi + (int128_t)v * gi;
cg = (int128_t)q * fi + (int128_t)r * gi;
/* Verify that the bottom 62 bits of the result are zero, and then throw them away. */
VERIFY_CHECK(((int64_t)cf & M62) == 0); cf >>= 62;
VERIFY_CHECK(((int64_t)cg & M62) == 0); cg >>= 62;
/* Now iteratively compute limb i=1..len of t*[f,g], and store them in output limb i-1 (shifting
* down by 62 bits). */
for (i = 1; i < len; ++i) {
fi = f->v[i];
gi = g->v[i];
cf += (int128_t)u * fi + (int128_t)v * gi;
cg += (int128_t)q * fi + (int128_t)r * gi;
f->v[i - 1] = (int64_t)cf & M62; cf >>= 62;
g->v[i - 1] = (int64_t)cg & M62; cg >>= 62;
}
/* What remains is limb (len) of t*[f,g]; store it as output limb (len-1). */
f->v[len - 1] = (int64_t)cf;
g->v[len - 1] = (int64_t)cg;
}
/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (constant time in x). */
static void rustsecp256k1_v0_4_1_modinv64(rustsecp256k1_v0_4_1_modinv64_signed62 *x, const rustsecp256k1_v0_4_1_modinv64_modinfo *modinfo) {
/* Start with d=0, e=1, f=modulus, g=x, zeta=-1. */
rustsecp256k1_v0_4_1_modinv64_signed62 d = {{0, 0, 0, 0, 0}};
rustsecp256k1_v0_4_1_modinv64_signed62 e = {{1, 0, 0, 0, 0}};
rustsecp256k1_v0_4_1_modinv64_signed62 f = modinfo->modulus;
rustsecp256k1_v0_4_1_modinv64_signed62 g = *x;
int i;
int64_t zeta = -1; /* zeta = -(delta+1/2); delta starts at 1/2. */
/* Do 10 iterations of 59 divsteps each = 590 divsteps. This suffices for 256-bit inputs. */
for (i = 0; i < 10; ++i) {
/* Compute transition matrix and new zeta after 59 divsteps. */
rustsecp256k1_v0_4_1_modinv64_trans2x2 t;
zeta = rustsecp256k1_v0_4_1_modinv64_divsteps_59(zeta, f.v[0], g.v[0], &t);
/* Update d,e using that transition matrix. */
rustsecp256k1_v0_4_1_modinv64_update_de_62(&d, &e, &t, modinfo);
/* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
rustsecp256k1_v0_4_1_modinv64_update_fg_62(&f, &g, &t);
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
}
/* At this point sufficient iterations have been performed that g must have reached 0
* and (if g was not originally 0) f must now equal +/- GCD of the initial f, g
* values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
/* g == 0 */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0);
/* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, -1) == 0 ||
rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, 1) == 0 ||
(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0 ||
rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) == 0)));
#endif
/* Optionally negate d, normalize to [0,modulus), and return it. */
rustsecp256k1_v0_4_1_modinv64_normalize_62(&d, f.v[4], modinfo);
*x = d;
}
/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (variable time). */
static void rustsecp256k1_v0_4_1_modinv64_var(rustsecp256k1_v0_4_1_modinv64_signed62 *x, const rustsecp256k1_v0_4_1_modinv64_modinfo *modinfo) {
/* Start with d=0, e=1, f=modulus, g=x, eta=-1. */
rustsecp256k1_v0_4_1_modinv64_signed62 d = {{0, 0, 0, 0, 0}};
rustsecp256k1_v0_4_1_modinv64_signed62 e = {{1, 0, 0, 0, 0}};
rustsecp256k1_v0_4_1_modinv64_signed62 f = modinfo->modulus;
rustsecp256k1_v0_4_1_modinv64_signed62 g = *x;
#ifdef VERIFY
int i = 0;
#endif
int j, len = 5;
int64_t eta = -1; /* eta = -delta; delta is initially 1 */
int64_t cond, fn, gn;
/* Do iterations of 62 divsteps each until g=0. */
while (1) {
/* Compute transition matrix and new eta after 62 divsteps. */
rustsecp256k1_v0_4_1_modinv64_trans2x2 t;
eta = rustsecp256k1_v0_4_1_modinv64_divsteps_62_var(eta, f.v[0], g.v[0], &t);
/* Update d,e using that transition matrix. */
rustsecp256k1_v0_4_1_modinv64_update_de_62(&d, &e, &t, modinfo);
/* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
rustsecp256k1_v0_4_1_modinv64_update_fg_62_var(len, &f, &g, &t);
/* If the bottom limb of g is zero, there is a chance that g=0. */
if (g.v[0] == 0) {
cond = 0;
/* Check if the other limbs are also 0. */
for (j = 1; j < len; ++j) {
cond |= g.v[j];
}
/* If so, we're done. */
if (cond == 0) break;
}
/* Determine if len>1 and limb (len-1) of both f and g is 0 or -1. */
fn = f.v[len - 1];
gn = g.v[len - 1];
cond = ((int64_t)len - 2) >> 63;
cond |= fn ^ (fn >> 63);
cond |= gn ^ (gn >> 63);
/* If so, reduce length, propagating the sign of f and g's top limb into the one below. */
if (cond == 0) {
f.v[len - 2] |= (uint64_t)fn << 62;
g.v[len - 2] |= (uint64_t)gn << 62;
--len;
}
#ifdef VERIFY
VERIFY_CHECK(++i < 12); /* We should never need more than 12*62 = 744 divsteps */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
}
/* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of
* the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
/* g == 0 */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0);
/* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, -1) == 0 ||
rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, 1) == 0 ||
(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0 ||
rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) == 0)));
#endif
/* Optionally negate d, normalize to [0,modulus), and return it. */
rustsecp256k1_v0_4_1_modinv64_normalize_62(&d, f.v[len - 1], modinfo);
*x = d;
}
#endif /* SECP256K1_MODINV64_IMPL_H */
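Illustrative sketch (not part of the diff): the contract of the two functions above is simply x -> x^-1 mod modulus; the safegcd/divsteps machinery exists to compute that in constant (or variable) time on five 62-bit limbs. The toy below shows the same contract with the classic extended Euclidean algorithm on small 64-bit operands — none of these names exist in the library, and the real code never uses division like this.
#include <stdint.h>
#include <stdio.h>
/* Illustration only: extended Euclidean modular inverse on small operands. */
static int64_t toy_modinv(int64_t a, int64_t m) {
    int64_t old_r = a, r = m;
    int64_t old_s = 1, s = 0;
    while (r != 0) {
        int64_t q = old_r / r;
        int64_t t = old_r - q * r; old_r = r; r = t;
        t = old_s - q * s; old_s = s; s = t;
    }
    if (old_r != 1) return -1;        /* an inverse exists only if gcd(a, m) == 1 */
    return ((old_s % m) + m) % m;     /* normalize to [0, m), as normalize_62 does */
}
int main(void) {
    int64_t m = 1000003, x = 123456;  /* small prime modulus for the demo */
    int64_t inv = toy_modinv(x, m);
    printf("x * x^-1 mod m = %lld\n", (long long)((x * inv) % m));  /* prints 1 */
    return 0;
}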

View File

@ -1,4 +1,4 @@
include_HEADERS += include/rustsecp256k1_v0_4_0_ecdh.h
include_HEADERS += include/rustsecp256k1_v0_4_1_ecdh.h
noinst_HEADERS += src/modules/ecdh/main_impl.h
noinst_HEADERS += src/modules/ecdh/tests_impl.h
if USE_BENCHMARK

View File

@ -7,31 +7,31 @@
#ifndef SECP256K1_MODULE_ECDH_MAIN_H
#define SECP256K1_MODULE_ECDH_MAIN_H
#include "include/secp256k1_ecdh.h"
#include "ecmult_const_impl.h"
#include "../../../include/secp256k1_ecdh.h"
#include "../../ecmult_const_impl.h"
static int ecdh_hash_function_sha256(unsigned char *output, const unsigned char *x32, const unsigned char *y32, void *data) {
unsigned char version = (y32[31] & 0x01) | 0x02;
rustsecp256k1_v0_4_0_sha256 sha;
rustsecp256k1_v0_4_1_sha256 sha;
(void)data;
rustsecp256k1_v0_4_0_sha256_initialize(&sha);
rustsecp256k1_v0_4_0_sha256_write(&sha, &version, 1);
rustsecp256k1_v0_4_0_sha256_write(&sha, x32, 32);
rustsecp256k1_v0_4_0_sha256_finalize(&sha, output);
rustsecp256k1_v0_4_1_sha256_initialize(&sha);
rustsecp256k1_v0_4_1_sha256_write(&sha, &version, 1);
rustsecp256k1_v0_4_1_sha256_write(&sha, x32, 32);
rustsecp256k1_v0_4_1_sha256_finalize(&sha, output);
return 1;
}
const rustsecp256k1_v0_4_0_ecdh_hash_function rustsecp256k1_v0_4_0_ecdh_hash_function_sha256 = ecdh_hash_function_sha256;
const rustsecp256k1_v0_4_0_ecdh_hash_function rustsecp256k1_v0_4_0_ecdh_hash_function_default = ecdh_hash_function_sha256;
const rustsecp256k1_v0_4_1_ecdh_hash_function rustsecp256k1_v0_4_1_ecdh_hash_function_sha256 = ecdh_hash_function_sha256;
const rustsecp256k1_v0_4_1_ecdh_hash_function rustsecp256k1_v0_4_1_ecdh_hash_function_default = ecdh_hash_function_sha256;
int rustsecp256k1_v0_4_0_ecdh(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *output, const rustsecp256k1_v0_4_0_pubkey *point, const unsigned char *scalar, rustsecp256k1_v0_4_0_ecdh_hash_function hashfp, void *data) {
int rustsecp256k1_v0_4_1_ecdh(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output, const rustsecp256k1_v0_4_1_pubkey *point, const unsigned char *scalar, rustsecp256k1_v0_4_1_ecdh_hash_function hashfp, void *data) {
int ret = 0;
int overflow = 0;
rustsecp256k1_v0_4_0_gej res;
rustsecp256k1_v0_4_0_ge pt;
rustsecp256k1_v0_4_0_scalar s;
rustsecp256k1_v0_4_1_gej res;
rustsecp256k1_v0_4_1_ge pt;
rustsecp256k1_v0_4_1_scalar s;
unsigned char x[32];
unsigned char y[32];
@ -41,29 +41,29 @@ int rustsecp256k1_v0_4_0_ecdh(const rustsecp256k1_v0_4_0_context* ctx, unsigned
ARG_CHECK(scalar != NULL);
if (hashfp == NULL) {
hashfp = rustsecp256k1_v0_4_0_ecdh_hash_function_default;
hashfp = rustsecp256k1_v0_4_1_ecdh_hash_function_default;
}
rustsecp256k1_v0_4_0_pubkey_load(ctx, &pt, point);
rustsecp256k1_v0_4_0_scalar_set_b32(&s, scalar, &overflow);
rustsecp256k1_v0_4_1_pubkey_load(ctx, &pt, point);
rustsecp256k1_v0_4_1_scalar_set_b32(&s, scalar, &overflow);
overflow |= rustsecp256k1_v0_4_0_scalar_is_zero(&s);
rustsecp256k1_v0_4_0_scalar_cmov(&s, &rustsecp256k1_v0_4_0_scalar_one, overflow);
overflow |= rustsecp256k1_v0_4_1_scalar_is_zero(&s);
rustsecp256k1_v0_4_1_scalar_cmov(&s, &rustsecp256k1_v0_4_1_scalar_one, overflow);
rustsecp256k1_v0_4_0_ecmult_const(&res, &pt, &s, 256);
rustsecp256k1_v0_4_0_ge_set_gej(&pt, &res);
rustsecp256k1_v0_4_1_ecmult_const(&res, &pt, &s, 256);
rustsecp256k1_v0_4_1_ge_set_gej(&pt, &res);
/* Compute a hash of the point */
rustsecp256k1_v0_4_0_fe_normalize(&pt.x);
rustsecp256k1_v0_4_0_fe_normalize(&pt.y);
rustsecp256k1_v0_4_0_fe_get_b32(x, &pt.x);
rustsecp256k1_v0_4_0_fe_get_b32(y, &pt.y);
rustsecp256k1_v0_4_1_fe_normalize(&pt.x);
rustsecp256k1_v0_4_1_fe_normalize(&pt.y);
rustsecp256k1_v0_4_1_fe_get_b32(x, &pt.x);
rustsecp256k1_v0_4_1_fe_get_b32(y, &pt.y);
ret = hashfp(output, x, y, data);
memset(x, 0, 32);
memset(y, 0, 32);
rustsecp256k1_v0_4_0_scalar_clear(&s);
rustsecp256k1_v0_4_1_scalar_clear(&s);
return !!ret & !overflow;
}
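Hedged usage sketch (not part of the diff): how a C caller could drive the renamed ECDH entry point above end to end. It only uses symbols that appear in this file and in the tests below, and assumes the vendored secp256k1.h / secp256k1_ecdh.h headers are on the include path. With the default hash function the 32-byte output is SHA-256(parity_tag || x), so both sides derive the same secret.
#include <stdio.h>
#include <string.h>
#include "secp256k1.h"        /* assumed include paths for the vendored headers */
#include "secp256k1_ecdh.h"
int main(void) {
    unsigned char a_sec[32] = {0}, b_sec[32] = {0};
    unsigned char secret_a[32], secret_b[32];
    rustsecp256k1_v0_4_1_pubkey a_pub, b_pub;
    rustsecp256k1_v0_4_1_context *ctx =
        rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN);
    a_sec[31] = 1;   /* toy secret keys; real callers use 32 random bytes */
    b_sec[31] = 2;
    rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &a_pub, a_sec);
    rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &b_pub, b_sec);
    /* Each side combines its own secret key with the peer's public key. */
    rustsecp256k1_v0_4_1_ecdh(ctx, secret_a, &b_pub, a_sec, NULL, NULL);
    rustsecp256k1_v0_4_1_ecdh(ctx, secret_b, &a_pub, b_sec, NULL, NULL);
    printf("shared secrets match: %d\n", memcmp(secret_a, secret_b, 32) == 0);
    rustsecp256k1_v0_4_1_context_destroy(ctx);
    return 0;
}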

View File

@ -26,71 +26,71 @@ int ecdh_hash_function_custom(unsigned char *output, const unsigned char *x, con
void test_ecdh_api(void) {
/* Setup context that just counts errors */
rustsecp256k1_v0_4_0_context *tctx = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN);
rustsecp256k1_v0_4_0_pubkey point;
rustsecp256k1_v0_4_1_context *tctx = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN);
rustsecp256k1_v0_4_1_pubkey point;
unsigned char res[32];
unsigned char s_one[32] = { 0 };
int32_t ecount = 0;
s_one[31] = 1;
rustsecp256k1_v0_4_0_context_set_error_callback(tctx, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_0_context_set_illegal_callback(tctx, counting_illegal_callback_fn, &ecount);
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(tctx, &point, s_one) == 1);
rustsecp256k1_v0_4_1_context_set_error_callback(tctx, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_1_context_set_illegal_callback(tctx, counting_illegal_callback_fn, &ecount);
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(tctx, &point, s_one) == 1);
/* Check all NULLs are detected */
CHECK(rustsecp256k1_v0_4_0_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1);
CHECK(ecount == 0);
CHECK(rustsecp256k1_v0_4_0_ecdh(tctx, NULL, &point, s_one, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdh(tctx, NULL, &point, s_one, NULL, NULL) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_0_ecdh(tctx, res, NULL, s_one, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdh(tctx, res, NULL, s_one, NULL, NULL) == 0);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_0_ecdh(tctx, res, &point, NULL, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdh(tctx, res, &point, NULL, NULL, NULL) == 0);
CHECK(ecount == 3);
CHECK(rustsecp256k1_v0_4_0_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1);
CHECK(ecount == 3);
/* Cleanup */
rustsecp256k1_v0_4_0_context_destroy(tctx);
rustsecp256k1_v0_4_1_context_destroy(tctx);
}
void test_ecdh_generator_basepoint(void) {
unsigned char s_one[32] = { 0 };
rustsecp256k1_v0_4_0_pubkey point[2];
rustsecp256k1_v0_4_1_pubkey point[2];
int i;
s_one[31] = 1;
/* Check against pubkey creation when the basepoint is the generator */
for (i = 0; i < 100; ++i) {
rustsecp256k1_v0_4_0_sha256 sha;
rustsecp256k1_v0_4_1_sha256 sha;
unsigned char s_b32[32];
unsigned char output_ecdh[65];
unsigned char output_ser[32];
unsigned char point_ser[65];
size_t point_ser_len = sizeof(point_ser);
rustsecp256k1_v0_4_0_scalar s;
rustsecp256k1_v0_4_1_scalar s;
random_scalar_order(&s);
rustsecp256k1_v0_4_0_scalar_get_b32(s_b32, &s);
rustsecp256k1_v0_4_1_scalar_get_b32(s_b32, &s);
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &point[0], s_one) == 1);
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &point[1], s_b32) == 1);
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &point[0], s_one) == 1);
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &point[1], s_b32) == 1);
/* compute using ECDH function with custom hash function */
CHECK(rustsecp256k1_v0_4_0_ecdh(ctx, output_ecdh, &point[0], s_b32, ecdh_hash_function_custom, NULL) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdh(ctx, output_ecdh, &point[0], s_b32, ecdh_hash_function_custom, NULL) == 1);
/* compute "explicitly" */
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_UNCOMPRESSED) == 1);
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_UNCOMPRESSED) == 1);
/* compare */
CHECK(rustsecp256k1_v0_4_0_memcmp_var(output_ecdh, point_ser, 65) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(output_ecdh, point_ser, 65) == 0);
/* compute using ECDH function with default hash function */
CHECK(rustsecp256k1_v0_4_0_ecdh(ctx, output_ecdh, &point[0], s_b32, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdh(ctx, output_ecdh, &point[0], s_b32, NULL, NULL) == 1);
/* compute "explicitly" */
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_COMPRESSED) == 1);
rustsecp256k1_v0_4_0_sha256_initialize(&sha);
rustsecp256k1_v0_4_0_sha256_write(&sha, point_ser, point_ser_len);
rustsecp256k1_v0_4_0_sha256_finalize(&sha, output_ser);
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_COMPRESSED) == 1);
rustsecp256k1_v0_4_1_sha256_initialize(&sha);
rustsecp256k1_v0_4_1_sha256_write(&sha, point_ser, point_ser_len);
rustsecp256k1_v0_4_1_sha256_finalize(&sha, output_ser);
/* compare */
CHECK(rustsecp256k1_v0_4_0_memcmp_var(output_ecdh, output_ser, 32) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(output_ecdh, output_ser, 32) == 0);
}
}
@ -104,23 +104,23 @@ void test_bad_scalar(void) {
};
unsigned char s_rand[32] = { 0 };
unsigned char output[32];
rustsecp256k1_v0_4_0_scalar rand;
rustsecp256k1_v0_4_0_pubkey point;
rustsecp256k1_v0_4_1_scalar rand;
rustsecp256k1_v0_4_1_pubkey point;
/* Create random point */
random_scalar_order(&rand);
rustsecp256k1_v0_4_0_scalar_get_b32(s_rand, &rand);
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &point, s_rand) == 1);
rustsecp256k1_v0_4_1_scalar_get_b32(s_rand, &rand);
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &point, s_rand) == 1);
/* Try to multiply it by bad values */
CHECK(rustsecp256k1_v0_4_0_ecdh(ctx, output, &point, s_zero, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_0_ecdh(ctx, output, &point, s_overflow, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdh(ctx, output, &point, s_zero, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdh(ctx, output, &point, s_overflow, NULL, NULL) == 0);
/* ...and a good one */
s_overflow[31] -= 1;
CHECK(rustsecp256k1_v0_4_0_ecdh(ctx, output, &point, s_overflow, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdh(ctx, output, &point, s_overflow, NULL, NULL) == 1);
/* Hash function failure results in ecdh failure */
CHECK(rustsecp256k1_v0_4_0_ecdh(ctx, output, &point, s_overflow, ecdh_hash_function_test_fail, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdh(ctx, output, &point, s_overflow, ecdh_hash_function_test_fail, NULL) == 0);
}
void run_ecdh_tests(void) {

View File

@ -1,4 +1,4 @@
include_HEADERS += include/rustsecp256k1_v0_4_0_extrakeys.h
include_HEADERS += include/rustsecp256k1_v0_4_1_extrakeys.h
noinst_HEADERS += src/modules/extrakeys/tests_impl.h
noinst_HEADERS += src/modules/extrakeys/tests_exhaustive_impl.h
noinst_HEADERS += src/modules/extrakeys/main_impl.h

View File

@ -4,143 +4,169 @@
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
#ifndef _SECP256K1_MODULE_EXTRAKEYS_MAIN_
#define _SECP256K1_MODULE_EXTRAKEYS_MAIN_
#ifndef SECP256K1_MODULE_EXTRAKEYS_MAIN_H
#define SECP256K1_MODULE_EXTRAKEYS_MAIN_H
#include "include/secp256k1.h"
#include "include/secp256k1_extrakeys.h"
#include "../../../include/secp256k1.h"
#include "../../../include/secp256k1_extrakeys.h"
static SECP256K1_INLINE int rustsecp256k1_v0_4_0_xonly_pubkey_load(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ge *ge, const rustsecp256k1_v0_4_0_xonly_pubkey *pubkey) {
return rustsecp256k1_v0_4_0_pubkey_load(ctx, ge, (const rustsecp256k1_v0_4_0_pubkey *) pubkey);
static SECP256K1_INLINE int rustsecp256k1_v0_4_1_xonly_pubkey_load(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ge *ge, const rustsecp256k1_v0_4_1_xonly_pubkey *pubkey) {
return rustsecp256k1_v0_4_1_pubkey_load(ctx, ge, (const rustsecp256k1_v0_4_1_pubkey *) pubkey);
}
static SECP256K1_INLINE void rustsecp256k1_v0_4_0_xonly_pubkey_save(rustsecp256k1_v0_4_0_xonly_pubkey *pubkey, rustsecp256k1_v0_4_0_ge *ge) {
rustsecp256k1_v0_4_0_pubkey_save((rustsecp256k1_v0_4_0_pubkey *) pubkey, ge);
static SECP256K1_INLINE void rustsecp256k1_v0_4_1_xonly_pubkey_save(rustsecp256k1_v0_4_1_xonly_pubkey *pubkey, rustsecp256k1_v0_4_1_ge *ge) {
rustsecp256k1_v0_4_1_pubkey_save((rustsecp256k1_v0_4_1_pubkey *) pubkey, ge);
}
int rustsecp256k1_v0_4_0_xonly_pubkey_parse(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_xonly_pubkey *pubkey, const unsigned char *input32) {
rustsecp256k1_v0_4_0_ge pk;
rustsecp256k1_v0_4_0_fe x;
int rustsecp256k1_v0_4_1_xonly_pubkey_parse(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_xonly_pubkey *pubkey, const unsigned char *input32) {
rustsecp256k1_v0_4_1_ge pk;
rustsecp256k1_v0_4_1_fe x;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(pubkey != NULL);
memset(pubkey, 0, sizeof(*pubkey));
ARG_CHECK(input32 != NULL);
if (!rustsecp256k1_v0_4_0_fe_set_b32(&x, input32)) {
if (!rustsecp256k1_v0_4_1_fe_set_b32(&x, input32)) {
return 0;
}
if (!rustsecp256k1_v0_4_0_ge_set_xo_var(&pk, &x, 0)) {
if (!rustsecp256k1_v0_4_1_ge_set_xo_var(&pk, &x, 0)) {
return 0;
}
if (!rustsecp256k1_v0_4_0_ge_is_in_correct_subgroup(&pk)) {
if (!rustsecp256k1_v0_4_1_ge_is_in_correct_subgroup(&pk)) {
return 0;
}
rustsecp256k1_v0_4_0_xonly_pubkey_save(pubkey, &pk);
rustsecp256k1_v0_4_1_xonly_pubkey_save(pubkey, &pk);
return 1;
}
int rustsecp256k1_v0_4_0_xonly_pubkey_serialize(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *output32, const rustsecp256k1_v0_4_0_xonly_pubkey *pubkey) {
rustsecp256k1_v0_4_0_ge pk;
int rustsecp256k1_v0_4_1_xonly_pubkey_serialize(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output32, const rustsecp256k1_v0_4_1_xonly_pubkey *pubkey) {
rustsecp256k1_v0_4_1_ge pk;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(output32 != NULL);
memset(output32, 0, 32);
ARG_CHECK(pubkey != NULL);
if (!rustsecp256k1_v0_4_0_xonly_pubkey_load(ctx, &pk, pubkey)) {
if (!rustsecp256k1_v0_4_1_xonly_pubkey_load(ctx, &pk, pubkey)) {
return 0;
}
rustsecp256k1_v0_4_0_fe_get_b32(output32, &pk.x);
rustsecp256k1_v0_4_1_fe_get_b32(output32, &pk.x);
return 1;
}
int rustsecp256k1_v0_4_1_xonly_pubkey_cmp(const rustsecp256k1_v0_4_1_context* ctx, const rustsecp256k1_v0_4_1_xonly_pubkey* pk0, const rustsecp256k1_v0_4_1_xonly_pubkey* pk1) {
unsigned char out[2][32];
const rustsecp256k1_v0_4_1_xonly_pubkey* pk[2];
int i;
VERIFY_CHECK(ctx != NULL);
pk[0] = pk0; pk[1] = pk1;
for (i = 0; i < 2; i++) {
/* If the public key is NULL or invalid, xonly_pubkey_serialize will
* call the illegal_callback and return 0. In that case we will
* serialize the key as all zeros which is less than any valid public
* key. This results in consistent comparisons even if NULL or invalid
* pubkeys are involved and prevents edge cases such as sorting
* algorithms that use this function and do not terminate as a
* result. */
if (!rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, out[i], pk[i])) {
/* Note that xonly_pubkey_serialize should already set the output to
* zero in that case, but it's not guaranteed by the API, we can't
* test it and writing a VERIFY_CHECK is more complex than
* explicitly memsetting (again). */
memset(out[i], 0, sizeof(out[i]));
}
}
return rustsecp256k1_v0_4_1_memcmp_var(out[0], out[1], sizeof(out[1]));
}
/** Keeps a group element as is if it has an even Y and otherwise negates it.
* y_parity is set to 0 in the former case and to 1 in the latter case.
* Requires that the coordinates of r are normalized. */
static int rustsecp256k1_v0_4_0_extrakeys_ge_even_y(rustsecp256k1_v0_4_0_ge *r) {
static int rustsecp256k1_v0_4_1_extrakeys_ge_even_y(rustsecp256k1_v0_4_1_ge *r) {
int y_parity = 0;
VERIFY_CHECK(!rustsecp256k1_v0_4_0_ge_is_infinity(r));
VERIFY_CHECK(!rustsecp256k1_v0_4_1_ge_is_infinity(r));
if (rustsecp256k1_v0_4_0_fe_is_odd(&r->y)) {
rustsecp256k1_v0_4_0_fe_negate(&r->y, &r->y, 1);
if (rustsecp256k1_v0_4_1_fe_is_odd(&r->y)) {
rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->y, 1);
y_parity = 1;
}
return y_parity;
}
int rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_xonly_pubkey *xonly_pubkey, int *pk_parity, const rustsecp256k1_v0_4_0_pubkey *pubkey) {
rustsecp256k1_v0_4_0_ge pk;
int rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_xonly_pubkey *xonly_pubkey, int *pk_parity, const rustsecp256k1_v0_4_1_pubkey *pubkey) {
rustsecp256k1_v0_4_1_ge pk;
int tmp;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(xonly_pubkey != NULL);
ARG_CHECK(pubkey != NULL);
if (!rustsecp256k1_v0_4_0_pubkey_load(ctx, &pk, pubkey)) {
if (!rustsecp256k1_v0_4_1_pubkey_load(ctx, &pk, pubkey)) {
return 0;
}
tmp = rustsecp256k1_v0_4_0_extrakeys_ge_even_y(&pk);
tmp = rustsecp256k1_v0_4_1_extrakeys_ge_even_y(&pk);
if (pk_parity != NULL) {
*pk_parity = tmp;
}
rustsecp256k1_v0_4_0_xonly_pubkey_save(xonly_pubkey, &pk);
rustsecp256k1_v0_4_1_xonly_pubkey_save(xonly_pubkey, &pk);
return 1;
}
int rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *output_pubkey, const rustsecp256k1_v0_4_0_xonly_pubkey *internal_pubkey, const unsigned char *tweak32) {
rustsecp256k1_v0_4_0_ge pk;
int rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *output_pubkey, const rustsecp256k1_v0_4_1_xonly_pubkey *internal_pubkey, const unsigned char *tweak32) {
rustsecp256k1_v0_4_1_ge pk;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(output_pubkey != NULL);
memset(output_pubkey, 0, sizeof(*output_pubkey));
ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(internal_pubkey != NULL);
ARG_CHECK(tweak32 != NULL);
if (!rustsecp256k1_v0_4_0_xonly_pubkey_load(ctx, &pk, internal_pubkey)
|| !rustsecp256k1_v0_4_0_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &pk, tweak32)) {
if (!rustsecp256k1_v0_4_1_xonly_pubkey_load(ctx, &pk, internal_pubkey)
|| !rustsecp256k1_v0_4_1_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &pk, tweak32)) {
return 0;
}
rustsecp256k1_v0_4_0_pubkey_save(output_pubkey, &pk);
rustsecp256k1_v0_4_1_pubkey_save(output_pubkey, &pk);
return 1;
}
int rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(const rustsecp256k1_v0_4_0_context* ctx, const unsigned char *tweaked_pubkey32, int tweaked_pk_parity, const rustsecp256k1_v0_4_0_xonly_pubkey *internal_pubkey, const unsigned char *tweak32) {
rustsecp256k1_v0_4_0_ge pk;
int rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(const rustsecp256k1_v0_4_1_context* ctx, const unsigned char *tweaked_pubkey32, int tweaked_pk_parity, const rustsecp256k1_v0_4_1_xonly_pubkey *internal_pubkey, const unsigned char *tweak32) {
rustsecp256k1_v0_4_1_ge pk;
unsigned char pk_expected32[32];
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(internal_pubkey != NULL);
ARG_CHECK(tweaked_pubkey32 != NULL);
ARG_CHECK(tweak32 != NULL);
if (!rustsecp256k1_v0_4_0_xonly_pubkey_load(ctx, &pk, internal_pubkey)
|| !rustsecp256k1_v0_4_0_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &pk, tweak32)) {
if (!rustsecp256k1_v0_4_1_xonly_pubkey_load(ctx, &pk, internal_pubkey)
|| !rustsecp256k1_v0_4_1_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &pk, tweak32)) {
return 0;
}
rustsecp256k1_v0_4_0_fe_normalize_var(&pk.x);
rustsecp256k1_v0_4_0_fe_normalize_var(&pk.y);
rustsecp256k1_v0_4_0_fe_get_b32(pk_expected32, &pk.x);
rustsecp256k1_v0_4_1_fe_normalize_var(&pk.x);
rustsecp256k1_v0_4_1_fe_normalize_var(&pk.y);
rustsecp256k1_v0_4_1_fe_get_b32(pk_expected32, &pk.x);
return rustsecp256k1_v0_4_0_memcmp_var(&pk_expected32, tweaked_pubkey32, 32) == 0
&& rustsecp256k1_v0_4_0_fe_is_odd(&pk.y) == tweaked_pk_parity;
return rustsecp256k1_v0_4_1_memcmp_var(&pk_expected32, tweaked_pubkey32, 32) == 0
&& rustsecp256k1_v0_4_1_fe_is_odd(&pk.y) == tweaked_pk_parity;
}
static void rustsecp256k1_v0_4_0_keypair_save(rustsecp256k1_v0_4_0_keypair *keypair, const rustsecp256k1_v0_4_0_scalar *sk, rustsecp256k1_v0_4_0_ge *pk) {
rustsecp256k1_v0_4_0_scalar_get_b32(&keypair->data[0], sk);
rustsecp256k1_v0_4_0_pubkey_save((rustsecp256k1_v0_4_0_pubkey *)&keypair->data[32], pk);
static void rustsecp256k1_v0_4_1_keypair_save(rustsecp256k1_v0_4_1_keypair *keypair, const rustsecp256k1_v0_4_1_scalar *sk, rustsecp256k1_v0_4_1_ge *pk) {
rustsecp256k1_v0_4_1_scalar_get_b32(&keypair->data[0], sk);
rustsecp256k1_v0_4_1_pubkey_save((rustsecp256k1_v0_4_1_pubkey *)&keypair->data[32], pk);
}
static int rustsecp256k1_v0_4_0_keypair_seckey_load(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_scalar *sk, const rustsecp256k1_v0_4_0_keypair *keypair) {
static int rustsecp256k1_v0_4_1_keypair_seckey_load(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_scalar *sk, const rustsecp256k1_v0_4_1_keypair *keypair) {
int ret;
ret = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(sk, &keypair->data[0]);
ret = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(sk, &keypair->data[0]);
/* We can declassify ret here because sk is only zero if a keypair function
* failed (which zeroes the keypair) and its return value is ignored. */
rustsecp256k1_v0_4_0_declassify(ctx, &ret, sizeof(ret));
rustsecp256k1_v0_4_1_declassify(ctx, &ret, sizeof(ret));
ARG_CHECK(ret);
return ret;
}
@ -148,45 +174,55 @@ static int rustsecp256k1_v0_4_0_keypair_seckey_load(const rustsecp256k1_v0_4_0_c
/* Load a keypair into pk and sk (if non-NULL). This function declassifies pk
* and ARG_CHECKs that the keypair is not invalid. It always initializes sk and
* pk with dummy values. */
static int rustsecp256k1_v0_4_0_keypair_load(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_scalar *sk, rustsecp256k1_v0_4_0_ge *pk, const rustsecp256k1_v0_4_0_keypair *keypair) {
static int rustsecp256k1_v0_4_1_keypair_load(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_scalar *sk, rustsecp256k1_v0_4_1_ge *pk, const rustsecp256k1_v0_4_1_keypair *keypair) {
int ret;
const rustsecp256k1_v0_4_0_pubkey *pubkey = (const rustsecp256k1_v0_4_0_pubkey *)&keypair->data[32];
const rustsecp256k1_v0_4_1_pubkey *pubkey = (const rustsecp256k1_v0_4_1_pubkey *)&keypair->data[32];
/* Need to declassify the pubkey because pubkey_load ARG_CHECKs if it's
* invalid. */
rustsecp256k1_v0_4_0_declassify(ctx, pubkey, sizeof(*pubkey));
ret = rustsecp256k1_v0_4_0_pubkey_load(ctx, pk, pubkey);
rustsecp256k1_v0_4_1_declassify(ctx, pubkey, sizeof(*pubkey));
ret = rustsecp256k1_v0_4_1_pubkey_load(ctx, pk, pubkey);
if (sk != NULL) {
ret = ret && rustsecp256k1_v0_4_0_keypair_seckey_load(ctx, sk, keypair);
ret = ret && rustsecp256k1_v0_4_1_keypair_seckey_load(ctx, sk, keypair);
}
if (!ret) {
*pk = rustsecp256k1_v0_4_0_ge_const_g;
*pk = rustsecp256k1_v0_4_1_ge_const_g;
if (sk != NULL) {
*sk = rustsecp256k1_v0_4_0_scalar_one;
*sk = rustsecp256k1_v0_4_1_scalar_one;
}
}
return ret;
}
int rustsecp256k1_v0_4_0_keypair_create(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_keypair *keypair, const unsigned char *seckey32) {
rustsecp256k1_v0_4_0_scalar sk;
rustsecp256k1_v0_4_0_ge pk;
int rustsecp256k1_v0_4_1_keypair_create(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_keypair *keypair, const unsigned char *seckey32) {
rustsecp256k1_v0_4_1_scalar sk;
rustsecp256k1_v0_4_1_ge pk;
int ret = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(keypair != NULL);
memset(keypair, 0, sizeof(*keypair));
ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
ARG_CHECK(seckey32 != NULL);
ret = rustsecp256k1_v0_4_0_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &sk, &pk, seckey32);
rustsecp256k1_v0_4_0_keypair_save(keypair, &sk, &pk);
rustsecp256k1_v0_4_0_memczero(keypair, sizeof(*keypair), !ret);
ret = rustsecp256k1_v0_4_1_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &sk, &pk, seckey32);
rustsecp256k1_v0_4_1_keypair_save(keypair, &sk, &pk);
rustsecp256k1_v0_4_1_memczero(keypair, sizeof(*keypair), !ret);
rustsecp256k1_v0_4_0_scalar_clear(&sk);
rustsecp256k1_v0_4_1_scalar_clear(&sk);
return ret;
}
int rustsecp256k1_v0_4_0_keypair_pub(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *pubkey, const rustsecp256k1_v0_4_0_keypair *keypair) {
int rustsecp256k1_v0_4_1_keypair_sec(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const rustsecp256k1_v0_4_1_keypair *keypair) {
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(seckey != NULL);
memset(seckey, 0, 32);
ARG_CHECK(keypair != NULL);
memcpy(seckey, &keypair->data[0], 32);
return 1;
}
int rustsecp256k1_v0_4_1_keypair_pub(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *pubkey, const rustsecp256k1_v0_4_1_keypair *keypair) {
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(pubkey != NULL);
memset(pubkey, 0, sizeof(*pubkey));
@ -196,8 +232,8 @@ int rustsecp256k1_v0_4_0_keypair_pub(const rustsecp256k1_v0_4_0_context* ctx, ru
return 1;
}
int rustsecp256k1_v0_4_0_keypair_xonly_pub(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_xonly_pubkey *pubkey, int *pk_parity, const rustsecp256k1_v0_4_0_keypair *keypair) {
rustsecp256k1_v0_4_0_ge pk;
int rustsecp256k1_v0_4_1_keypair_xonly_pub(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_xonly_pubkey *pubkey, int *pk_parity, const rustsecp256k1_v0_4_1_keypair *keypair) {
rustsecp256k1_v0_4_1_ge pk;
int tmp;
VERIFY_CHECK(ctx != NULL);
@ -205,46 +241,46 @@ int rustsecp256k1_v0_4_0_keypair_xonly_pub(const rustsecp256k1_v0_4_0_context* c
memset(pubkey, 0, sizeof(*pubkey));
ARG_CHECK(keypair != NULL);
if (!rustsecp256k1_v0_4_0_keypair_load(ctx, NULL, &pk, keypair)) {
if (!rustsecp256k1_v0_4_1_keypair_load(ctx, NULL, &pk, keypair)) {
return 0;
}
tmp = rustsecp256k1_v0_4_0_extrakeys_ge_even_y(&pk);
tmp = rustsecp256k1_v0_4_1_extrakeys_ge_even_y(&pk);
if (pk_parity != NULL) {
*pk_parity = tmp;
}
rustsecp256k1_v0_4_0_xonly_pubkey_save(pubkey, &pk);
rustsecp256k1_v0_4_1_xonly_pubkey_save(pubkey, &pk);
return 1;
}
int rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_keypair *keypair, const unsigned char *tweak32) {
rustsecp256k1_v0_4_0_ge pk;
rustsecp256k1_v0_4_0_scalar sk;
int rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_keypair *keypair, const unsigned char *tweak32) {
rustsecp256k1_v0_4_1_ge pk;
rustsecp256k1_v0_4_1_scalar sk;
int y_parity;
int ret;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(keypair != NULL);
ARG_CHECK(tweak32 != NULL);
ret = rustsecp256k1_v0_4_0_keypair_load(ctx, &sk, &pk, keypair);
ret = rustsecp256k1_v0_4_1_keypair_load(ctx, &sk, &pk, keypair);
memset(keypair, 0, sizeof(*keypair));
y_parity = rustsecp256k1_v0_4_0_extrakeys_ge_even_y(&pk);
y_parity = rustsecp256k1_v0_4_1_extrakeys_ge_even_y(&pk);
if (y_parity == 1) {
rustsecp256k1_v0_4_0_scalar_negate(&sk, &sk);
rustsecp256k1_v0_4_1_scalar_negate(&sk, &sk);
}
ret &= rustsecp256k1_v0_4_0_ec_seckey_tweak_add_helper(&sk, tweak32);
ret &= rustsecp256k1_v0_4_0_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &pk, tweak32);
ret &= rustsecp256k1_v0_4_1_ec_seckey_tweak_add_helper(&sk, tweak32);
ret &= rustsecp256k1_v0_4_1_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &pk, tweak32);
rustsecp256k1_v0_4_0_declassify(ctx, &ret, sizeof(ret));
rustsecp256k1_v0_4_1_declassify(ctx, &ret, sizeof(ret));
if (ret) {
rustsecp256k1_v0_4_0_keypair_save(keypair, &sk, &pk);
rustsecp256k1_v0_4_1_keypair_save(keypair, &sk, &pk);
}
rustsecp256k1_v0_4_0_scalar_clear(&sk);
rustsecp256k1_v0_4_1_scalar_clear(&sk);
return ret;
}
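Hedged usage sketch (not part of the diff) tying keypair_create together with the new keypair_sec accessor and the existing keypair_pub: build the keypair once, then hand back both the 32 secret-key bytes and the full pubkey object. The helper name and include paths are illustrative assumptions; the library calls are exactly those defined above, and keypair_create needs a signing-capable context.
#include <string.h>
#include "secp256k1.h"             /* assumed include paths */
#include "secp256k1_extrakeys.h"
/* Illustrative helper (not a library function). Returns 1 on success. */
static int extract_sec_and_pub(const rustsecp256k1_v0_4_1_context *ctx,
                               const unsigned char seckey32[32],
                               unsigned char seckey_out[32],
                               rustsecp256k1_v0_4_1_pubkey *pubkey_out) {
    rustsecp256k1_v0_4_1_keypair kp;
    if (!rustsecp256k1_v0_4_1_keypair_create(ctx, &kp, seckey32)) return 0;
    if (!rustsecp256k1_v0_4_1_keypair_sec(ctx, seckey_out, &kp)) return 0;   /* 32 secret-key bytes */
    if (!rustsecp256k1_v0_4_1_keypair_pub(ctx, pubkey_out, &kp)) return 0;   /* full pubkey object */
    memset(&kp, 0, sizeof(kp));    /* wipe the local copy of the secret material */
    return 1;
}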

View File

@ -4,60 +4,60 @@
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
#ifndef _SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_
#define _SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_
#ifndef SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_H
#define SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_H
#include "src/modules/extrakeys/main_impl.h"
#include "include/secp256k1_extrakeys.h"
#include "../../../include/secp256k1_extrakeys.h"
static void test_exhaustive_extrakeys(const rustsecp256k1_v0_4_0_context *ctx, const rustsecp256k1_v0_4_0_ge* group) {
rustsecp256k1_v0_4_0_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1];
rustsecp256k1_v0_4_0_pubkey pubkey[EXHAUSTIVE_TEST_ORDER - 1];
rustsecp256k1_v0_4_0_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1];
static void test_exhaustive_extrakeys(const rustsecp256k1_v0_4_1_context *ctx, const rustsecp256k1_v0_4_1_ge* group) {
rustsecp256k1_v0_4_1_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1];
rustsecp256k1_v0_4_1_pubkey pubkey[EXHAUSTIVE_TEST_ORDER - 1];
rustsecp256k1_v0_4_1_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1];
int parities[EXHAUSTIVE_TEST_ORDER - 1];
unsigned char xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - 1][32];
int i;
for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
rustsecp256k1_v0_4_0_fe fe;
rustsecp256k1_v0_4_0_scalar scalar_i;
rustsecp256k1_v0_4_1_fe fe;
rustsecp256k1_v0_4_1_scalar scalar_i;
unsigned char buf[33];
int parity;
rustsecp256k1_v0_4_0_scalar_set_int(&scalar_i, i);
rustsecp256k1_v0_4_0_scalar_get_b32(buf, &scalar_i);
rustsecp256k1_v0_4_1_scalar_set_int(&scalar_i, i);
rustsecp256k1_v0_4_1_scalar_get_b32(buf, &scalar_i);
/* Construct pubkey and keypair. */
CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair[i - 1], buf));
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey[i - 1], buf));
CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair[i - 1], buf));
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey[i - 1], buf));
/* Construct serialized xonly_pubkey from keypair. */
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parities[i - 1], &keypair[i - 1]));
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1]));
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parities[i - 1], &keypair[i - 1]));
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1]));
/* Parse the xonly_pubkey back and verify it matches the previously serialized value. */
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &xonly_pubkey[i - 1], xonly_pubkey_bytes[i - 1]));
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1]));
CHECK(rustsecp256k1_v0_4_0_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &xonly_pubkey[i - 1], xonly_pubkey_bytes[i - 1]));
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1]));
CHECK(rustsecp256k1_v0_4_1_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0);
/* Construct the xonly_pubkey from the pubkey, and verify it matches the same. */
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(ctx, &xonly_pubkey[i - 1], &parity, &pubkey[i - 1]));
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(ctx, &xonly_pubkey[i - 1], &parity, &pubkey[i - 1]));
CHECK(parity == parities[i - 1]);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1]));
CHECK(rustsecp256k1_v0_4_0_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1]));
CHECK(rustsecp256k1_v0_4_1_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0);
/* Compare the xonly_pubkey bytes against the precomputed group. */
rustsecp256k1_v0_4_0_fe_set_b32(&fe, xonly_pubkey_bytes[i - 1]);
CHECK(rustsecp256k1_v0_4_0_fe_equal_var(&fe, &group[i].x));
rustsecp256k1_v0_4_1_fe_set_b32(&fe, xonly_pubkey_bytes[i - 1]);
CHECK(rustsecp256k1_v0_4_1_fe_equal_var(&fe, &group[i].x));
/* Check the parity against the precomputed group. */
fe = group[i].y;
rustsecp256k1_v0_4_0_fe_normalize_var(&fe);
CHECK(rustsecp256k1_v0_4_0_fe_is_odd(&fe) == parities[i - 1]);
rustsecp256k1_v0_4_1_fe_normalize_var(&fe);
CHECK(rustsecp256k1_v0_4_1_fe_is_odd(&fe) == parities[i - 1]);
/* Verify that the higher half is identical to the lower half mirrored. */
if (i > EXHAUSTIVE_TEST_ORDER / 2) {
CHECK(rustsecp256k1_v0_4_0_memcmp_var(xonly_pubkey_bytes[i - 1], xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - i - 1], 32) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(xonly_pubkey_bytes[i - 1], xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - i - 1], 32) == 0);
CHECK(parities[i - 1] == 1 - parities[EXHAUSTIVE_TEST_ORDER - i - 1]);
}
}

View File

@ -4,24 +4,24 @@
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
#ifndef _SECP256K1_MODULE_EXTRAKEYS_TESTS_
#define _SECP256K1_MODULE_EXTRAKEYS_TESTS_
#ifndef SECP256K1_MODULE_EXTRAKEYS_TESTS_H
#define SECP256K1_MODULE_EXTRAKEYS_TESTS_H
#include "secp256k1_extrakeys.h"
#include "../../../include/secp256k1_extrakeys.h"
static rustsecp256k1_v0_4_0_context* api_test_context(int flags, int *ecount) {
rustsecp256k1_v0_4_0_context *ctx0 = rustsecp256k1_v0_4_0_context_create(flags);
rustsecp256k1_v0_4_0_context_set_error_callback(ctx0, counting_illegal_callback_fn, ecount);
rustsecp256k1_v0_4_0_context_set_illegal_callback(ctx0, counting_illegal_callback_fn, ecount);
static rustsecp256k1_v0_4_1_context* api_test_context(int flags, int *ecount) {
rustsecp256k1_v0_4_1_context *ctx0 = rustsecp256k1_v0_4_1_context_create(flags);
rustsecp256k1_v0_4_1_context_set_error_callback(ctx0, counting_illegal_callback_fn, ecount);
rustsecp256k1_v0_4_1_context_set_illegal_callback(ctx0, counting_illegal_callback_fn, ecount);
return ctx0;
}
void test_xonly_pubkey(void) {
rustsecp256k1_v0_4_0_pubkey pk;
rustsecp256k1_v0_4_0_xonly_pubkey xonly_pk, xonly_pk_tmp;
rustsecp256k1_v0_4_0_ge pk1;
rustsecp256k1_v0_4_0_ge pk2;
rustsecp256k1_v0_4_0_fe y;
rustsecp256k1_v0_4_1_pubkey pk;
rustsecp256k1_v0_4_1_xonly_pubkey xonly_pk, xonly_pk_tmp;
rustsecp256k1_v0_4_1_ge pk1;
rustsecp256k1_v0_4_1_ge pk2;
rustsecp256k1_v0_4_1_fe y;
unsigned char sk[32];
unsigned char xy_sk[32];
unsigned char buf32[32];
@ -31,249 +31,286 @@ void test_xonly_pubkey(void) {
int i;
int ecount;
rustsecp256k1_v0_4_0_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount);
rustsecp256k1_v0_4_0_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount);
rustsecp256k1_v0_4_0_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
rustsecp256k1_v0_4_1_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount);
rustsecp256k1_v0_4_1_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount);
rustsecp256k1_v0_4_1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
rustsecp256k1_v0_4_0_testrand256(sk);
rustsecp256k1_v0_4_1_testrand256(sk);
memset(ones32, 0xFF, 32);
rustsecp256k1_v0_4_0_testrand256(xy_sk);
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(sign, &pk, sk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1);
rustsecp256k1_v0_4_1_testrand256(xy_sk);
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(sign, &pk, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1);
/* Test xonly_pubkey_from_pubkey */
ecount = 0;
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(sign, &xonly_pk, &pk_parity, &pk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(verify, &xonly_pk, &pk_parity, &pk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, NULL, &pk_parity, &pk) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(sign, &xonly_pk, &pk_parity, &pk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(verify, &xonly_pk, &pk_parity, &pk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, NULL, &pk_parity, &pk) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, &xonly_pk, NULL, &pk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, &xonly_pk, NULL, &pk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, NULL) == 0);
CHECK(ecount == 2);
memset(&pk, 0, sizeof(pk));
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 0);
CHECK(ecount == 3);
/* Choose a secret key such that the resulting pubkey and xonly_pubkey match. */
memset(sk, 0, sizeof(sk));
sk[0] = 1;
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pk, sk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pk, &xonly_pk, sizeof(pk)) == 0);
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pk, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pk, &xonly_pk, sizeof(pk)) == 0);
CHECK(pk_parity == 0);
/* Choose a secret key such that pubkey and xonly_pubkey are each others
* negation. */
sk[0] = 2;
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pk, sk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&xonly_pk, &pk, sizeof(xonly_pk)) != 0);
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pk, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&xonly_pk, &pk, sizeof(xonly_pk)) != 0);
CHECK(pk_parity == 1);
rustsecp256k1_v0_4_0_pubkey_load(ctx, &pk1, &pk);
rustsecp256k1_v0_4_0_pubkey_load(ctx, &pk2, (rustsecp256k1_v0_4_0_pubkey *) &xonly_pk);
CHECK(rustsecp256k1_v0_4_0_fe_equal(&pk1.x, &pk2.x) == 1);
rustsecp256k1_v0_4_0_fe_negate(&y, &pk2.y, 1);
CHECK(rustsecp256k1_v0_4_0_fe_equal(&pk1.y, &y) == 1);
rustsecp256k1_v0_4_1_pubkey_load(ctx, &pk1, &pk);
rustsecp256k1_v0_4_1_pubkey_load(ctx, &pk2, (rustsecp256k1_v0_4_1_pubkey *) &xonly_pk);
CHECK(rustsecp256k1_v0_4_1_fe_equal(&pk1.x, &pk2.x) == 1);
rustsecp256k1_v0_4_1_fe_negate(&y, &pk2.y, 1);
CHECK(rustsecp256k1_v0_4_1_fe_equal(&pk1.y, &y) == 1);
/* Test xonly_pubkey_serialize and xonly_pubkey_parse */
ecount = 0;
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(none, NULL, &xonly_pk) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(none, NULL, &xonly_pk) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(none, buf32, NULL) == 0);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(buf32, zeros64, 32) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(none, buf32, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(buf32, zeros64, 32) == 0);
CHECK(ecount == 2);
{
/* A pubkey filled with 0s will fail to serialize due to pubkey_load
* special casing. */
rustsecp256k1_v0_4_0_xonly_pubkey pk_tmp;
rustsecp256k1_v0_4_1_xonly_pubkey pk_tmp;
memset(&pk_tmp, 0, sizeof(pk_tmp));
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(none, buf32, &pk_tmp) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(none, buf32, &pk_tmp) == 0);
}
/* pubkey_load called illegal callback */
CHECK(ecount == 3);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(none, buf32, &xonly_pk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(none, buf32, &xonly_pk) == 1);
ecount = 0;
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(none, NULL, buf32) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(none, NULL, buf32) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(none, &xonly_pk, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(none, &xonly_pk, NULL) == 0);
CHECK(ecount == 2);
/* Serialization and parse roundtrip */
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, &xonly_pk, NULL, &pk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, buf32, &xonly_pk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &xonly_pk_tmp, buf32) == 1);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(xonly_pk)) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, &xonly_pk, NULL, &pk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, buf32, &xonly_pk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &xonly_pk_tmp, buf32) == 1);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(xonly_pk)) == 0);
/* Test parsing invalid field elements */
memset(&xonly_pk, 1, sizeof(xonly_pk));
/* Overflowing field element */
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(none, &xonly_pk, ones32) == 0);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(none, &xonly_pk, ones32) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
memset(&xonly_pk, 1, sizeof(xonly_pk));
/* There's no point with x-coordinate 0 on secp256k1 */
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(none, &xonly_pk, zeros64) == 0);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(none, &xonly_pk, zeros64) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
/* If a random 32-byte string can not be parsed with ec_pubkey_parse
* (because interpreted as X coordinate it does not correspond to a point on
* the curve) then xonly_pubkey_parse should fail as well. */
for (i = 0; i < count; i++) {
unsigned char rand33[33];
rustsecp256k1_v0_4_0_testrand256(&rand33[1]);
rustsecp256k1_v0_4_1_testrand256(&rand33[1]);
rand33[0] = SECP256K1_TAG_PUBKEY_EVEN;
if (!rustsecp256k1_v0_4_0_ec_pubkey_parse(ctx, &pk, rand33, 33)) {
if (!rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, &pk, rand33, 33)) {
memset(&xonly_pk, 1, sizeof(xonly_pk));
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 0);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
} else {
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 1);
}
}
CHECK(ecount == 2);
rustsecp256k1_v0_4_0_context_destroy(none);
rustsecp256k1_v0_4_0_context_destroy(sign);
rustsecp256k1_v0_4_0_context_destroy(verify);
rustsecp256k1_v0_4_1_context_destroy(none);
rustsecp256k1_v0_4_1_context_destroy(sign);
rustsecp256k1_v0_4_1_context_destroy(verify);
}
void test_xonly_pubkey_comparison(void) {
unsigned char pk1_ser[32] = {
0x58, 0x84, 0xb3, 0xa2, 0x4b, 0x97, 0x37, 0x88, 0x92, 0x38, 0xa6, 0x26, 0x62, 0x52, 0x35, 0x11,
0xd0, 0x9a, 0xa1, 0x1b, 0x80, 0x0b, 0x5e, 0x93, 0x80, 0x26, 0x11, 0xef, 0x67, 0x4b, 0xd9, 0x23
};
const unsigned char pk2_ser[32] = {
0xde, 0x36, 0x0e, 0x87, 0x59, 0x8f, 0x3c, 0x01, 0x36, 0x2a, 0x2a, 0xb8, 0xc6, 0xf4, 0x5e, 0x4d,
0xb2, 0xc2, 0xd5, 0x03, 0xa7, 0xf9, 0xf1, 0x4f, 0xa8, 0xfa, 0x95, 0xa8, 0xe9, 0x69, 0x76, 0x1c
};
rustsecp256k1_v0_4_1_xonly_pubkey pk1;
rustsecp256k1_v0_4_1_xonly_pubkey pk2;
int ecount = 0;
rustsecp256k1_v0_4_1_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(none, &pk1, pk1_ser) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(none, &pk2, pk2_ser) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_cmp(none, NULL, &pk2) < 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_cmp(none, &pk1, NULL) > 0);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_cmp(none, &pk1, &pk2) < 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_cmp(none, &pk2, &pk1) > 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_cmp(none, &pk1, &pk1) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_cmp(none, &pk2, &pk2) == 0);
CHECK(ecount == 2);
memset(&pk1, 0, sizeof(pk1)); /* illegal pubkey */
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_cmp(none, &pk1, &pk2) < 0);
CHECK(ecount == 3);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_cmp(none, &pk1, &pk1) == 0);
CHECK(ecount == 5);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_cmp(none, &pk2, &pk1) > 0);
CHECK(ecount == 6);
rustsecp256k1_v0_4_1_context_destroy(none);
}
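The comparison test above exercises exactly the property the implementation comment calls out: xonly_pubkey_cmp behaves like a standard three-way comparator, so it can back sorting or searching by serialized-key order. A hedged sketch (the adapter and the global-context variable are illustrative assumptions, not library code):
#include <stdlib.h>
#include "secp256k1.h"             /* assumed include paths */
#include "secp256k1_extrakeys.h"
static const rustsecp256k1_v0_4_1_context *g_cmp_ctx;   /* set before sorting */
static int xonly_cmp_adapter(const void *a, const void *b) {
    return rustsecp256k1_v0_4_1_xonly_pubkey_cmp(g_cmp_ctx,
            (const rustsecp256k1_v0_4_1_xonly_pubkey *)a,
            (const rustsecp256k1_v0_4_1_xonly_pubkey *)b);
}
static void sort_xonly_pubkeys(const rustsecp256k1_v0_4_1_context *ctx,
                               rustsecp256k1_v0_4_1_xonly_pubkey *pks, size_t n) {
    g_cmp_ctx = ctx;
    qsort(pks, n, sizeof(pks[0]), xonly_cmp_adapter);   /* orders by 32-byte serialization */
}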
void test_xonly_pubkey_tweak(void) {
unsigned char zeros64[64] = { 0 };
unsigned char overflows[32];
unsigned char sk[32];
rustsecp256k1_v0_4_0_pubkey internal_pk;
rustsecp256k1_v0_4_0_xonly_pubkey internal_xonly_pk;
rustsecp256k1_v0_4_0_pubkey output_pk;
rustsecp256k1_v0_4_1_pubkey internal_pk;
rustsecp256k1_v0_4_1_xonly_pubkey internal_xonly_pk;
rustsecp256k1_v0_4_1_pubkey output_pk;
int pk_parity;
unsigned char tweak[32];
int i;
int ecount;
rustsecp256k1_v0_4_0_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount);
rustsecp256k1_v0_4_0_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount);
rustsecp256k1_v0_4_0_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
rustsecp256k1_v0_4_1_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount);
rustsecp256k1_v0_4_1_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount);
rustsecp256k1_v0_4_1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
memset(overflows, 0xff, sizeof(overflows));
rustsecp256k1_v0_4_0_testrand256(tweak);
rustsecp256k1_v0_4_0_testrand256(sk);
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &internal_pk, sk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, &internal_xonly_pk, &pk_parity, &internal_pk) == 1);
rustsecp256k1_v0_4_1_testrand256(tweak);
rustsecp256k1_v0_4_1_testrand256(sk);
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &internal_pk, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, &internal_xonly_pk, &pk_parity, &internal_pk) == 1);
ecount = 0;
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(none, &output_pk, &internal_xonly_pk, tweak) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(none, &output_pk, &internal_xonly_pk, tweak) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(sign, &output_pk, &internal_xonly_pk, tweak) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(sign, &output_pk, &internal_xonly_pk, tweak) == 0);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, NULL, &internal_xonly_pk, tweak) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, NULL, &internal_xonly_pk, tweak) == 0);
CHECK(ecount == 3);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, &output_pk, NULL, tweak) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, &output_pk, NULL, tweak) == 0);
CHECK(ecount == 4);
/* NULL internal_xonly_pk zeroes the output_pk */
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, NULL) == 0);
CHECK(ecount == 5);
/* NULL tweak zeroes the output_pk */
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
/* Invalid tweak zeroes the output_pk */
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, overflows) == 0);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, overflows) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
/* A zero tweak is fine */
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, zeros64) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, zeros64) == 1);
/* Fails if the resulting key was infinity */
for (i = 0; i < count; i++) {
rustsecp256k1_v0_4_0_scalar scalar_tweak;
rustsecp256k1_v0_4_1_scalar scalar_tweak;
/* Because sk may be negated before adding, we need to try with tweak =
* sk as well as tweak = -sk. */
rustsecp256k1_v0_4_0_scalar_set_b32(&scalar_tweak, sk, NULL);
rustsecp256k1_v0_4_0_scalar_negate(&scalar_tweak, &scalar_tweak);
rustsecp256k1_v0_4_0_scalar_get_b32(tweak, &scalar_tweak);
CHECK((rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, sk) == 0)
|| (rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 0));
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
rustsecp256k1_v0_4_1_scalar_set_b32(&scalar_tweak, sk, NULL);
rustsecp256k1_v0_4_1_scalar_negate(&scalar_tweak, &scalar_tweak);
rustsecp256k1_v0_4_1_scalar_get_b32(tweak, &scalar_tweak);
CHECK((rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, sk) == 0)
|| (rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 0));
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
}
/* Invalid pk with a valid tweak */
memset(&internal_xonly_pk, 0, sizeof(internal_xonly_pk));
rustsecp256k1_v0_4_0_testrand256(tweak);
rustsecp256k1_v0_4_1_testrand256(tweak);
ecount = 0;
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
rustsecp256k1_v0_4_0_context_destroy(none);
rustsecp256k1_v0_4_0_context_destroy(sign);
rustsecp256k1_v0_4_0_context_destroy(verify);
rustsecp256k1_v0_4_1_context_destroy(none);
rustsecp256k1_v0_4_1_context_destroy(sign);
rustsecp256k1_v0_4_1_context_destroy(verify);
}
void test_xonly_pubkey_tweak_check(void) {
unsigned char zeros64[64] = { 0 };
unsigned char overflows[32];
unsigned char sk[32];
rustsecp256k1_v0_4_0_pubkey internal_pk;
rustsecp256k1_v0_4_0_xonly_pubkey internal_xonly_pk;
rustsecp256k1_v0_4_0_pubkey output_pk;
rustsecp256k1_v0_4_0_xonly_pubkey output_xonly_pk;
rustsecp256k1_v0_4_1_pubkey internal_pk;
rustsecp256k1_v0_4_1_xonly_pubkey internal_xonly_pk;
rustsecp256k1_v0_4_1_pubkey output_pk;
rustsecp256k1_v0_4_1_xonly_pubkey output_xonly_pk;
unsigned char output_pk32[32];
unsigned char buf32[32];
int pk_parity;
unsigned char tweak[32];
int ecount;
rustsecp256k1_v0_4_0_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount);
rustsecp256k1_v0_4_0_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount);
rustsecp256k1_v0_4_0_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
rustsecp256k1_v0_4_1_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount);
rustsecp256k1_v0_4_1_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount);
rustsecp256k1_v0_4_1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
memset(overflows, 0xff, sizeof(overflows));
rustsecp256k1_v0_4_0_testrand256(tweak);
rustsecp256k1_v0_4_0_testrand256(sk);
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &internal_pk, sk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, &internal_xonly_pk, &pk_parity, &internal_pk) == 1);
rustsecp256k1_v0_4_1_testrand256(tweak);
rustsecp256k1_v0_4_1_testrand256(sk);
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &internal_pk, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, &internal_xonly_pk, &pk_parity, &internal_pk) == 1);
ecount = 0;
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(verify, &output_xonly_pk, &pk_parity, &output_pk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, buf32, &output_xonly_pk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(none, buf32, pk_parity, &internal_xonly_pk, tweak) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(verify, &output_xonly_pk, &pk_parity, &output_pk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, buf32, &output_xonly_pk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(none, buf32, pk_parity, &internal_xonly_pk, tweak) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(sign, buf32, pk_parity, &internal_xonly_pk, tweak) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(sign, buf32, pk_parity, &internal_xonly_pk, tweak) == 0);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(verify, buf32, pk_parity, &internal_xonly_pk, tweak) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(verify, NULL, pk_parity, &internal_xonly_pk, tweak) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(verify, buf32, pk_parity, &internal_xonly_pk, tweak) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(verify, NULL, pk_parity, &internal_xonly_pk, tweak) == 0);
CHECK(ecount == 3);
/* invalid pk_parity value */
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(verify, buf32, 2, &internal_xonly_pk, tweak) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(verify, buf32, 2, &internal_xonly_pk, tweak) == 0);
CHECK(ecount == 3);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(verify, buf32, pk_parity, NULL, tweak) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(verify, buf32, pk_parity, NULL, tweak) == 0);
CHECK(ecount == 4);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(verify, buf32, pk_parity, &internal_xonly_pk, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(verify, buf32, pk_parity, &internal_xonly_pk, NULL) == 0);
CHECK(ecount == 5);
memset(tweak, 1, sizeof(tweak));
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(ctx, &internal_xonly_pk, NULL, &internal_pk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, tweak) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(ctx, &output_xonly_pk, &pk_parity, &output_pk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, output_pk32, &output_xonly_pk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(ctx, output_pk32, pk_parity, &internal_xonly_pk, tweak) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(ctx, &internal_xonly_pk, NULL, &internal_pk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, tweak) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(ctx, &output_xonly_pk, &pk_parity, &output_pk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, output_pk32, &output_xonly_pk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(ctx, output_pk32, pk_parity, &internal_xonly_pk, tweak) == 1);
/* Wrong pk_parity */
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(ctx, output_pk32, !pk_parity, &internal_xonly_pk, tweak) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(ctx, output_pk32, !pk_parity, &internal_xonly_pk, tweak) == 0);
/* Wrong public key */
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, buf32, &internal_xonly_pk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(ctx, buf32, pk_parity, &internal_xonly_pk, tweak) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, buf32, &internal_xonly_pk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(ctx, buf32, pk_parity, &internal_xonly_pk, tweak) == 0);
/* Overflowing tweak not allowed */
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(ctx, output_pk32, pk_parity, &internal_xonly_pk, overflows) == 0);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, overflows) == 0);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(ctx, output_pk32, pk_parity, &internal_xonly_pk, overflows) == 0);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, overflows) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
CHECK(ecount == 5);
rustsecp256k1_v0_4_0_context_destroy(none);
rustsecp256k1_v0_4_0_context_destroy(sign);
rustsecp256k1_v0_4_0_context_destroy(verify);
rustsecp256k1_v0_4_1_context_destroy(none);
rustsecp256k1_v0_4_1_context_destroy(sign);
rustsecp256k1_v0_4_1_context_destroy(verify);
}
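Taken together, the two tweak tests above pin down the intended flow: derive an x-only key from an internal public key, add a 32-byte tweak to obtain the output key, and later re-verify that relationship from nothing more than the output key's 32-byte serialization and its parity bit. A minimal sketch of that flow, assuming a context created with SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY and the vendored 0.4.1 symbol prefix used throughout this diff (the helper name is illustrative only):

static int tweak_then_check(const rustsecp256k1_v0_4_1_context *ctx, const unsigned char *sk32, const unsigned char *tweak32) {
    rustsecp256k1_v0_4_1_pubkey internal_pk, output_pk;
    rustsecp256k1_v0_4_1_xonly_pubkey internal_xonly_pk, output_xonly_pk;
    unsigned char output_pk32[32];
    int pk_parity;
    /* Internal key from the secret key, then its x-only form. */
    if (!rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &internal_pk, sk32)) return 0;
    if (!rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(ctx, &internal_xonly_pk, NULL, &internal_pk)) return 0;
    /* Tweaked output key (a full pubkey), then its x-only serialization and parity. */
    if (!rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, tweak32)) return 0;
    if (!rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(ctx, &output_xonly_pk, &pk_parity, &output_pk)) return 0;
    if (!rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, output_pk32, &output_xonly_pk)) return 0;
    /* A verifier holding only the 32-byte output key and its parity can re-check the tweak. */
    return rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(ctx, output_pk32, pk_parity, &internal_xonly_pk, tweak32);
}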
/* Starts with an initial pubkey and recursively creates N_PUBKEYS - 1
@ -282,231 +319,256 @@ void test_xonly_pubkey_tweak_check(void) {
#define N_PUBKEYS 32
void test_xonly_pubkey_tweak_recursive(void) {
unsigned char sk[32];
rustsecp256k1_v0_4_0_pubkey pk[N_PUBKEYS];
rustsecp256k1_v0_4_1_pubkey pk[N_PUBKEYS];
unsigned char pk_serialized[32];
unsigned char tweak[N_PUBKEYS - 1][32];
int i;
rustsecp256k1_v0_4_0_testrand256(sk);
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pk[0], sk) == 1);
rustsecp256k1_v0_4_1_testrand256(sk);
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pk[0], sk) == 1);
/* Add tweaks */
for (i = 0; i < N_PUBKEYS - 1; i++) {
rustsecp256k1_v0_4_0_xonly_pubkey xonly_pk;
rustsecp256k1_v0_4_1_xonly_pubkey xonly_pk;
memset(tweak[i], i + 1, sizeof(tweak[i]));
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, NULL, &pk[i]) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(ctx, &pk[i + 1], &xonly_pk, tweak[i]) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, NULL, &pk[i]) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(ctx, &pk[i + 1], &xonly_pk, tweak[i]) == 1);
}
/* Verify tweaks */
for (i = N_PUBKEYS - 1; i > 0; i--) {
rustsecp256k1_v0_4_0_xonly_pubkey xonly_pk;
rustsecp256k1_v0_4_1_xonly_pubkey xonly_pk;
int pk_parity;
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk[i]) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, pk_serialized, &xonly_pk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, NULL, &pk[i - 1]) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(ctx, pk_serialized, pk_parity, &xonly_pk, tweak[i - 1]) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk[i]) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, pk_serialized, &xonly_pk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, NULL, &pk[i - 1]) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(ctx, pk_serialized, pk_parity, &xonly_pk, tweak[i - 1]) == 1);
}
}
#undef N_PUBKEYS
void test_keypair(void) {
unsigned char sk[32];
unsigned char sk_tmp[32];
unsigned char zeros96[96] = { 0 };
unsigned char overflows[32];
rustsecp256k1_v0_4_0_keypair keypair;
rustsecp256k1_v0_4_0_pubkey pk, pk_tmp;
rustsecp256k1_v0_4_0_xonly_pubkey xonly_pk, xonly_pk_tmp;
rustsecp256k1_v0_4_1_keypair keypair;
rustsecp256k1_v0_4_1_pubkey pk, pk_tmp;
rustsecp256k1_v0_4_1_xonly_pubkey xonly_pk, xonly_pk_tmp;
int pk_parity, pk_parity_tmp;
int ecount;
rustsecp256k1_v0_4_0_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount);
rustsecp256k1_v0_4_0_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount);
rustsecp256k1_v0_4_0_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
rustsecp256k1_v0_4_1_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount);
rustsecp256k1_v0_4_1_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount);
rustsecp256k1_v0_4_1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
CHECK(sizeof(zeros96) == sizeof(keypair));
memset(overflows, 0xFF, sizeof(overflows));
/* Test keypair_create */
ecount = 0;
rustsecp256k1_v0_4_0_testrand256(sk);
CHECK(rustsecp256k1_v0_4_0_keypair_create(none, &keypair, sk) == 0);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
rustsecp256k1_v0_4_1_testrand256(sk);
CHECK(rustsecp256k1_v0_4_1_keypair_create(none, &keypair, sk) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_create(verify, &keypair, sk) == 0);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_create(verify, &keypair, sk) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_0_keypair_create(sign, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_create(sign, NULL, sk) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_create(sign, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_create(sign, NULL, sk) == 0);
CHECK(ecount == 3);
CHECK(rustsecp256k1_v0_4_0_keypair_create(sign, &keypair, NULL) == 0);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_create(sign, &keypair, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(ecount == 4);
/* Invalid secret key */
CHECK(rustsecp256k1_v0_4_0_keypair_create(sign, &keypair, zeros96) == 0);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(rustsecp256k1_v0_4_0_keypair_create(sign, &keypair, overflows) == 0);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_create(sign, &keypair, zeros96) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_create(sign, &keypair, overflows) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
/* Test keypair_pub */
ecount = 0;
rustsecp256k1_v0_4_0_testrand256(sk);
CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_pub(none, &pk, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_pub(none, NULL, &keypair) == 0);
rustsecp256k1_v0_4_1_testrand256(sk);
CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_pub(none, &pk, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_pub(none, NULL, &keypair) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_pub(none, &pk, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_pub(none, &pk, NULL) == 0);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros96, &pk, sizeof(pk)) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, &pk, sizeof(pk)) == 0);
/* Using an invalid keypair is fine for keypair_pub */
memset(&keypair, 0, sizeof(keypair));
CHECK(rustsecp256k1_v0_4_0_keypair_pub(none, &pk, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros96, &pk, sizeof(pk)) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_pub(none, &pk, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, &pk, sizeof(pk)) == 0);
/* keypair holds the same pubkey as pubkey_create */
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(sign, &pk, sk) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_create(sign, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_pub(none, &pk_tmp, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pk, &pk_tmp, sizeof(pk)) == 0);
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(sign, &pk, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_create(sign, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_pub(none, &pk_tmp, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pk, &pk_tmp, sizeof(pk)) == 0);
/** Test keypair_xonly_pub **/
ecount = 0;
rustsecp256k1_v0_4_0_testrand256(sk);
CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(none, NULL, &pk_parity, &keypair) == 0);
rustsecp256k1_v0_4_1_testrand256(sk);
CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(none, NULL, &pk_parity, &keypair) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(none, &xonly_pk, NULL, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(none, &xonly_pk, &pk_parity, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(none, &xonly_pk, NULL, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, NULL) == 0);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0);
/* Using an invalid keypair will set the xonly_pk to 0 (first reset
* xonly_pk). */
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 1);
memset(&keypair, 0, sizeof(keypair));
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 0);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0);
CHECK(ecount == 3);
/** keypair holds the same xonly pubkey as pubkey_create **/
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(sign, &pk, sk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_create(sign, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(none, &xonly_pk_tmp, &pk_parity_tmp, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(pk)) == 0);
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(sign, &pk, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_create(sign, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(none, &xonly_pk_tmp, &pk_parity_tmp, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(pk)) == 0);
CHECK(pk_parity == pk_parity_tmp);
rustsecp256k1_v0_4_0_context_destroy(none);
rustsecp256k1_v0_4_0_context_destroy(sign);
rustsecp256k1_v0_4_0_context_destroy(verify);
/* Test keypair_seckey */
ecount = 0;
rustsecp256k1_v0_4_1_testrand256(sk);
CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_sec(none, sk_tmp, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_sec(none, NULL, &keypair) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_sec(none, sk_tmp, NULL) == 0);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, sk_tmp, sizeof(sk_tmp)) == 0);
/* keypair returns the same seckey it got */
CHECK(rustsecp256k1_v0_4_1_keypair_create(sign, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_sec(none, sk_tmp, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(sk, sk_tmp, sizeof(sk_tmp)) == 0);
/* Using an invalid keypair is fine for keypair_seckey */
memset(&keypair, 0, sizeof(keypair));
CHECK(rustsecp256k1_v0_4_1_keypair_sec(none, sk_tmp, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(zeros96, sk_tmp, sizeof(sk_tmp)) == 0);
rustsecp256k1_v0_4_1_context_destroy(none);
rustsecp256k1_v0_4_1_context_destroy(sign);
rustsecp256k1_v0_4_1_context_destroy(verify);
}
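The keypair_sec checks added above establish that a keypair behaves as a plain (seckey, pubkey) container: keypair_sec hands back exactly the 32-byte secret the keypair was created from, and keypair_pub the matching public key. A short round-trip sketch under the same assumptions as before (a SIGN-capable context, the vendored 0.4.1 prefix, an illustrative helper name):

static int keypair_roundtrip(const rustsecp256k1_v0_4_1_context *ctx, const unsigned char *sk32) {
    rustsecp256k1_v0_4_1_keypair keypair;
    rustsecp256k1_v0_4_1_pubkey pk;
    unsigned char sk_out[32];
    if (!rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk32)) return 0; /* requires a signing context */
    if (!rustsecp256k1_v0_4_1_keypair_pub(ctx, &pk, &keypair)) return 0;     /* public half */
    if (!rustsecp256k1_v0_4_1_keypair_sec(ctx, sk_out, &keypair)) return 0;  /* secret half */
    /* The extracted secret is byte-for-byte the one the keypair was built from. */
    return rustsecp256k1_v0_4_1_memcmp_var(sk_out, sk32, 32) == 0;
}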
void test_keypair_add(void) {
unsigned char sk[32];
rustsecp256k1_v0_4_0_keypair keypair;
rustsecp256k1_v0_4_1_keypair keypair;
unsigned char overflows[32];
unsigned char zeros96[96] = { 0 };
unsigned char tweak[32];
int i;
int ecount = 0;
rustsecp256k1_v0_4_0_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount);
rustsecp256k1_v0_4_0_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount);
rustsecp256k1_v0_4_0_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
rustsecp256k1_v0_4_1_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount);
rustsecp256k1_v0_4_1_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount);
rustsecp256k1_v0_4_1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
CHECK(sizeof(zeros96) == sizeof(keypair));
rustsecp256k1_v0_4_0_testrand256(sk);
rustsecp256k1_v0_4_0_testrand256(tweak);
rustsecp256k1_v0_4_1_testrand256(sk);
rustsecp256k1_v0_4_1_testrand256(tweak);
memset(overflows, 0xFF, 32);
CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(none, &keypair, tweak) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(none, &keypair, tweak) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(sign, &keypair, tweak) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(sign, &keypair, tweak) == 0);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(verify, &keypair, tweak) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(verify, NULL, tweak) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(verify, &keypair, tweak) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(verify, NULL, tweak) == 0);
CHECK(ecount == 3);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(verify, &keypair, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(verify, &keypair, NULL) == 0);
CHECK(ecount == 4);
/* This does not set the keypair to zeroes */
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) != 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&keypair, zeros96, sizeof(keypair)) != 0);
/* Invalid tweak zeroes the keypair */
CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(ctx, &keypair, overflows) == 0);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(ctx, &keypair, overflows) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0);
/* A zero tweak is fine */
CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(ctx, &keypair, zeros96) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(ctx, &keypair, zeros96) == 1);
/* Fails if the resulting keypair was (sk=0, pk=infinity) */
for (i = 0; i < count; i++) {
rustsecp256k1_v0_4_0_scalar scalar_tweak;
rustsecp256k1_v0_4_0_keypair keypair_tmp;
rustsecp256k1_v0_4_0_testrand256(sk);
CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1);
rustsecp256k1_v0_4_1_scalar scalar_tweak;
rustsecp256k1_v0_4_1_keypair keypair_tmp;
rustsecp256k1_v0_4_1_testrand256(sk);
CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1);
memcpy(&keypair_tmp, &keypair, sizeof(keypair));
/* Because sk may be negated before adding, we need to try with tweak =
* sk as well as tweak = -sk. */
rustsecp256k1_v0_4_0_scalar_set_b32(&scalar_tweak, sk, NULL);
rustsecp256k1_v0_4_0_scalar_negate(&scalar_tweak, &scalar_tweak);
rustsecp256k1_v0_4_0_scalar_get_b32(tweak, &scalar_tweak);
CHECK((rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(ctx, &keypair, sk) == 0)
|| (rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(ctx, &keypair_tmp, tweak) == 0));
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0
|| rustsecp256k1_v0_4_0_memcmp_var(&keypair_tmp, zeros96, sizeof(keypair_tmp)) == 0);
rustsecp256k1_v0_4_1_scalar_set_b32(&scalar_tweak, sk, NULL);
rustsecp256k1_v0_4_1_scalar_negate(&scalar_tweak, &scalar_tweak);
rustsecp256k1_v0_4_1_scalar_get_b32(tweak, &scalar_tweak);
CHECK((rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(ctx, &keypair, sk) == 0)
|| (rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(ctx, &keypair_tmp, tweak) == 0));
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0
|| rustsecp256k1_v0_4_1_memcmp_var(&keypair_tmp, zeros96, sizeof(keypair_tmp)) == 0);
}
/* Invalid keypair with a valid tweak */
memset(&keypair, 0, sizeof(keypair));
rustsecp256k1_v0_4_0_testrand256(tweak);
rustsecp256k1_v0_4_1_testrand256(tweak);
ecount = 0;
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0);
/* Only seckey part of keypair invalid */
CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1);
memset(&keypair, 0, 32);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0);
CHECK(ecount == 2);
/* Only pubkey part of keypair invalid */
CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1);
memset(&keypair.data[32], 0, 64);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0);
CHECK(ecount == 3);
/* Check that the keypair_tweak_add implementation is correct */
CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1);
for (i = 0; i < count; i++) {
rustsecp256k1_v0_4_0_xonly_pubkey internal_pk;
rustsecp256k1_v0_4_0_xonly_pubkey output_pk;
rustsecp256k1_v0_4_0_pubkey output_pk_xy;
rustsecp256k1_v0_4_0_pubkey output_pk_expected;
rustsecp256k1_v0_4_1_xonly_pubkey internal_pk;
rustsecp256k1_v0_4_1_xonly_pubkey output_pk;
rustsecp256k1_v0_4_1_pubkey output_pk_xy;
rustsecp256k1_v0_4_1_pubkey output_pk_expected;
unsigned char pk32[32];
unsigned char sk32[32];
int pk_parity;
rustsecp256k1_v0_4_0_testrand256(tweak);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &output_pk, &pk_parity, &keypair) == 1);
rustsecp256k1_v0_4_1_testrand256(tweak);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &output_pk, &pk_parity, &keypair) == 1);
/* Check that it passes xonly_pubkey_tweak_add_check */
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, pk32, &output_pk) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(ctx, pk32, pk_parity, &internal_pk, tweak) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, pk32, &output_pk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(ctx, pk32, pk_parity, &internal_pk, tweak) == 1);
/* Check that the resulting pubkey matches xonly_pubkey_tweak_add */
CHECK(rustsecp256k1_v0_4_0_keypair_pub(ctx, &output_pk_xy, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add(ctx, &output_pk_expected, &internal_pk, tweak) == 1);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_pub(ctx, &output_pk_xy, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(ctx, &output_pk_expected, &internal_pk, tweak) == 1);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0);
/* Check that the secret key in the keypair is tweaked correctly */
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &output_pk_expected, &keypair.data[0]) == 1);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_sec(none, sk32, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &output_pk_expected, sk32) == 1);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0);
}
rustsecp256k1_v0_4_0_context_destroy(none);
rustsecp256k1_v0_4_0_context_destroy(sign);
rustsecp256k1_v0_4_0_context_destroy(verify);
rustsecp256k1_v0_4_1_context_destroy(none);
rustsecp256k1_v0_4_1_context_destroy(sign);
rustsecp256k1_v0_4_1_context_destroy(verify);
}
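The loop above is the consistency core of keypair_xonly_tweak_add: after the tweak, the keypair's public half must equal xonly_pubkey_tweak_add applied to the pre-tweak x-only key, and the (now extractable) secret half must regenerate that same point. Condensed into a single illustrative check, with the same context assumptions as the sketches above (SIGN | VERIFY):

static int keypair_tweak_is_consistent(const rustsecp256k1_v0_4_1_context *ctx, rustsecp256k1_v0_4_1_keypair *keypair, const unsigned char *tweak32) {
    rustsecp256k1_v0_4_1_xonly_pubkey internal_pk;
    rustsecp256k1_v0_4_1_pubkey tweaked_pk, expected_pk, from_sk_pk;
    unsigned char sk32[32];
    if (!rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &internal_pk, NULL, keypair)) return 0;
    if (!rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(ctx, keypair, tweak32)) return 0;
    /* Public half must match xonly_pubkey_tweak_add on the pre-tweak x-only key... */
    if (!rustsecp256k1_v0_4_1_keypair_pub(ctx, &tweaked_pk, keypair)) return 0;
    if (!rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add(ctx, &expected_pk, &internal_pk, tweak32)) return 0;
    if (rustsecp256k1_v0_4_1_memcmp_var(&tweaked_pk, &expected_pk, sizeof(tweaked_pk)) != 0) return 0;
    /* ...and the tweaked secret must regenerate the same public key. */
    if (!rustsecp256k1_v0_4_1_keypair_sec(ctx, sk32, keypair)) return 0;
    if (!rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &from_sk_pk, sk32)) return 0;
    return rustsecp256k1_v0_4_1_memcmp_var(&tweaked_pk, &from_sk_pk, sizeof(tweaked_pk)) == 0;
}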
void run_extrakeys_tests(void) {
@ -515,6 +577,7 @@ void run_extrakeys_tests(void) {
test_xonly_pubkey_tweak();
test_xonly_pubkey_tweak_check();
test_xonly_pubkey_tweak_recursive();
test_xonly_pubkey_comparison();
/* keypair tests */
test_keypair();



@ -1,4 +1,4 @@
include_HEADERS += include/rustsecp256k1_v0_4_0_recovery.h
include_HEADERS += include/rustsecp256k1_v0_4_1_recovery.h
noinst_HEADERS += src/modules/recovery/main_impl.h
noinst_HEADERS += src/modules/recovery/tests_impl.h
noinst_HEADERS += src/modules/recovery/tests_exhaustive_impl.h


@ -7,36 +7,36 @@
#ifndef SECP256K1_MODULE_RECOVERY_MAIN_H
#define SECP256K1_MODULE_RECOVERY_MAIN_H
#include "include/secp256k1_recovery.h"
#include "../../../include/secp256k1_recovery.h"
static void rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_load(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_scalar* r, rustsecp256k1_v0_4_0_scalar* s, int* recid, const rustsecp256k1_v0_4_0_ecdsa_recoverable_signature* sig) {
static void rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_load(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_scalar* r, rustsecp256k1_v0_4_1_scalar* s, int* recid, const rustsecp256k1_v0_4_1_ecdsa_recoverable_signature* sig) {
(void)ctx;
if (sizeof(rustsecp256k1_v0_4_0_scalar) == 32) {
/* When the rustsecp256k1_v0_4_0_scalar type is exactly 32 byte, use its
* representation inside rustsecp256k1_v0_4_0_ecdsa_signature, as conversion is very fast.
* Note that rustsecp256k1_v0_4_0_ecdsa_signature_save must use the same representation. */
if (sizeof(rustsecp256k1_v0_4_1_scalar) == 32) {
/* When the rustsecp256k1_v0_4_1_scalar type is exactly 32 byte, use its
* representation inside rustsecp256k1_v0_4_1_ecdsa_signature, as conversion is very fast.
* Note that rustsecp256k1_v0_4_1_ecdsa_signature_save must use the same representation. */
memcpy(r, &sig->data[0], 32);
memcpy(s, &sig->data[32], 32);
} else {
rustsecp256k1_v0_4_0_scalar_set_b32(r, &sig->data[0], NULL);
rustsecp256k1_v0_4_0_scalar_set_b32(s, &sig->data[32], NULL);
rustsecp256k1_v0_4_1_scalar_set_b32(r, &sig->data[0], NULL);
rustsecp256k1_v0_4_1_scalar_set_b32(s, &sig->data[32], NULL);
}
*recid = sig->data[64];
}
static void rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_save(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature* sig, const rustsecp256k1_v0_4_0_scalar* r, const rustsecp256k1_v0_4_0_scalar* s, int recid) {
if (sizeof(rustsecp256k1_v0_4_0_scalar) == 32) {
static void rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_save(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature* sig, const rustsecp256k1_v0_4_1_scalar* r, const rustsecp256k1_v0_4_1_scalar* s, int recid) {
if (sizeof(rustsecp256k1_v0_4_1_scalar) == 32) {
memcpy(&sig->data[0], r, 32);
memcpy(&sig->data[32], s, 32);
} else {
rustsecp256k1_v0_4_0_scalar_get_b32(&sig->data[0], r);
rustsecp256k1_v0_4_0_scalar_get_b32(&sig->data[32], s);
rustsecp256k1_v0_4_1_scalar_get_b32(&sig->data[0], r);
rustsecp256k1_v0_4_1_scalar_get_b32(&sig->data[32], s);
}
sig->data[64] = recid;
}
int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid) {
rustsecp256k1_v0_4_0_scalar r, s;
int rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid) {
rustsecp256k1_v0_4_1_scalar r, s;
int ret = 1;
int overflow = 0;
@ -45,111 +45,111 @@ int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(const rustsec
ARG_CHECK(input64 != NULL);
ARG_CHECK(recid >= 0 && recid <= 3);
rustsecp256k1_v0_4_0_scalar_set_b32(&r, &input64[0], &overflow);
rustsecp256k1_v0_4_1_scalar_set_b32(&r, &input64[0], &overflow);
ret &= !overflow;
rustsecp256k1_v0_4_0_scalar_set_b32(&s, &input64[32], &overflow);
rustsecp256k1_v0_4_1_scalar_set_b32(&s, &input64[32], &overflow);
ret &= !overflow;
if (ret) {
rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_save(sig, &r, &s, recid);
rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_save(sig, &r, &s, recid);
} else {
memset(sig, 0, sizeof(*sig));
}
return ret;
}
int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_compact(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *output64, int *recid, const rustsecp256k1_v0_4_0_ecdsa_recoverable_signature* sig) {
rustsecp256k1_v0_4_0_scalar r, s;
int rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output64, int *recid, const rustsecp256k1_v0_4_1_ecdsa_recoverable_signature* sig) {
rustsecp256k1_v0_4_1_scalar r, s;
(void)ctx;
ARG_CHECK(output64 != NULL);
ARG_CHECK(sig != NULL);
ARG_CHECK(recid != NULL);
rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_load(ctx, &r, &s, recid, sig);
rustsecp256k1_v0_4_0_scalar_get_b32(&output64[0], &r);
rustsecp256k1_v0_4_0_scalar_get_b32(&output64[32], &s);
rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_load(ctx, &r, &s, recid, sig);
rustsecp256k1_v0_4_1_scalar_get_b32(&output64[0], &r);
rustsecp256k1_v0_4_1_scalar_get_b32(&output64[32], &s);
return 1;
}
int rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_signature* sig, const rustsecp256k1_v0_4_0_ecdsa_recoverable_signature* sigin) {
rustsecp256k1_v0_4_0_scalar r, s;
int rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_signature* sig, const rustsecp256k1_v0_4_1_ecdsa_recoverable_signature* sigin) {
rustsecp256k1_v0_4_1_scalar r, s;
int recid;
(void)ctx;
ARG_CHECK(sig != NULL);
ARG_CHECK(sigin != NULL);
rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, sigin);
rustsecp256k1_v0_4_0_ecdsa_signature_save(sig, &r, &s);
rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, sigin);
rustsecp256k1_v0_4_1_ecdsa_signature_save(sig, &r, &s);
return 1;
}
static int rustsecp256k1_v0_4_0_ecdsa_sig_recover(const rustsecp256k1_v0_4_0_ecmult_context *ctx, const rustsecp256k1_v0_4_0_scalar *sigr, const rustsecp256k1_v0_4_0_scalar* sigs, rustsecp256k1_v0_4_0_ge *pubkey, const rustsecp256k1_v0_4_0_scalar *message, int recid) {
static int rustsecp256k1_v0_4_1_ecdsa_sig_recover(const rustsecp256k1_v0_4_1_ecmult_context *ctx, const rustsecp256k1_v0_4_1_scalar *sigr, const rustsecp256k1_v0_4_1_scalar* sigs, rustsecp256k1_v0_4_1_ge *pubkey, const rustsecp256k1_v0_4_1_scalar *message, int recid) {
unsigned char brx[32];
rustsecp256k1_v0_4_0_fe fx;
rustsecp256k1_v0_4_0_ge x;
rustsecp256k1_v0_4_0_gej xj;
rustsecp256k1_v0_4_0_scalar rn, u1, u2;
rustsecp256k1_v0_4_0_gej qj;
rustsecp256k1_v0_4_1_fe fx;
rustsecp256k1_v0_4_1_ge x;
rustsecp256k1_v0_4_1_gej xj;
rustsecp256k1_v0_4_1_scalar rn, u1, u2;
rustsecp256k1_v0_4_1_gej qj;
int r;
if (rustsecp256k1_v0_4_0_scalar_is_zero(sigr) || rustsecp256k1_v0_4_0_scalar_is_zero(sigs)) {
if (rustsecp256k1_v0_4_1_scalar_is_zero(sigr) || rustsecp256k1_v0_4_1_scalar_is_zero(sigs)) {
return 0;
}
rustsecp256k1_v0_4_0_scalar_get_b32(brx, sigr);
r = rustsecp256k1_v0_4_0_fe_set_b32(&fx, brx);
rustsecp256k1_v0_4_1_scalar_get_b32(brx, sigr);
r = rustsecp256k1_v0_4_1_fe_set_b32(&fx, brx);
(void)r;
VERIFY_CHECK(r); /* brx comes from a scalar, so is less than the order; certainly less than p */
if (recid & 2) {
if (rustsecp256k1_v0_4_0_fe_cmp_var(&fx, &rustsecp256k1_v0_4_0_ecdsa_const_p_minus_order) >= 0) {
if (rustsecp256k1_v0_4_1_fe_cmp_var(&fx, &rustsecp256k1_v0_4_1_ecdsa_const_p_minus_order) >= 0) {
return 0;
}
rustsecp256k1_v0_4_0_fe_add(&fx, &rustsecp256k1_v0_4_0_ecdsa_const_order_as_fe);
rustsecp256k1_v0_4_1_fe_add(&fx, &rustsecp256k1_v0_4_1_ecdsa_const_order_as_fe);
}
if (!rustsecp256k1_v0_4_0_ge_set_xo_var(&x, &fx, recid & 1)) {
if (!rustsecp256k1_v0_4_1_ge_set_xo_var(&x, &fx, recid & 1)) {
return 0;
}
rustsecp256k1_v0_4_0_gej_set_ge(&xj, &x);
rustsecp256k1_v0_4_0_scalar_inverse_var(&rn, sigr);
rustsecp256k1_v0_4_0_scalar_mul(&u1, &rn, message);
rustsecp256k1_v0_4_0_scalar_negate(&u1, &u1);
rustsecp256k1_v0_4_0_scalar_mul(&u2, &rn, sigs);
rustsecp256k1_v0_4_0_ecmult(ctx, &qj, &xj, &u2, &u1);
rustsecp256k1_v0_4_0_ge_set_gej_var(pubkey, &qj);
return !rustsecp256k1_v0_4_0_gej_is_infinity(&qj);
rustsecp256k1_v0_4_1_gej_set_ge(&xj, &x);
rustsecp256k1_v0_4_1_scalar_inverse_var(&rn, sigr);
rustsecp256k1_v0_4_1_scalar_mul(&u1, &rn, message);
rustsecp256k1_v0_4_1_scalar_negate(&u1, &u1);
rustsecp256k1_v0_4_1_scalar_mul(&u2, &rn, sigs);
rustsecp256k1_v0_4_1_ecmult(ctx, &qj, &xj, &u2, &u1);
rustsecp256k1_v0_4_1_ge_set_gej_var(pubkey, &qj);
return !rustsecp256k1_v0_4_1_gej_is_infinity(&qj);
}
int rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_recoverable_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_4_0_nonce_function noncefp, const void* noncedata) {
rustsecp256k1_v0_4_0_scalar r, s;
int rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_recoverable_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_4_1_nonce_function noncefp, const void* noncedata) {
rustsecp256k1_v0_4_1_scalar r, s;
int ret, recid;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
ARG_CHECK(msghash32 != NULL);
ARG_CHECK(signature != NULL);
ARG_CHECK(seckey != NULL);
ret = rustsecp256k1_v0_4_0_ecdsa_sign_inner(ctx, &r, &s, &recid, msghash32, seckey, noncefp, noncedata);
rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_save(signature, &r, &s, recid);
ret = rustsecp256k1_v0_4_1_ecdsa_sign_inner(ctx, &r, &s, &recid, msghash32, seckey, noncefp, noncedata);
rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_save(signature, &r, &s, recid);
return ret;
}
int rustsecp256k1_v0_4_0_ecdsa_recover(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *pubkey, const rustsecp256k1_v0_4_0_ecdsa_recoverable_signature *signature, const unsigned char *msghash32) {
rustsecp256k1_v0_4_0_ge q;
rustsecp256k1_v0_4_0_scalar r, s;
rustsecp256k1_v0_4_0_scalar m;
int rustsecp256k1_v0_4_1_ecdsa_recover(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *pubkey, const rustsecp256k1_v0_4_1_ecdsa_recoverable_signature *signature, const unsigned char *msghash32) {
rustsecp256k1_v0_4_1_ge q;
rustsecp256k1_v0_4_1_scalar r, s;
rustsecp256k1_v0_4_1_scalar m;
int recid;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(msghash32 != NULL);
ARG_CHECK(signature != NULL);
ARG_CHECK(pubkey != NULL);
rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, signature);
rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, signature);
VERIFY_CHECK(recid >= 0 && recid < 4); /* should have been caught in parse_compact */
rustsecp256k1_v0_4_0_scalar_set_b32(&m, msghash32, NULL);
if (rustsecp256k1_v0_4_0_ecdsa_sig_recover(&ctx->ecmult_ctx, &r, &s, &q, &m, recid)) {
rustsecp256k1_v0_4_0_pubkey_save(pubkey, &q);
rustsecp256k1_v0_4_1_scalar_set_b32(&m, msghash32, NULL);
if (rustsecp256k1_v0_4_1_ecdsa_sig_recover(&ctx->ecmult_ctx, &r, &s, &q, &m, recid)) {
rustsecp256k1_v0_4_1_pubkey_save(pubkey, &q);
return 1;
} else {
memset(pubkey, 0, sizeof(*pubkey));


@ -8,9 +8,9 @@
#define SECP256K1_MODULE_RECOVERY_EXHAUSTIVE_TESTS_H
#include "src/modules/recovery/main_impl.h"
#include "include/secp256k1_recovery.h"
#include "../../../include/secp256k1_recovery.h"
void test_exhaustive_recovery_sign(const rustsecp256k1_v0_4_0_context *ctx, const rustsecp256k1_v0_4_0_ge *group) {
void test_exhaustive_recovery_sign(const rustsecp256k1_v0_4_1_context *ctx, const rustsecp256k1_v0_4_1_ge *group) {
int i, j, k;
uint64_t iter = 0;
@ -20,23 +20,23 @@ void test_exhaustive_recovery_sign(const rustsecp256k1_v0_4_0_context *ctx, cons
if (skip_section(&iter)) continue;
for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) { /* nonce */
const int starting_k = k;
rustsecp256k1_v0_4_0_fe r_dot_y_normalized;
rustsecp256k1_v0_4_0_ecdsa_recoverable_signature rsig;
rustsecp256k1_v0_4_0_ecdsa_signature sig;
rustsecp256k1_v0_4_0_scalar sk, msg, r, s, expected_r;
rustsecp256k1_v0_4_1_fe r_dot_y_normalized;
rustsecp256k1_v0_4_1_ecdsa_recoverable_signature rsig;
rustsecp256k1_v0_4_1_ecdsa_signature sig;
rustsecp256k1_v0_4_1_scalar sk, msg, r, s, expected_r;
unsigned char sk32[32], msg32[32];
int expected_recid;
int recid;
int overflow;
rustsecp256k1_v0_4_0_scalar_set_int(&msg, i);
rustsecp256k1_v0_4_0_scalar_set_int(&sk, j);
rustsecp256k1_v0_4_0_scalar_get_b32(sk32, &sk);
rustsecp256k1_v0_4_0_scalar_get_b32(msg32, &msg);
rustsecp256k1_v0_4_1_scalar_set_int(&msg, i);
rustsecp256k1_v0_4_1_scalar_set_int(&sk, j);
rustsecp256k1_v0_4_1_scalar_get_b32(sk32, &sk);
rustsecp256k1_v0_4_1_scalar_get_b32(msg32, &msg);
rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, rustsecp256k1_v0_4_0_nonce_function_smallint, &k);
rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, rustsecp256k1_v0_4_1_nonce_function_smallint, &k);
/* Check directly */
rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig);
rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig);
r_from_k(&expected_r, group, k, &overflow);
CHECK(r == expected_r);
CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER ||
@ -50,18 +50,18 @@ void test_exhaustive_recovery_sign(const rustsecp256k1_v0_4_0_context *ctx, cons
* in the real group. */
expected_recid = overflow ? 2 : 0;
r_dot_y_normalized = group[k].y;
rustsecp256k1_v0_4_0_fe_normalize(&r_dot_y_normalized);
rustsecp256k1_v0_4_1_fe_normalize(&r_dot_y_normalized);
/* Also the recovery id is flipped depending if we hit the low-s branch */
if ((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER) {
expected_recid |= rustsecp256k1_v0_4_0_fe_is_odd(&r_dot_y_normalized);
expected_recid |= rustsecp256k1_v0_4_1_fe_is_odd(&r_dot_y_normalized);
} else {
expected_recid |= !rustsecp256k1_v0_4_0_fe_is_odd(&r_dot_y_normalized);
expected_recid |= !rustsecp256k1_v0_4_1_fe_is_odd(&r_dot_y_normalized);
}
CHECK(recid == expected_recid);
/* Convert to a standard sig then check */
rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
rustsecp256k1_v0_4_0_ecdsa_signature_load(ctx, &r, &s, &sig);
rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
rustsecp256k1_v0_4_1_ecdsa_signature_load(ctx, &r, &s, &sig);
/* Note that we compute expected_r *after* signing -- this is important
* because our nonce-computing function might change k during
* signing. */
@ -79,7 +79,7 @@ void test_exhaustive_recovery_sign(const rustsecp256k1_v0_4_0_context *ctx, cons
}
}
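Spelled out, the relation these exhaustive loops check is the standard ECDSA equation over the small test group of order n = EXHAUSTIVE_TEST_ORDER: with message scalar m, secret key x, nonce k and r the reduced x coordinate of R = k*G, signing produces s ≡ k^-1 * (m + r*x) (mod n), so a candidate nonce k explains a signature exactly when

    k*s ≡ m + r*x (mod n)    or    k*s ≡ -(m + r*x) (mod n),

the second case arising when the low-s rule negated s. The recovery id then packs two bits on top of that: bit 1 records whether R's x coordinate overflowed the group order when reduced to r, and bit 0 records the parity of R's y coordinate, flipped when the low-s branch was taken.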
void test_exhaustive_recovery_verify(const rustsecp256k1_v0_4_0_context *ctx, const rustsecp256k1_v0_4_0_ge *group) {
void test_exhaustive_recovery_verify(const rustsecp256k1_v0_4_1_context *ctx, const rustsecp256k1_v0_4_1_ge *group) {
/* This is essentially a copy of test_exhaustive_verify, with recovery added */
int s, r, msg, key;
uint64_t iter = 0;
@ -87,41 +87,41 @@ void test_exhaustive_recovery_verify(const rustsecp256k1_v0_4_0_context *ctx, co
for (r = 1; r < EXHAUSTIVE_TEST_ORDER; r++) {
for (msg = 1; msg < EXHAUSTIVE_TEST_ORDER; msg++) {
for (key = 1; key < EXHAUSTIVE_TEST_ORDER; key++) {
rustsecp256k1_v0_4_0_ge nonconst_ge;
rustsecp256k1_v0_4_0_ecdsa_recoverable_signature rsig;
rustsecp256k1_v0_4_0_ecdsa_signature sig;
rustsecp256k1_v0_4_0_pubkey pk;
rustsecp256k1_v0_4_0_scalar sk_s, msg_s, r_s, s_s;
rustsecp256k1_v0_4_0_scalar s_times_k_s, msg_plus_r_times_sk_s;
rustsecp256k1_v0_4_1_ge nonconst_ge;
rustsecp256k1_v0_4_1_ecdsa_recoverable_signature rsig;
rustsecp256k1_v0_4_1_ecdsa_signature sig;
rustsecp256k1_v0_4_1_pubkey pk;
rustsecp256k1_v0_4_1_scalar sk_s, msg_s, r_s, s_s;
rustsecp256k1_v0_4_1_scalar s_times_k_s, msg_plus_r_times_sk_s;
int recid = 0;
int k, should_verify;
unsigned char msg32[32];
if (skip_section(&iter)) continue;
rustsecp256k1_v0_4_0_scalar_set_int(&s_s, s);
rustsecp256k1_v0_4_0_scalar_set_int(&r_s, r);
rustsecp256k1_v0_4_0_scalar_set_int(&msg_s, msg);
rustsecp256k1_v0_4_0_scalar_set_int(&sk_s, key);
rustsecp256k1_v0_4_0_scalar_get_b32(msg32, &msg_s);
rustsecp256k1_v0_4_1_scalar_set_int(&s_s, s);
rustsecp256k1_v0_4_1_scalar_set_int(&r_s, r);
rustsecp256k1_v0_4_1_scalar_set_int(&msg_s, msg);
rustsecp256k1_v0_4_1_scalar_set_int(&sk_s, key);
rustsecp256k1_v0_4_1_scalar_get_b32(msg32, &msg_s);
/* Verify by hand */
/* Run through every k value that gives us this r and check that *one* works.
* Note there could be none, there could be multiple, ECDSA is weird. */
should_verify = 0;
for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) {
rustsecp256k1_v0_4_0_scalar check_x_s;
rustsecp256k1_v0_4_1_scalar check_x_s;
r_from_k(&check_x_s, group, k, NULL);
if (r_s == check_x_s) {
rustsecp256k1_v0_4_0_scalar_set_int(&s_times_k_s, k);
rustsecp256k1_v0_4_0_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
rustsecp256k1_v0_4_0_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
rustsecp256k1_v0_4_0_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
should_verify |= rustsecp256k1_v0_4_0_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
rustsecp256k1_v0_4_1_scalar_set_int(&s_times_k_s, k);
rustsecp256k1_v0_4_1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
rustsecp256k1_v0_4_1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
rustsecp256k1_v0_4_1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
should_verify |= rustsecp256k1_v0_4_1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
}
}
/* nb we have a "high s" rule */
should_verify &= !rustsecp256k1_v0_4_0_scalar_is_high(&s_s);
should_verify &= !rustsecp256k1_v0_4_1_scalar_is_high(&s_s);
/* We would like to try recovering the pubkey and checking that it matches,
* but pubkey recovery is impossible in the exhaustive tests (the reason
@ -129,19 +129,19 @@ void test_exhaustive_recovery_verify(const rustsecp256k1_v0_4_0_context *ctx, co
* overlap between the sets, so there are no valid signatures). */
/* Verify by converting to a standard signature and calling verify */
rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid);
rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid);
rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
rustsecp256k1_v0_4_0_pubkey_save(&pk, &nonconst_ge);
rustsecp256k1_v0_4_1_pubkey_save(&pk, &nonconst_ge);
CHECK(should_verify ==
rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pk));
rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pk));
}
}
}
}
}
static void test_exhaustive_recovery(const rustsecp256k1_v0_4_0_context *ctx, const rustsecp256k1_v0_4_0_ge *group) {
static void test_exhaustive_recovery(const rustsecp256k1_v0_4_1_context *ctx, const rustsecp256k1_v0_4_1_ge *group) {
test_exhaustive_recovery_sign(ctx, group);
test_exhaustive_recovery_verify(ctx, group);
}


@ -25,19 +25,19 @@ static int recovery_test_nonce_function(unsigned char *nonce32, const unsigned c
}
/* On the next run, return a valid nonce, but flip a coin as to whether or not to fail signing. */
memset(nonce32, 1, 32);
return rustsecp256k1_v0_4_0_testrand_bits(1);
return rustsecp256k1_v0_4_1_testrand_bits(1);
}
void test_ecdsa_recovery_api(void) {
/* Setup contexts that just count errors */
rustsecp256k1_v0_4_0_context *none = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_NONE);
rustsecp256k1_v0_4_0_context *sign = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN);
rustsecp256k1_v0_4_0_context *vrfy = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_VERIFY);
rustsecp256k1_v0_4_0_context *both = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
rustsecp256k1_v0_4_0_pubkey pubkey;
rustsecp256k1_v0_4_0_pubkey recpubkey;
rustsecp256k1_v0_4_0_ecdsa_signature normal_sig;
rustsecp256k1_v0_4_0_ecdsa_recoverable_signature recsig;
rustsecp256k1_v0_4_1_context *none = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_NONE);
rustsecp256k1_v0_4_1_context *sign = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN);
rustsecp256k1_v0_4_1_context *vrfy = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_VERIFY);
rustsecp256k1_v0_4_1_context *both = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
rustsecp256k1_v0_4_1_pubkey pubkey;
rustsecp256k1_v0_4_1_pubkey recpubkey;
rustsecp256k1_v0_4_1_ecdsa_signature normal_sig;
rustsecp256k1_v0_4_1_ecdsa_recoverable_signature recsig;
unsigned char privkey[32] = { 1 };
unsigned char message[32] = { 2 };
int32_t ecount = 0;
@ -49,160 +49,160 @@ void test_ecdsa_recovery_api(void) {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
rustsecp256k1_v0_4_0_context_set_error_callback(none, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_0_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_0_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_0_context_set_error_callback(both, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_0_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_0_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_0_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_0_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_1_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_1_context_set_error_callback(both, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount);
/* Construct and verify corresponding public key. */
CHECK(rustsecp256k1_v0_4_0_ec_seckey_verify(ctx, privkey) == 1);
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
CHECK(rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, privkey) == 1);
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
/* Check bad contexts and NULLs for signing */
ecount = 0;
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(none, &recsig, message, privkey, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(none, &recsig, message, privkey, NULL, NULL) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(sign, &recsig, message, privkey, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(sign, &recsig, message, privkey, NULL, NULL) == 1);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(vrfy, &recsig, message, privkey, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(vrfy, &recsig, message, privkey, NULL, NULL) == 0);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(both, NULL, message, privkey, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(both, NULL, message, privkey, NULL, NULL) == 0);
CHECK(ecount == 3);
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(both, &recsig, NULL, privkey, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(both, &recsig, NULL, privkey, NULL, NULL) == 0);
CHECK(ecount == 4);
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(both, &recsig, message, NULL, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(both, &recsig, message, NULL, NULL, NULL) == 0);
CHECK(ecount == 5);
/* This will fail or succeed randomly, and in either case will not trigger an ARG_CHECK failure */
rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, recovery_test_nonce_function, NULL);
rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(both, &recsig, message, privkey, recovery_test_nonce_function, NULL);
CHECK(ecount == 5);
/* These will all fail, but not in an ARG_CHECK way */
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(both, &recsig, message, zero_privkey, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(both, &recsig, message, over_privkey, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(both, &recsig, message, zero_privkey, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(both, &recsig, message, over_privkey, NULL, NULL) == 0);
/* This one will succeed. */
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
CHECK(ecount == 5);
/* Check signing with a goofy nonce function */
/* Check bad contexts and NULLs for recovery */
ecount = 0;
CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(none, &recpubkey, &recsig, message) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(none, &recpubkey, &recsig, message) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(sign, &recpubkey, &recsig, message) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(sign, &recpubkey, &recsig, message) == 0);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(vrfy, &recpubkey, &recsig, message) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(vrfy, &recpubkey, &recsig, message) == 1);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(both, &recpubkey, &recsig, message) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(both, &recpubkey, &recsig, message) == 1);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(both, NULL, &recsig, message) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(both, NULL, &recsig, message) == 0);
CHECK(ecount == 3);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(both, &recpubkey, NULL, message) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(both, &recpubkey, NULL, message) == 0);
CHECK(ecount == 4);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(both, &recpubkey, &recsig, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(both, &recpubkey, &recsig, NULL) == 0);
CHECK(ecount == 5);
/* Check NULLs for conversion */
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(both, &normal_sig, message, privkey, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(both, &normal_sig, message, privkey, NULL, NULL) == 1);
ecount = 0;
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(both, NULL, &recsig) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert(both, NULL, &recsig) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(both, &normal_sig, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert(both, &normal_sig, NULL) == 0);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(both, &normal_sig, &recsig) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert(both, &normal_sig, &recsig) == 1);
/* Check NULLs for de/serialization */
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
ecount = 0;
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_compact(both, NULL, &recid, &recsig) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact(both, NULL, &recid, &recsig) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_compact(both, sig, NULL, &recsig) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact(both, sig, NULL, &recsig) == 0);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, NULL) == 0);
CHECK(ecount == 3);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, &recsig) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, &recsig) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(both, NULL, sig, recid) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(both, NULL, sig, recid) == 0);
CHECK(ecount == 4);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, NULL, recid) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(both, &recsig, NULL, recid) == 0);
CHECK(ecount == 5);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, -1) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, -1) == 0);
CHECK(ecount == 6);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, 5) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, 5) == 0);
CHECK(ecount == 7);
/* overflow in signature will fail but not affect ecount */
memcpy(sig, over_privkey, 32);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, recid) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, recid) == 0);
CHECK(ecount == 7);
/* cleanup */
rustsecp256k1_v0_4_0_context_destroy(none);
rustsecp256k1_v0_4_0_context_destroy(sign);
rustsecp256k1_v0_4_0_context_destroy(vrfy);
rustsecp256k1_v0_4_0_context_destroy(both);
rustsecp256k1_v0_4_1_context_destroy(none);
rustsecp256k1_v0_4_1_context_destroy(sign);
rustsecp256k1_v0_4_1_context_destroy(vrfy);
rustsecp256k1_v0_4_1_context_destroy(both);
}
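For orientation, here is a minimal happy-path sketch of the recoverable-signature API exercised above. It assumes the same test harness as the surrounding functions (the global ctx with SIGN|VERIFY capability and the CHECK macro) plus caller-supplied 32-byte seckey/msg32 buffers; the helper name is hypothetical.
/* Sketch only: sign, serialize, re-parse and recover with the v0_4_1 API. */
static void recoverable_roundtrip_sketch(const unsigned char *seckey, const unsigned char *msg32) {
    rustsecp256k1_v0_4_1_ecdsa_recoverable_signature rsig;
    rustsecp256k1_v0_4_1_pubkey pub, recovered;
    unsigned char compact64[64];
    int recid;
    CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pub, seckey) == 1);
    CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(ctx, &rsig, msg32, seckey, NULL, NULL) == 1);
    CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact(ctx, compact64, &recid, &rsig) == 1);
    CHECK(recid >= 0 && recid <= 3);
    CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, compact64, recid) == 1);
    CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &recovered, &rsig, msg32) == 1);
    /* The recovered key must match the key derived directly from the secret key. */
    CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pub, &recovered, sizeof(pub)) == 0);
}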
void test_ecdsa_recovery_end_to_end(void) {
unsigned char extra[32] = {0x00};
unsigned char privkey[32];
unsigned char message[32];
rustsecp256k1_v0_4_0_ecdsa_signature signature[5];
rustsecp256k1_v0_4_0_ecdsa_recoverable_signature rsignature[5];
rustsecp256k1_v0_4_1_ecdsa_signature signature[5];
rustsecp256k1_v0_4_1_ecdsa_recoverable_signature rsignature[5];
unsigned char sig[74];
rustsecp256k1_v0_4_0_pubkey pubkey;
rustsecp256k1_v0_4_0_pubkey recpubkey;
rustsecp256k1_v0_4_1_pubkey pubkey;
rustsecp256k1_v0_4_1_pubkey recpubkey;
int recid = 0;
/* Generate a random key and message. */
{
rustsecp256k1_v0_4_0_scalar msg, key;
rustsecp256k1_v0_4_1_scalar msg, key;
random_scalar_order_test(&msg);
random_scalar_order_test(&key);
rustsecp256k1_v0_4_0_scalar_get_b32(privkey, &key);
rustsecp256k1_v0_4_0_scalar_get_b32(message, &msg);
rustsecp256k1_v0_4_1_scalar_get_b32(privkey, &key);
rustsecp256k1_v0_4_1_scalar_get_b32(message, &msg);
}
/* Construct and verify corresponding public key. */
CHECK(rustsecp256k1_v0_4_0_ec_seckey_verify(ctx, privkey) == 1);
CHECK(rustsecp256k1_v0_4_0_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
CHECK(rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, privkey) == 1);
CHECK(rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
/* Serialize/parse compact and verify/recover. */
extra[0] = 0;
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(ctx, &rsignature[0], message, privkey, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(ctx, &rsignature[4], message, privkey, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(ctx, &rsignature[1], message, privkey, NULL, extra) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(ctx, &rsignature[0], message, privkey, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(ctx, &rsignature[4], message, privkey, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(ctx, &rsignature[1], message, privkey, NULL, extra) == 1);
extra[31] = 1;
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(ctx, &rsignature[2], message, privkey, NULL, extra) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(ctx, &rsignature[2], message, privkey, NULL, extra) == 1);
extra[31] = 0;
extra[0] = 1;
CHECK(rustsecp256k1_v0_4_0_ecdsa_sign_recoverable(ctx, &rsignature[3], message, privkey, NULL, extra) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&signature[4], &signature[0], 64) == 0);
CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_sign_recoverable(ctx, &rsignature[3], message, privkey, NULL, extra) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&signature[4], &signature[0], 64) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
memset(&rsignature[4], 0, sizeof(rsignature[4]));
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
/* Parse compact (with recovery id) and recover. */
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 1);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 1);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) == 0);
/* Serialize/destroy/parse signature and verify again. */
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
sig[rustsecp256k1_v0_4_0_testrand_bits(6)] += 1 + rustsecp256k1_v0_4_0_testrand_int(255);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
sig[rustsecp256k1_v0_4_1_testrand_bits(6)] += 1 + rustsecp256k1_v0_4_1_testrand_int(255);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 0);
/* Recover again */
CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 0 ||
rustsecp256k1_v0_4_0_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) != 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 0 ||
rustsecp256k1_v0_4_1_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) != 0);
}
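A brief note on what the recovery calls above compute (a sketch of standard ECDSA key recovery, not text taken from this diff): given the message hash z, a signature (r, s) and a recovery id recid in 0..3, the verifier reconstructs the nonce point R, whose x-coordinate is r (or r + n when the high bit of recid is set) and whose Y parity is the low bit of recid, and then derives

    Q = r^{-1} (s*R - z*G).

This is why only the four recid values 0..3 parse above, and why corrupting a byte of the compact serialization makes recovery either fail or return a public key different from the original.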
/* Tests several edge cases. */
@ -225,7 +225,7 @@ void test_ecdsa_recovery_edge_cases(void) {
0x7D, 0xD7, 0x3E, 0x38, 0x7E, 0xE4, 0xFC, 0x86,
0x6E, 0x1B, 0xE8, 0xEC, 0xC7, 0xDD, 0x95, 0x57
};
rustsecp256k1_v0_4_0_pubkey pubkey;
rustsecp256k1_v0_4_1_pubkey pubkey;
/* signature (r,s) = (4,4), which can be recovered with all 4 recids. */
const unsigned char sigb64[64] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@ -237,19 +237,19 @@ void test_ecdsa_recovery_edge_cases(void) {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
};
rustsecp256k1_v0_4_0_pubkey pubkeyb;
rustsecp256k1_v0_4_0_ecdsa_recoverable_signature rsig;
rustsecp256k1_v0_4_0_ecdsa_signature sig;
rustsecp256k1_v0_4_1_pubkey pubkeyb;
rustsecp256k1_v0_4_1_ecdsa_recoverable_signature rsig;
rustsecp256k1_v0_4_1_ecdsa_signature sig;
int recid;
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 0));
CHECK(!rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 1));
CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 2));
CHECK(!rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 3));
CHECK(!rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 0));
CHECK(!rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 1));
CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 2));
CHECK(!rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 3));
CHECK(!rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
for (recid = 0; recid < 4; recid++) {
int i;
@ -294,40 +294,40 @@ void test_ecdsa_recovery_edge_cases(void) {
0xE6, 0xAF, 0x48, 0xA0, 0x3B, 0xBF, 0xD2, 0x5E,
0x8C, 0xD0, 0x36, 0x41, 0x45, 0x02, 0x01, 0x04
};
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 1);
for (recid2 = 0; recid2 < 4; recid2++) {
rustsecp256k1_v0_4_0_pubkey pubkey2b;
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid2) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &pubkey2b, &rsig, msg32) == 1);
rustsecp256k1_v0_4_1_pubkey pubkey2b;
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid2) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &pubkey2b, &rsig, msg32) == 1);
/* Verifying with (order + r,4) should always fail. */
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbderlong, sizeof(sigbderlong)) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbderlong, sizeof(sigbderlong)) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
}
/* DER parsing tests. */
/* Zero length r/s. */
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigcder_zr, sizeof(sigcder_zr)) == 0);
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigcder_zs, sizeof(sigcder_zs)) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigcder_zr, sizeof(sigcder_zr)) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigcder_zs, sizeof(sigcder_zs)) == 0);
/* Leading zeros. */
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt1, sizeof(sigbderalt1)) == 0);
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt2, sizeof(sigbderalt2)) == 0);
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 0);
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt1, sizeof(sigbderalt1)) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt2, sizeof(sigbderalt2)) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 0);
sigbderalt3[4] = 1;
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
sigbderalt4[7] = 1;
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
/* Damage signature. */
sigbder[7]++;
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
sigbder[7]--;
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, 6) == 0);
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder) - 1) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbder, 6) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder) - 1) == 0);
for(i = 0; i < 8; i++) {
int c;
unsigned char orig = sigbder[i];
@ -337,7 +337,7 @@ void test_ecdsa_recovery_edge_cases(void) {
continue;
}
sigbder[i] = c;
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 0 || rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 0 || rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
}
sigbder[i] = orig;
}
@ -357,25 +357,25 @@ void test_ecdsa_recovery_edge_cases(void) {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
};
rustsecp256k1_v0_4_0_pubkey pubkeyc;
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &pubkeyc, &rsig, msg32) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 1);
rustsecp256k1_v0_4_1_pubkey pubkeyc;
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &pubkeyc, &rsig, msg32) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 1);
sigcder[4] = 0;
sigc64[31] = 0;
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0);
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0);
sigcder[4] = 1;
sigcder[7] = 0;
sigc64[31] = 1;
sigc64[63] = 0;
CHECK(rustsecp256k1_v0_4_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0);
CHECK(rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
CHECK(rustsecp256k1_v0_4_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0);
CHECK(rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
CHECK(rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0);
}
}

View File

@ -1,4 +1,4 @@
include_HEADERS += include/rustsecp256k1_v0_4_0_schnorrsig.h
include_HEADERS += include/rustsecp256k1_v0_4_1_schnorrsig.h
noinst_HEADERS += src/modules/schnorrsig/main_impl.h
noinst_HEADERS += src/modules/schnorrsig/tests_impl.h
noinst_HEADERS += src/modules/schnorrsig/tests_exhaustive_impl.h

View File

@ -4,17 +4,17 @@
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
#ifndef _SECP256K1_MODULE_SCHNORRSIG_MAIN_
#define _SECP256K1_MODULE_SCHNORRSIG_MAIN_
#ifndef SECP256K1_MODULE_SCHNORRSIG_MAIN_H
#define SECP256K1_MODULE_SCHNORRSIG_MAIN_H
#include "include/secp256k1.h"
#include "include/secp256k1_schnorrsig.h"
#include "hash.h"
#include "../../../include/secp256k1.h"
#include "../../../include/secp256k1_schnorrsig.h"
#include "../../hash.h"
/* Initializes SHA256 with fixed midstate. This midstate was computed by applying
* SHA256 to SHA256("BIP0340/nonce")||SHA256("BIP0340/nonce"). */
static void rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged(rustsecp256k1_v0_4_0_sha256 *sha) {
rustsecp256k1_v0_4_0_sha256_initialize(sha);
static void rustsecp256k1_v0_4_1_nonce_function_bip340_sha256_tagged(rustsecp256k1_v0_4_1_sha256 *sha) {
rustsecp256k1_v0_4_1_sha256_initialize(sha);
sha->s[0] = 0x46615b35ul;
sha->s[1] = 0xf4bfbff7ul;
sha->s[2] = 0x9f8dc671ul;
@ -29,8 +29,8 @@ static void rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged(rustsecp256
/* Initializes SHA256 with fixed midstate. This midstate was computed by applying
* SHA256 to SHA256("BIP0340/aux")||SHA256("BIP0340/aux"). */
static void rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged_aux(rustsecp256k1_v0_4_0_sha256 *sha) {
rustsecp256k1_v0_4_0_sha256_initialize(sha);
static void rustsecp256k1_v0_4_1_nonce_function_bip340_sha256_tagged_aux(rustsecp256k1_v0_4_1_sha256 *sha) {
rustsecp256k1_v0_4_1_sha256_initialize(sha);
sha->s[0] = 0x24dd3219ul;
sha->s[1] = 0x4eba7e70ul;
sha->s[2] = 0xca0fabb9ul;
@ -48,7 +48,7 @@ static void rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged_aux(rustsec
static const unsigned char bip340_algo16[16] = "BIP0340/nonce\0\0\0";
static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo16, void *data) {
rustsecp256k1_v0_4_0_sha256 sha;
rustsecp256k1_v0_4_1_sha256 sha;
unsigned char masked_key[32];
int i;
@ -57,9 +57,9 @@ static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *ms
}
if (data != NULL) {
rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged_aux(&sha);
rustsecp256k1_v0_4_0_sha256_write(&sha, data, 32);
rustsecp256k1_v0_4_0_sha256_finalize(&sha, masked_key);
rustsecp256k1_v0_4_1_nonce_function_bip340_sha256_tagged_aux(&sha);
rustsecp256k1_v0_4_1_sha256_write(&sha, data, 32);
rustsecp256k1_v0_4_1_sha256_finalize(&sha, masked_key);
for (i = 0; i < 32; i++) {
masked_key[i] ^= key32[i];
}
@ -68,35 +68,35 @@ static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *ms
/* Tag the hash with algo16 which is important to avoid nonce reuse across
* algorithms. If this nonce function is used in BIP-340 signing as defined
* in the spec, an optimized tagging implementation is used. */
if (rustsecp256k1_v0_4_0_memcmp_var(algo16, bip340_algo16, 16) == 0) {
rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged(&sha);
if (rustsecp256k1_v0_4_1_memcmp_var(algo16, bip340_algo16, 16) == 0) {
rustsecp256k1_v0_4_1_nonce_function_bip340_sha256_tagged(&sha);
} else {
int algo16_len = 16;
/* Remove terminating null bytes */
while (algo16_len > 0 && !algo16[algo16_len - 1]) {
algo16_len--;
}
rustsecp256k1_v0_4_0_sha256_initialize_tagged(&sha, algo16, algo16_len);
rustsecp256k1_v0_4_1_sha256_initialize_tagged(&sha, algo16, algo16_len);
}
/* Hash (masked-)key||pk||msg using the tagged hash as per the spec */
if (data != NULL) {
rustsecp256k1_v0_4_0_sha256_write(&sha, masked_key, 32);
rustsecp256k1_v0_4_1_sha256_write(&sha, masked_key, 32);
} else {
rustsecp256k1_v0_4_0_sha256_write(&sha, key32, 32);
rustsecp256k1_v0_4_1_sha256_write(&sha, key32, 32);
}
rustsecp256k1_v0_4_0_sha256_write(&sha, xonly_pk32, 32);
rustsecp256k1_v0_4_0_sha256_write(&sha, msg32, 32);
rustsecp256k1_v0_4_0_sha256_finalize(&sha, nonce32);
rustsecp256k1_v0_4_1_sha256_write(&sha, xonly_pk32, 32);
rustsecp256k1_v0_4_1_sha256_write(&sha, msg32, 32);
rustsecp256k1_v0_4_1_sha256_finalize(&sha, nonce32);
return 1;
}
const rustsecp256k1_v0_4_0_nonce_function_hardened rustsecp256k1_v0_4_0_nonce_function_bip340 = nonce_function_bip340;
const rustsecp256k1_v0_4_1_nonce_function_hardened rustsecp256k1_v0_4_1_nonce_function_bip340 = nonce_function_bip340;
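Summarizing the nonce derivation implemented above (an informal sketch of what this code does, not normative BIP-340 text): when ndata is supplied, the key is first masked,

    masked_key = key32 XOR SHA256_{BIP0340/aux}(ndata),

and the nonce is then

    nonce32 = SHA256_t(masked_key || xonly_pk32 || msg32),

where the tag t is "BIP0340/nonce" (via the optimized midstate) when algo16 equals bip340_algo16, and otherwise the caller's algo16 with trailing NUL bytes stripped; with ndata == NULL, key32 itself is hashed in place of masked_key.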
/* Initializes SHA256 with fixed midstate. This midstate was computed by applying
* SHA256 to SHA256("BIP0340/challenge")||SHA256("BIP0340/challenge"). */
static void rustsecp256k1_v0_4_0_schnorrsig_sha256_tagged(rustsecp256k1_v0_4_0_sha256 *sha) {
rustsecp256k1_v0_4_0_sha256_initialize(sha);
static void rustsecp256k1_v0_4_1_schnorrsig_sha256_tagged(rustsecp256k1_v0_4_1_sha256 *sha) {
rustsecp256k1_v0_4_1_sha256_initialize(sha);
sha->s[0] = 0x9cecba11ul;
sha->s[1] = 0x23925381ul;
sha->s[2] = 0x11679112ul;
@ -108,132 +108,132 @@ static void rustsecp256k1_v0_4_0_schnorrsig_sha256_tagged(rustsecp256k1_v0_4_0_s
sha->bytes = 64;
}
static void rustsecp256k1_v0_4_0_schnorrsig_challenge(rustsecp256k1_v0_4_0_scalar* e, const unsigned char *r32, const unsigned char *msg32, const unsigned char *pubkey32)
static void rustsecp256k1_v0_4_1_schnorrsig_challenge(rustsecp256k1_v0_4_1_scalar* e, const unsigned char *r32, const unsigned char *msg32, const unsigned char *pubkey32)
{
unsigned char buf[32];
rustsecp256k1_v0_4_0_sha256 sha;
rustsecp256k1_v0_4_1_sha256 sha;
/* tagged hash(r.x, pk.x, msg32) */
rustsecp256k1_v0_4_0_schnorrsig_sha256_tagged(&sha);
rustsecp256k1_v0_4_0_sha256_write(&sha, r32, 32);
rustsecp256k1_v0_4_0_sha256_write(&sha, pubkey32, 32);
rustsecp256k1_v0_4_0_sha256_write(&sha, msg32, 32);
rustsecp256k1_v0_4_0_sha256_finalize(&sha, buf);
rustsecp256k1_v0_4_1_schnorrsig_sha256_tagged(&sha);
rustsecp256k1_v0_4_1_sha256_write(&sha, r32, 32);
rustsecp256k1_v0_4_1_sha256_write(&sha, pubkey32, 32);
rustsecp256k1_v0_4_1_sha256_write(&sha, msg32, 32);
rustsecp256k1_v0_4_1_sha256_finalize(&sha, buf);
/* Set scalar e to the challenge hash modulo the curve order as per
* BIP340. */
rustsecp256k1_v0_4_0_scalar_set_b32(e, buf, NULL);
rustsecp256k1_v0_4_1_scalar_set_b32(e, buf, NULL);
}
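In BIP-340 notation, the challenge computed by the function above is (sketch):

    e = SHA256_{BIP0340/challenge}(r_x || pk_x || m) mod n,

where n is the curve order and SHA256_t denotes the tagged hash SHA256(SHA256(t) || SHA256(t) || data).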
int rustsecp256k1_v0_4_0_schnorrsig_sign(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *sig64, const unsigned char *msg32, const rustsecp256k1_v0_4_0_keypair *keypair, rustsecp256k1_v0_4_0_nonce_function_hardened noncefp, void *ndata) {
rustsecp256k1_v0_4_0_scalar sk;
rustsecp256k1_v0_4_0_scalar e;
rustsecp256k1_v0_4_0_scalar k;
rustsecp256k1_v0_4_0_gej rj;
rustsecp256k1_v0_4_0_ge pk;
rustsecp256k1_v0_4_0_ge r;
int rustsecp256k1_v0_4_1_schnorrsig_sign(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const rustsecp256k1_v0_4_1_keypair *keypair, rustsecp256k1_v0_4_1_nonce_function_hardened noncefp, void *ndata) {
rustsecp256k1_v0_4_1_scalar sk;
rustsecp256k1_v0_4_1_scalar e;
rustsecp256k1_v0_4_1_scalar k;
rustsecp256k1_v0_4_1_gej rj;
rustsecp256k1_v0_4_1_ge pk;
rustsecp256k1_v0_4_1_ge r;
unsigned char buf[32] = { 0 };
unsigned char pk_buf[32];
unsigned char seckey[32];
int ret = 1;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
ARG_CHECK(sig64 != NULL);
ARG_CHECK(msg32 != NULL);
ARG_CHECK(keypair != NULL);
if (noncefp == NULL) {
noncefp = rustsecp256k1_v0_4_0_nonce_function_bip340;
noncefp = rustsecp256k1_v0_4_1_nonce_function_bip340;
}
ret &= rustsecp256k1_v0_4_0_keypair_load(ctx, &sk, &pk, keypair);
ret &= rustsecp256k1_v0_4_1_keypair_load(ctx, &sk, &pk, keypair);
/* Because we are signing for an x-only pubkey, the secret key is negated
* before signing if the point corresponding to the secret key does not
* have an even Y. */
if (rustsecp256k1_v0_4_0_fe_is_odd(&pk.y)) {
rustsecp256k1_v0_4_0_scalar_negate(&sk, &sk);
if (rustsecp256k1_v0_4_1_fe_is_odd(&pk.y)) {
rustsecp256k1_v0_4_1_scalar_negate(&sk, &sk);
}
rustsecp256k1_v0_4_0_scalar_get_b32(seckey, &sk);
rustsecp256k1_v0_4_0_fe_get_b32(pk_buf, &pk.x);
rustsecp256k1_v0_4_1_scalar_get_b32(seckey, &sk);
rustsecp256k1_v0_4_1_fe_get_b32(pk_buf, &pk.x);
ret &= !!noncefp(buf, msg32, seckey, pk_buf, bip340_algo16, ndata);
rustsecp256k1_v0_4_0_scalar_set_b32(&k, buf, NULL);
ret &= !rustsecp256k1_v0_4_0_scalar_is_zero(&k);
rustsecp256k1_v0_4_0_scalar_cmov(&k, &rustsecp256k1_v0_4_0_scalar_one, !ret);
rustsecp256k1_v0_4_1_scalar_set_b32(&k, buf, NULL);
ret &= !rustsecp256k1_v0_4_1_scalar_is_zero(&k);
rustsecp256k1_v0_4_1_scalar_cmov(&k, &rustsecp256k1_v0_4_1_scalar_one, !ret);
rustsecp256k1_v0_4_0_ecmult_gen(&ctx->ecmult_gen_ctx, &rj, &k);
rustsecp256k1_v0_4_0_ge_set_gej(&r, &rj);
rustsecp256k1_v0_4_1_ecmult_gen(&ctx->ecmult_gen_ctx, &rj, &k);
rustsecp256k1_v0_4_1_ge_set_gej(&r, &rj);
/* We declassify r to allow using it as a branch point. This is fine
* because r is not a secret. */
rustsecp256k1_v0_4_0_declassify(ctx, &r, sizeof(r));
rustsecp256k1_v0_4_0_fe_normalize_var(&r.y);
if (rustsecp256k1_v0_4_0_fe_is_odd(&r.y)) {
rustsecp256k1_v0_4_0_scalar_negate(&k, &k);
rustsecp256k1_v0_4_1_declassify(ctx, &r, sizeof(r));
rustsecp256k1_v0_4_1_fe_normalize_var(&r.y);
if (rustsecp256k1_v0_4_1_fe_is_odd(&r.y)) {
rustsecp256k1_v0_4_1_scalar_negate(&k, &k);
}
rustsecp256k1_v0_4_0_fe_normalize_var(&r.x);
rustsecp256k1_v0_4_0_fe_get_b32(&sig64[0], &r.x);
rustsecp256k1_v0_4_1_fe_normalize_var(&r.x);
rustsecp256k1_v0_4_1_fe_get_b32(&sig64[0], &r.x);
rustsecp256k1_v0_4_0_schnorrsig_challenge(&e, &sig64[0], msg32, pk_buf);
rustsecp256k1_v0_4_0_scalar_mul(&e, &e, &sk);
rustsecp256k1_v0_4_0_scalar_add(&e, &e, &k);
rustsecp256k1_v0_4_0_scalar_get_b32(&sig64[32], &e);
rustsecp256k1_v0_4_1_schnorrsig_challenge(&e, &sig64[0], msg32, pk_buf);
rustsecp256k1_v0_4_1_scalar_mul(&e, &e, &sk);
rustsecp256k1_v0_4_1_scalar_add(&e, &e, &k);
rustsecp256k1_v0_4_1_scalar_get_b32(&sig64[32], &e);
rustsecp256k1_v0_4_0_memczero(sig64, 64, !ret);
rustsecp256k1_v0_4_0_scalar_clear(&k);
rustsecp256k1_v0_4_0_scalar_clear(&sk);
rustsecp256k1_v0_4_1_memczero(sig64, 64, !ret);
rustsecp256k1_v0_4_1_scalar_clear(&k);
rustsecp256k1_v0_4_1_scalar_clear(&sk);
memset(seckey, 0, sizeof(seckey));
return ret;
}
int rustsecp256k1_v0_4_0_schnorrsig_verify(const rustsecp256k1_v0_4_0_context* ctx, const unsigned char *sig64, const unsigned char *msg32, const rustsecp256k1_v0_4_0_xonly_pubkey *pubkey) {
rustsecp256k1_v0_4_0_scalar s;
rustsecp256k1_v0_4_0_scalar e;
rustsecp256k1_v0_4_0_gej rj;
rustsecp256k1_v0_4_0_ge pk;
rustsecp256k1_v0_4_0_gej pkj;
rustsecp256k1_v0_4_0_fe rx;
rustsecp256k1_v0_4_0_ge r;
int rustsecp256k1_v0_4_1_schnorrsig_verify(const rustsecp256k1_v0_4_1_context* ctx, const unsigned char *sig64, const unsigned char *msg32, const rustsecp256k1_v0_4_1_xonly_pubkey *pubkey) {
rustsecp256k1_v0_4_1_scalar s;
rustsecp256k1_v0_4_1_scalar e;
rustsecp256k1_v0_4_1_gej rj;
rustsecp256k1_v0_4_1_ge pk;
rustsecp256k1_v0_4_1_gej pkj;
rustsecp256k1_v0_4_1_fe rx;
rustsecp256k1_v0_4_1_ge r;
unsigned char buf[32];
int overflow;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(sig64 != NULL);
ARG_CHECK(msg32 != NULL);
ARG_CHECK(pubkey != NULL);
if (!rustsecp256k1_v0_4_0_fe_set_b32(&rx, &sig64[0])) {
if (!rustsecp256k1_v0_4_1_fe_set_b32(&rx, &sig64[0])) {
return 0;
}
rustsecp256k1_v0_4_0_scalar_set_b32(&s, &sig64[32], &overflow);
rustsecp256k1_v0_4_1_scalar_set_b32(&s, &sig64[32], &overflow);
if (overflow) {
return 0;
}
if (!rustsecp256k1_v0_4_0_xonly_pubkey_load(ctx, &pk, pubkey)) {
if (!rustsecp256k1_v0_4_1_xonly_pubkey_load(ctx, &pk, pubkey)) {
return 0;
}
/* Compute e. */
rustsecp256k1_v0_4_0_fe_get_b32(buf, &pk.x);
rustsecp256k1_v0_4_0_schnorrsig_challenge(&e, &sig64[0], msg32, buf);
rustsecp256k1_v0_4_1_fe_get_b32(buf, &pk.x);
rustsecp256k1_v0_4_1_schnorrsig_challenge(&e, &sig64[0], msg32, buf);
/* Compute rj = s*G + (-e)*pkj */
rustsecp256k1_v0_4_0_scalar_negate(&e, &e);
rustsecp256k1_v0_4_0_gej_set_ge(&pkj, &pk);
rustsecp256k1_v0_4_0_ecmult(&ctx->ecmult_ctx, &rj, &pkj, &e, &s);
rustsecp256k1_v0_4_1_scalar_negate(&e, &e);
rustsecp256k1_v0_4_1_gej_set_ge(&pkj, &pk);
rustsecp256k1_v0_4_1_ecmult(&ctx->ecmult_ctx, &rj, &pkj, &e, &s);
rustsecp256k1_v0_4_0_ge_set_gej_var(&r, &rj);
if (rustsecp256k1_v0_4_0_ge_is_infinity(&r)) {
rustsecp256k1_v0_4_1_ge_set_gej_var(&r, &rj);
if (rustsecp256k1_v0_4_1_ge_is_infinity(&r)) {
return 0;
}
rustsecp256k1_v0_4_0_fe_normalize_var(&r.y);
return !rustsecp256k1_v0_4_0_fe_is_odd(&r.y) &&
rustsecp256k1_v0_4_0_fe_equal_var(&rx, &r.x);
rustsecp256k1_v0_4_1_fe_normalize_var(&r.y);
return !rustsecp256k1_v0_4_1_fe_is_odd(&r.y) &&
rustsecp256k1_v0_4_1_fe_equal_var(&rx, &r.x);
}
#endif
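Taken together, the two routines above implement the BIP-340 relation (informal sketch; as in the code, d and k are negated when needed so that the public key P and the nonce point R both have even Y):

    sig64 = r_x || s,   with   s = k + e*d (mod n),

and verification accepts iff R' = s*G - e*P is not the point at infinity, has even Y, and satisfies x(R') = r_x, with e the challenge defined by the function above.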

View File

@ -4,10 +4,10 @@
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
#ifndef _SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_
#define _SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_
#ifndef SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_H
#define SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_H
#include "include/secp256k1_schnorrsig.h"
#include "../../../include/secp256k1_schnorrsig.h"
#include "src/modules/schnorrsig/main_impl.h"
static const unsigned char invalid_pubkey_bytes[][32] = {
@ -58,21 +58,21 @@ static const unsigned char invalid_pubkey_bytes[][32] = {
#define NUM_INVALID_KEYS (sizeof(invalid_pubkey_bytes) / sizeof(invalid_pubkey_bytes[0]))
static int rustsecp256k1_v0_4_0_hardened_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32,
static int rustsecp256k1_v0_4_1_hardened_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32,
const unsigned char *key32, const unsigned char *xonly_pk32,
const unsigned char *algo16, void* data) {
rustsecp256k1_v0_4_0_scalar s;
rustsecp256k1_v0_4_1_scalar s;
int *idata = data;
(void)msg32;
(void)key32;
(void)xonly_pk32;
(void)algo16;
rustsecp256k1_v0_4_0_scalar_set_int(&s, *idata);
rustsecp256k1_v0_4_0_scalar_get_b32(nonce32, &s);
rustsecp256k1_v0_4_1_scalar_set_int(&s, *idata);
rustsecp256k1_v0_4_1_scalar_get_b32(nonce32, &s);
return 1;
}
static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_4_0_context *ctx, const rustsecp256k1_v0_4_0_xonly_pubkey* pubkeys, unsigned char (*xonly_pubkey_bytes)[32], const int* parities) {
static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_4_1_context *ctx, const rustsecp256k1_v0_4_1_xonly_pubkey* pubkeys, unsigned char (*xonly_pubkey_bytes)[32], const int* parities) {
int d;
uint64_t iter = 0;
/* Iterate over the possible public keys to verify against (through their corresponding DL d). */
@ -98,10 +98,10 @@ static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_4_0_context
}
/* Randomly generate messages until all challenges have been hit. */
while (e_count_done < EXHAUSTIVE_TEST_ORDER) {
rustsecp256k1_v0_4_0_scalar e;
rustsecp256k1_v0_4_1_scalar e;
unsigned char msg32[32];
rustsecp256k1_v0_4_0_testrand256(msg32);
rustsecp256k1_v0_4_0_schnorrsig_challenge(&e, sig64, msg32, pk32);
rustsecp256k1_v0_4_1_testrand256(msg32);
rustsecp256k1_v0_4_1_schnorrsig_challenge(&e, sig64, msg32, pk32);
/* Only do work if we hit a challenge we haven't tried before. */
if (!e_done[e]) {
/* Iterate over the possible valid last 32 bytes in the signature.
@ -110,16 +110,16 @@ static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_4_0_context
for (s = 0; s <= EXHAUSTIVE_TEST_ORDER + 1; ++s) {
int expect_valid, valid;
if (s <= EXHAUSTIVE_TEST_ORDER) {
rustsecp256k1_v0_4_0_scalar s_s;
rustsecp256k1_v0_4_0_scalar_set_int(&s_s, s);
rustsecp256k1_v0_4_0_scalar_get_b32(sig64 + 32, &s_s);
rustsecp256k1_v0_4_1_scalar s_s;
rustsecp256k1_v0_4_1_scalar_set_int(&s_s, s);
rustsecp256k1_v0_4_1_scalar_get_b32(sig64 + 32, &s_s);
expect_valid = actual_k != -1 && s != EXHAUSTIVE_TEST_ORDER &&
(s_s == (actual_k + actual_d * e) % EXHAUSTIVE_TEST_ORDER);
} else {
rustsecp256k1_v0_4_0_testrand256(sig64 + 32);
rustsecp256k1_v0_4_1_testrand256(sig64 + 32);
expect_valid = 0;
}
valid = rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig64, msg32, &pubkeys[d - 1]);
valid = rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig64, msg32, &pubkeys[d - 1]);
CHECK(valid == expect_valid);
count_valid += valid;
}
@ -134,7 +134,7 @@ static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_4_0_context
}
}
static void test_exhaustive_schnorrsig_sign(const rustsecp256k1_v0_4_0_context *ctx, unsigned char (*xonly_pubkey_bytes)[32], const rustsecp256k1_v0_4_0_keypair* keypairs, const int* parities) {
static void test_exhaustive_schnorrsig_sign(const rustsecp256k1_v0_4_1_context *ctx, unsigned char (*xonly_pubkey_bytes)[32], const rustsecp256k1_v0_4_1_keypair* keypairs, const int* parities) {
int d, k;
uint64_t iter = 0;
/* Loop over keys. */
@ -152,20 +152,20 @@ static void test_exhaustive_schnorrsig_sign(const rustsecp256k1_v0_4_0_context *
if (parities[k - 1]) actual_k = EXHAUSTIVE_TEST_ORDER - k;
/* Generate random messages until all challenges have been tried. */
while (e_count_done < EXHAUSTIVE_TEST_ORDER) {
rustsecp256k1_v0_4_0_scalar e;
rustsecp256k1_v0_4_0_testrand256(msg32);
rustsecp256k1_v0_4_0_schnorrsig_challenge(&e, xonly_pubkey_bytes[k - 1], msg32, xonly_pubkey_bytes[d - 1]);
rustsecp256k1_v0_4_1_scalar e;
rustsecp256k1_v0_4_1_testrand256(msg32);
rustsecp256k1_v0_4_1_schnorrsig_challenge(&e, xonly_pubkey_bytes[k - 1], msg32, xonly_pubkey_bytes[d - 1]);
/* Only do work if we hit a challenge we haven't tried before. */
if (!e_done[e]) {
rustsecp256k1_v0_4_0_scalar expected_s = (actual_k + e * actual_d) % EXHAUSTIVE_TEST_ORDER;
rustsecp256k1_v0_4_1_scalar expected_s = (actual_k + e * actual_d) % EXHAUSTIVE_TEST_ORDER;
unsigned char expected_s_bytes[32];
rustsecp256k1_v0_4_0_scalar_get_b32(expected_s_bytes, &expected_s);
rustsecp256k1_v0_4_1_scalar_get_b32(expected_s_bytes, &expected_s);
/* Invoke the real function to construct a signature. */
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig64, msg32, &keypairs[d - 1], rustsecp256k1_v0_4_0_hardened_nonce_function_smallint, &k));
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig64, msg32, &keypairs[d - 1], rustsecp256k1_v0_4_1_hardened_nonce_function_smallint, &k));
/* The first 32 bytes must match the xonly pubkey for the specified k. */
CHECK(rustsecp256k1_v0_4_0_memcmp_var(sig64, xonly_pubkey_bytes[k - 1], 32) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(sig64, xonly_pubkey_bytes[k - 1], 32) == 0);
/* The last 32 bytes must match the expected s value. */
CHECK(rustsecp256k1_v0_4_0_memcmp_var(sig64 + 32, expected_s_bytes, 32) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(sig64 + 32, expected_s_bytes, 32) == 0);
/* Don't retry other messages that result in the same challenge. */
e_done[e] = 1;
++e_count_done;
@ -175,28 +175,28 @@ static void test_exhaustive_schnorrsig_sign(const rustsecp256k1_v0_4_0_context *
}
}
static void test_exhaustive_schnorrsig(const rustsecp256k1_v0_4_0_context *ctx) {
rustsecp256k1_v0_4_0_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1];
rustsecp256k1_v0_4_0_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1];
static void test_exhaustive_schnorrsig(const rustsecp256k1_v0_4_1_context *ctx) {
rustsecp256k1_v0_4_1_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1];
rustsecp256k1_v0_4_1_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1];
int parity[EXHAUSTIVE_TEST_ORDER - 1];
unsigned char xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - 1][32];
unsigned i;
/* Verify that all invalid_pubkey_bytes are actually invalid. */
for (i = 0; i < NUM_INVALID_KEYS; ++i) {
rustsecp256k1_v0_4_0_xonly_pubkey pk;
CHECK(!rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &pk, invalid_pubkey_bytes[i]));
rustsecp256k1_v0_4_1_xonly_pubkey pk;
CHECK(!rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &pk, invalid_pubkey_bytes[i]));
}
/* Construct keypairs and xonly-pubkeys for the entire group. */
for (i = 1; i < EXHAUSTIVE_TEST_ORDER; ++i) {
rustsecp256k1_v0_4_0_scalar scalar_i;
rustsecp256k1_v0_4_1_scalar scalar_i;
unsigned char buf[32];
rustsecp256k1_v0_4_0_scalar_set_int(&scalar_i, i);
rustsecp256k1_v0_4_0_scalar_get_b32(buf, &scalar_i);
CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair[i - 1], buf));
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parity[i - 1], &keypair[i - 1]));
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1]));
rustsecp256k1_v0_4_1_scalar_set_int(&scalar_i, i);
rustsecp256k1_v0_4_1_scalar_get_b32(buf, &scalar_i);
CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair[i - 1], buf));
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parity[i - 1], &keypair[i - 1]));
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1]));
}
test_exhaustive_schnorrsig_sign(ctx, xonly_pubkey_bytes, keypair, parity);

View File

@ -4,10 +4,10 @@
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
#ifndef _SECP256K1_MODULE_SCHNORRSIG_TESTS_
#define _SECP256K1_MODULE_SCHNORRSIG_TESTS_
#ifndef SECP256K1_MODULE_SCHNORRSIG_TESTS_H
#define SECP256K1_MODULE_SCHNORRSIG_TESTS_H
#include "secp256k1_schnorrsig.h"
#include "../../../include/secp256k1_schnorrsig.h"
/* Checks that a bit flip in the n_flip-th argument (which is n_bytes bytes
 * long) changes the resulting nonce
@ -15,28 +15,28 @@
void nonce_function_bip340_bitflip(unsigned char **args, size_t n_flip, size_t n_bytes) {
unsigned char nonces[2][32];
CHECK(nonce_function_bip340(nonces[0], args[0], args[1], args[2], args[3], args[4]) == 1);
rustsecp256k1_v0_4_0_testrand_flip(args[n_flip], n_bytes);
rustsecp256k1_v0_4_1_testrand_flip(args[n_flip], n_bytes);
CHECK(nonce_function_bip340(nonces[1], args[0], args[1], args[2], args[3], args[4]) == 1);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(nonces[0], nonces[1], 32) != 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(nonces[0], nonces[1], 32) != 0);
}
/* Tests for the equality of two sha256 structs. This function only produces a
 * correct result if a multiple of 64 bytes has been written
 * into the hashes. */
void test_sha256_eq(const rustsecp256k1_v0_4_0_sha256 *sha1, const rustsecp256k1_v0_4_0_sha256 *sha2) {
void test_sha256_eq(const rustsecp256k1_v0_4_1_sha256 *sha1, const rustsecp256k1_v0_4_1_sha256 *sha2) {
/* Is buffer fully consumed? */
CHECK((sha1->bytes & 0x3F) == 0);
CHECK(sha1->bytes == sha2->bytes);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(sha1->s, sha2->s, sizeof(sha1->s)) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(sha1->s, sha2->s, sizeof(sha1->s)) == 0);
}
void run_nonce_function_bip340_tests(void) {
unsigned char tag[13] = "BIP0340/nonce";
unsigned char aux_tag[11] = "BIP0340/aux";
unsigned char algo16[16] = "BIP0340/nonce\0\0\0";
rustsecp256k1_v0_4_0_sha256 sha;
rustsecp256k1_v0_4_0_sha256 sha_optimized;
rustsecp256k1_v0_4_1_sha256 sha;
rustsecp256k1_v0_4_1_sha256 sha_optimized;
unsigned char nonce[32];
unsigned char msg[32];
unsigned char key[32];
@ -46,23 +46,23 @@ void run_nonce_function_bip340_tests(void) {
int i;
/* Check that hash initialized by
* rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged has the expected
* rustsecp256k1_v0_4_1_nonce_function_bip340_sha256_tagged has the expected
* state. */
rustsecp256k1_v0_4_0_sha256_initialize_tagged(&sha, tag, sizeof(tag));
rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged(&sha_optimized);
rustsecp256k1_v0_4_1_sha256_initialize_tagged(&sha, tag, sizeof(tag));
rustsecp256k1_v0_4_1_nonce_function_bip340_sha256_tagged(&sha_optimized);
test_sha256_eq(&sha, &sha_optimized);
/* Check that hash initialized by
* rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged_aux has the expected
* rustsecp256k1_v0_4_1_nonce_function_bip340_sha256_tagged_aux has the expected
* state. */
rustsecp256k1_v0_4_0_sha256_initialize_tagged(&sha, aux_tag, sizeof(aux_tag));
rustsecp256k1_v0_4_0_nonce_function_bip340_sha256_tagged_aux(&sha_optimized);
rustsecp256k1_v0_4_1_sha256_initialize_tagged(&sha, aux_tag, sizeof(aux_tag));
rustsecp256k1_v0_4_1_nonce_function_bip340_sha256_tagged_aux(&sha_optimized);
test_sha256_eq(&sha, &sha_optimized);
rustsecp256k1_v0_4_0_testrand256(msg);
rustsecp256k1_v0_4_0_testrand256(key);
rustsecp256k1_v0_4_0_testrand256(pk);
rustsecp256k1_v0_4_0_testrand256(aux_rand);
rustsecp256k1_v0_4_1_testrand256(msg);
rustsecp256k1_v0_4_1_testrand256(key);
rustsecp256k1_v0_4_1_testrand256(pk);
rustsecp256k1_v0_4_1_testrand256(aux_rand);
/* Check that a bitflip in an argument results in different nonces. */
args[0] = msg;
@ -102,89 +102,89 @@ void test_schnorrsig_api(void) {
unsigned char sk2[32];
unsigned char sk3[32];
unsigned char msg[32];
rustsecp256k1_v0_4_0_keypair keypairs[3];
rustsecp256k1_v0_4_0_keypair invalid_keypair = { 0 };
rustsecp256k1_v0_4_0_xonly_pubkey pk[3];
rustsecp256k1_v0_4_0_xonly_pubkey zero_pk;
rustsecp256k1_v0_4_1_keypair keypairs[3];
rustsecp256k1_v0_4_1_keypair invalid_keypair = {{ 0 }};
rustsecp256k1_v0_4_1_xonly_pubkey pk[3];
rustsecp256k1_v0_4_1_xonly_pubkey zero_pk;
unsigned char sig[64];
/** setup **/
rustsecp256k1_v0_4_0_context *none = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_NONE);
rustsecp256k1_v0_4_0_context *sign = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN);
rustsecp256k1_v0_4_0_context *vrfy = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_VERIFY);
rustsecp256k1_v0_4_0_context *both = rustsecp256k1_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
rustsecp256k1_v0_4_1_context *none = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_NONE);
rustsecp256k1_v0_4_1_context *sign = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN);
rustsecp256k1_v0_4_1_context *vrfy = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_VERIFY);
rustsecp256k1_v0_4_1_context *both = rustsecp256k1_v0_4_1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
int ecount;
rustsecp256k1_v0_4_0_context_set_error_callback(none, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_0_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_0_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_0_context_set_error_callback(both, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_0_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_0_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_0_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_0_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_1_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_1_context_set_error_callback(both, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_4_0_testrand256(sk1);
rustsecp256k1_v0_4_0_testrand256(sk2);
rustsecp256k1_v0_4_0_testrand256(sk3);
rustsecp256k1_v0_4_0_testrand256(msg);
CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypairs[0], sk1) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypairs[1], sk2) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypairs[2], sk3) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &pk[0], NULL, &keypairs[0]) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &pk[1], NULL, &keypairs[1]) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &pk[2], NULL, &keypairs[2]) == 1);
rustsecp256k1_v0_4_1_testrand256(sk1);
rustsecp256k1_v0_4_1_testrand256(sk2);
rustsecp256k1_v0_4_1_testrand256(sk3);
rustsecp256k1_v0_4_1_testrand256(msg);
CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypairs[0], sk1) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypairs[1], sk2) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypairs[2], sk3) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &pk[0], NULL, &keypairs[0]) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &pk[1], NULL, &keypairs[1]) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &pk[2], NULL, &keypairs[2]) == 1);
memset(&zero_pk, 0, sizeof(zero_pk));
/** main test body **/
ecount = 0;
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(none, sig, msg, &keypairs[0], NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(none, sig, msg, &keypairs[0], NULL, NULL) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(vrfy, sig, msg, &keypairs[0], NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(vrfy, sig, msg, &keypairs[0], NULL, NULL) == 0);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL, NULL) == 1);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(sign, NULL, msg, &keypairs[0], NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(sign, NULL, msg, &keypairs[0], NULL, NULL) == 0);
CHECK(ecount == 3);
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(sign, sig, NULL, &keypairs[0], NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(sign, sig, NULL, &keypairs[0], NULL, NULL) == 0);
CHECK(ecount == 4);
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(sign, sig, msg, NULL, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(sign, sig, msg, NULL, NULL, NULL) == 0);
CHECK(ecount == 5);
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(sign, sig, msg, &invalid_keypair, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(sign, sig, msg, &invalid_keypair, NULL, NULL) == 0);
CHECK(ecount == 6);
ecount = 0;
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(none, sig, msg, &pk[0]) == 0);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(none, sig, msg, &pk[0]) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(sign, sig, msg, &pk[0]) == 0);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(sign, sig, msg, &pk[0]) == 0);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(vrfy, sig, msg, &pk[0]) == 1);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(vrfy, sig, msg, &pk[0]) == 1);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(vrfy, NULL, msg, &pk[0]) == 0);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(vrfy, NULL, msg, &pk[0]) == 0);
CHECK(ecount == 3);
CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(vrfy, sig, NULL, &pk[0]) == 0);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(vrfy, sig, NULL, &pk[0]) == 0);
CHECK(ecount == 4);
CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(vrfy, sig, msg, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(vrfy, sig, msg, NULL) == 0);
CHECK(ecount == 5);
CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(vrfy, sig, msg, &zero_pk) == 0);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(vrfy, sig, msg, &zero_pk) == 0);
CHECK(ecount == 6);
rustsecp256k1_v0_4_0_context_destroy(none);
rustsecp256k1_v0_4_0_context_destroy(sign);
rustsecp256k1_v0_4_0_context_destroy(vrfy);
rustsecp256k1_v0_4_0_context_destroy(both);
rustsecp256k1_v0_4_1_context_destroy(none);
rustsecp256k1_v0_4_1_context_destroy(sign);
rustsecp256k1_v0_4_1_context_destroy(vrfy);
rustsecp256k1_v0_4_1_context_destroy(both);
}
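For completeness, a minimal happy-path sketch of the same API (assuming, like the tests above, a global ctx with SIGN|VERIFY capability and the CHECK macro; the helper name is hypothetical):
/* Sketch only: key setup, BIP-340 signing and verification with the v0_4_1 API. */
static void schnorrsig_roundtrip_sketch(const unsigned char *sk32, const unsigned char *msg32) {
    rustsecp256k1_v0_4_1_keypair keypair;
    rustsecp256k1_v0_4_1_xonly_pubkey pk;
    unsigned char sig64[64];
    CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk32) == 1);
    CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &pk, NULL, &keypair) == 1);
    /* NULL noncefp selects rustsecp256k1_v0_4_1_nonce_function_bip340;
     * NULL ndata means no auxiliary randomness is mixed in. */
    CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig64, msg32, &keypair, NULL, NULL) == 1);
    CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig64, msg32, &pk) == 1);
}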
/* Checks that hash initialized by rustsecp256k1_v0_4_0_schnorrsig_sha256_tagged has the
/* Checks that hash initialized by rustsecp256k1_v0_4_1_schnorrsig_sha256_tagged has the
* expected state. */
void test_schnorrsig_sha256_tagged(void) {
char tag[17] = "BIP0340/challenge";
rustsecp256k1_v0_4_0_sha256 sha;
rustsecp256k1_v0_4_0_sha256 sha_optimized;
rustsecp256k1_v0_4_1_sha256 sha;
rustsecp256k1_v0_4_1_sha256 sha_optimized;
rustsecp256k1_v0_4_0_sha256_initialize_tagged(&sha, (unsigned char *) tag, sizeof(tag));
rustsecp256k1_v0_4_0_schnorrsig_sha256_tagged(&sha_optimized);
rustsecp256k1_v0_4_1_sha256_initialize_tagged(&sha, (unsigned char *) tag, sizeof(tag));
rustsecp256k1_v0_4_1_schnorrsig_sha256_tagged(&sha_optimized);
test_sha256_eq(&sha, &sha_optimized);
}
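The hard-coded midstates compared in this test come from the generic tagged-hash construction; a sketch of how such a midstate can be reproduced with the hash primitives used elsewhere in this diff (the helper name is hypothetical):
/* Sketch: a tagged-hash midstate is the SHA256 state after absorbing
 * SHA256(tag) || SHA256(tag), i.e. exactly one 64-byte block. */
static void tagged_midstate_sketch(rustsecp256k1_v0_4_1_sha256 *sha, const unsigned char *tag, size_t taglen) {
    unsigned char tag_hash[32];
    rustsecp256k1_v0_4_1_sha256_initialize(sha);
    rustsecp256k1_v0_4_1_sha256_write(sha, tag, taglen);
    rustsecp256k1_v0_4_1_sha256_finalize(sha, tag_hash);
    rustsecp256k1_v0_4_1_sha256_initialize(sha);
    rustsecp256k1_v0_4_1_sha256_write(sha, tag_hash, 32);
    rustsecp256k1_v0_4_1_sha256_write(sha, tag_hash, 32);
    /* sha->s now holds the midstate and sha->bytes == 64, which is what
     * test_sha256_eq checks against the optimized initializers. */
}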
@ -192,26 +192,26 @@ void test_schnorrsig_sha256_tagged(void) {
* Signs the message and checks that it's the same as expected_sig. */
void test_schnorrsig_bip_vectors_check_signing(const unsigned char *sk, const unsigned char *pk_serialized, unsigned char *aux_rand, const unsigned char *msg, const unsigned char *expected_sig) {
unsigned char sig[64];
rustsecp256k1_v0_4_0_keypair keypair;
rustsecp256k1_v0_4_0_xonly_pubkey pk, pk_expected;
rustsecp256k1_v0_4_1_keypair keypair;
rustsecp256k1_v0_4_1_xonly_pubkey pk, pk_expected;
CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk));
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, aux_rand));
CHECK(rustsecp256k1_v0_4_0_memcmp_var(sig, expected_sig, 64) == 0);
CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk));
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, aux_rand));
CHECK(rustsecp256k1_v0_4_1_memcmp_var(sig, expected_sig, 64) == 0);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &pk_expected, pk_serialized));
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &pk, NULL, &keypair));
CHECK(rustsecp256k1_v0_4_0_memcmp_var(&pk, &pk_expected, sizeof(pk)) == 0);
CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig, msg, &pk));
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &pk_expected, pk_serialized));
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &pk, NULL, &keypair));
CHECK(rustsecp256k1_v0_4_1_memcmp_var(&pk, &pk_expected, sizeof(pk)) == 0);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig, msg, &pk));
}
/* Helper function for schnorrsig_bip_vectors
* Checks that both verify and verify_batch (TODO) return the same value as expected. */
void test_schnorrsig_bip_vectors_check_verify(const unsigned char *pk_serialized, const unsigned char *msg32, const unsigned char *sig, int expected) {
rustsecp256k1_v0_4_0_xonly_pubkey pk;
rustsecp256k1_v0_4_1_xonly_pubkey pk;
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &pk, pk_serialized));
CHECK(expected == rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig, msg32, &pk));
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &pk, pk_serialized));
CHECK(expected == rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig, msg32, &pk));
}
/* Test vectors according to BIP-340 ("Schnorr Signatures for secp256k1"). See
@ -407,9 +407,9 @@ void test_schnorrsig_bip_vectors(void) {
0xEB, 0x98, 0x98, 0xAE, 0x79, 0xB9, 0x76, 0x87,
0x66, 0xE4, 0xFA, 0xA0, 0x4A, 0x2D, 0x4A, 0x34
};
rustsecp256k1_v0_4_0_xonly_pubkey pk_parsed;
rustsecp256k1_v0_4_1_xonly_pubkey pk_parsed;
/* No need to check the signature of the test vector as parsing the pubkey already fails */
CHECK(!rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &pk_parsed, pk));
CHECK(!rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &pk_parsed, pk));
}
{
/* Test vector 6 */
@ -627,9 +627,9 @@ void test_schnorrsig_bip_vectors(void) {
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x30
};
rustsecp256k1_v0_4_0_xonly_pubkey pk_parsed;
rustsecp256k1_v0_4_1_xonly_pubkey pk_parsed;
/* No need to check the signature of the test vector as parsing the pubkey already fails */
CHECK(!rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &pk_parsed, pk));
CHECK(!rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &pk_parsed, pk));
}
}
@ -670,24 +670,24 @@ static int nonce_function_overflowing(unsigned char *nonce32, const unsigned cha
void test_schnorrsig_sign(void) {
unsigned char sk[32];
rustsecp256k1_v0_4_0_keypair keypair;
rustsecp256k1_v0_4_1_keypair keypair;
const unsigned char msg[32] = "this is a msg for a schnorrsig..";
unsigned char sig[64];
unsigned char zeros64[64] = { 0 };
rustsecp256k1_v0_4_0_testrand256(sk);
CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk));
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1);
rustsecp256k1_v0_4_1_testrand256(sk);
CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk));
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1);
/* Test different nonce functions */
memset(sig, 1, sizeof(sig));
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_failing, NULL) == 0);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(sig, zeros64, sizeof(sig)) == 0);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_failing, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(sig, zeros64, sizeof(sig)) == 0);
memset(&sig, 1, sizeof(sig));
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_0, NULL) == 0);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(sig, zeros64, sizeof(sig)) == 0);
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_overflowing, NULL) == 1);
CHECK(rustsecp256k1_v0_4_0_memcmp_var(sig, zeros64, sizeof(sig)) != 0);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_0, NULL) == 0);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(sig, zeros64, sizeof(sig)) == 0);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_overflowing, NULL) == 1);
CHECK(rustsecp256k1_v0_4_1_memcmp_var(sig, zeros64, sizeof(sig)) != 0);
}
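The custom nonce functions passed above (nonce_function_failing, nonce_function_0, nonce_function_overflowing) are defined earlier in this test file. As a rough sketch of the shape such a callback takes, assuming the rustsecp256k1_v0_4_1_nonce_function_hardened signature used by the schnorrsig module at this revision (the parameter list here is inferred, not quoted from the diff):
/* Sketch only: a hardened nonce callback that always fails, which makes
 * schnorrsig_sign return 0 and zero out the signature buffer, as the
 * CHECKs above expect. */
static int nonce_function_failing_sketch(unsigned char *nonce32,
                                         const unsigned char *msg32,
                                         const unsigned char *key32,
                                         const unsigned char *xonly_pk32,
                                         const unsigned char *algo16,
                                         void *data) {
    (void) nonce32; (void) msg32; (void) key32;
    (void) xonly_pk32; (void) algo16; (void) data;
    return 0; /* signal failure to the caller */
}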
#define N_SIGS 3
@ -699,66 +699,66 @@ void test_schnorrsig_sign_verify(void) {
unsigned char msg[N_SIGS][32];
unsigned char sig[N_SIGS][64];
size_t i;
rustsecp256k1_v0_4_0_keypair keypair;
rustsecp256k1_v0_4_0_xonly_pubkey pk;
rustsecp256k1_v0_4_0_scalar s;
rustsecp256k1_v0_4_1_keypair keypair;
rustsecp256k1_v0_4_1_xonly_pubkey pk;
rustsecp256k1_v0_4_1_scalar s;
rustsecp256k1_v0_4_0_testrand256(sk);
CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk));
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &pk, NULL, &keypair));
rustsecp256k1_v0_4_1_testrand256(sk);
CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk));
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &pk, NULL, &keypair));
for (i = 0; i < N_SIGS; i++) {
rustsecp256k1_v0_4_0_testrand256(msg[i]);
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig[i], msg[i], &keypair, NULL, NULL));
CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig[i], msg[i], &pk));
rustsecp256k1_v0_4_1_testrand256(msg[i]);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig[i], msg[i], &keypair, NULL, NULL));
CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig[i], msg[i], &pk));
}
{
/* Flip a few bits in the signature and in the message and check that
* verify and verify_batch (TODO) fail */
size_t sig_idx = rustsecp256k1_v0_4_0_testrand_int(N_SIGS);
size_t byte_idx = rustsecp256k1_v0_4_0_testrand_int(32);
unsigned char xorbyte = rustsecp256k1_v0_4_0_testrand_int(254)+1;
size_t sig_idx = rustsecp256k1_v0_4_1_testrand_int(N_SIGS);
size_t byte_idx = rustsecp256k1_v0_4_1_testrand_int(32);
unsigned char xorbyte = rustsecp256k1_v0_4_1_testrand_int(254)+1;
sig[sig_idx][byte_idx] ^= xorbyte;
CHECK(!rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
CHECK(!rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
sig[sig_idx][byte_idx] ^= xorbyte;
byte_idx = rustsecp256k1_v0_4_0_testrand_int(32);
byte_idx = rustsecp256k1_v0_4_1_testrand_int(32);
sig[sig_idx][32+byte_idx] ^= xorbyte;
CHECK(!rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
CHECK(!rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
sig[sig_idx][32+byte_idx] ^= xorbyte;
byte_idx = rustsecp256k1_v0_4_0_testrand_int(32);
byte_idx = rustsecp256k1_v0_4_1_testrand_int(32);
msg[sig_idx][byte_idx] ^= xorbyte;
CHECK(!rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
CHECK(!rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
msg[sig_idx][byte_idx] ^= xorbyte;
/* Check that above bitflips have been reversed correctly */
CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
}
/* Test overflowing s */
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL, NULL));
CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig[0], msg[0], &pk));
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL, NULL));
CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig[0], msg[0], &pk));
memset(&sig[0][32], 0xFF, 32);
CHECK(!rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig[0], msg[0], &pk));
CHECK(!rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig[0], msg[0], &pk));
/* Test negative s */
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL, NULL));
CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig[0], msg[0], &pk));
rustsecp256k1_v0_4_0_scalar_set_b32(&s, &sig[0][32], NULL);
rustsecp256k1_v0_4_0_scalar_negate(&s, &s);
rustsecp256k1_v0_4_0_scalar_get_b32(&sig[0][32], &s);
CHECK(!rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig[0], msg[0], &pk));
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL, NULL));
CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig[0], msg[0], &pk));
rustsecp256k1_v0_4_1_scalar_set_b32(&s, &sig[0][32], NULL);
rustsecp256k1_v0_4_1_scalar_negate(&s, &s);
rustsecp256k1_v0_4_1_scalar_get_b32(&sig[0][32], &s);
CHECK(!rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig[0], msg[0], &pk));
}
#undef N_SIGS
void test_schnorrsig_taproot(void) {
unsigned char sk[32];
rustsecp256k1_v0_4_0_keypair keypair;
rustsecp256k1_v0_4_0_xonly_pubkey internal_pk;
rustsecp256k1_v0_4_1_keypair keypair;
rustsecp256k1_v0_4_1_xonly_pubkey internal_pk;
unsigned char internal_pk_bytes[32];
rustsecp256k1_v0_4_0_xonly_pubkey output_pk;
rustsecp256k1_v0_4_1_xonly_pubkey output_pk;
unsigned char output_pk_bytes[32];
unsigned char tweak[32];
int pk_parity;
@ -766,27 +766,27 @@ void test_schnorrsig_taproot(void) {
unsigned char sig[64];
/* Create output key */
rustsecp256k1_v0_4_0_testrand256(sk);
CHECK(rustsecp256k1_v0_4_0_keypair_create(ctx, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1);
rustsecp256k1_v0_4_1_testrand256(sk);
CHECK(rustsecp256k1_v0_4_1_keypair_create(ctx, &keypair, sk) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1);
/* In actual taproot the tweak would be hash of internal_pk */
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, tweak, &internal_pk) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1);
CHECK(rustsecp256k1_v0_4_0_keypair_xonly_pub(ctx, &output_pk, &pk_parity, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, output_pk_bytes, &output_pk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, tweak, &internal_pk) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1);
CHECK(rustsecp256k1_v0_4_1_keypair_xonly_pub(ctx, &output_pk, &pk_parity, &keypair) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, output_pk_bytes, &output_pk) == 1);
/* Key spend */
rustsecp256k1_v0_4_0_testrand256(msg);
CHECK(rustsecp256k1_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1);
rustsecp256k1_v0_4_1_testrand256(msg);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1);
/* Verify key spend */
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &output_pk, output_pk_bytes) == 1);
CHECK(rustsecp256k1_v0_4_0_schnorrsig_verify(ctx, sig, msg, &output_pk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &output_pk, output_pk_bytes) == 1);
CHECK(rustsecp256k1_v0_4_1_schnorrsig_verify(ctx, sig, msg, &output_pk) == 1);
/* Script spend */
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_serialize(ctx, internal_pk_bytes, &internal_pk) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_serialize(ctx, internal_pk_bytes, &internal_pk) == 1);
/* Verify script spend */
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_parse(ctx, &internal_pk, internal_pk_bytes) == 1);
CHECK(rustsecp256k1_v0_4_0_xonly_pubkey_tweak_add_check(ctx, output_pk_bytes, pk_parity, &internal_pk, tweak) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_parse(ctx, &internal_pk, internal_pk_bytes) == 1);
CHECK(rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check(ctx, output_pk_bytes, pk_parity, &internal_pk, tweak) == 1);
}
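In equation form, the final check above is the taproot tweak relation; this is a restatement of what rustsecp256k1_v0_4_1_xonly_pubkey_tweak_add_check verifies, not new behavior. With internal key P, tweak t, and output key Q whose Y-parity is pk_parity:
    Q = P + t*G
rustsecp256k1_v0_4_1_keypair_xonly_tweak_add produced Q (and the matching tweaked secret key) earlier in the test, and tweak_add_check re-checks this relation given only the 32-byte serialization of Q, its parity bit, P, and t.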
void run_schnorrsig_tests(void) {

View File

@ -1,74 +0,0 @@
/***********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
#ifndef SECP256K1_NUM_H
#define SECP256K1_NUM_H
#ifndef USE_NUM_NONE
#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
#endif
#if defined(USE_NUM_GMP)
#include "num_gmp.h"
#else
#error "Please select num implementation"
#endif
/** Copy a number. */
static void rustsecp256k1_v0_4_0_num_copy(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a);
/** Convert a number's absolute value to a binary big-endian string.
* There must be enough place. */
static void rustsecp256k1_v0_4_0_num_get_bin(unsigned char *r, unsigned int rlen, const rustsecp256k1_v0_4_0_num *a);
/** Set a number to the value of a binary big-endian string. */
static void rustsecp256k1_v0_4_0_num_set_bin(rustsecp256k1_v0_4_0_num *r, const unsigned char *a, unsigned int alen);
/** Compute a modular inverse. The input must be less than the modulus. */
static void rustsecp256k1_v0_4_0_num_mod_inverse(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *m);
/** Compute the jacobi symbol (a|b). b must be positive and odd. */
static int rustsecp256k1_v0_4_0_num_jacobi(const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b);
/** Compare the absolute value of two numbers. */
static int rustsecp256k1_v0_4_0_num_cmp(const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b);
/** Test whether two number are equal (including sign). */
static int rustsecp256k1_v0_4_0_num_eq(const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b);
/** Add two (signed) numbers. */
static void rustsecp256k1_v0_4_0_num_add(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b);
/** Subtract two (signed) numbers. */
static void rustsecp256k1_v0_4_0_num_sub(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b);
/** Multiply two (signed) numbers. */
static void rustsecp256k1_v0_4_0_num_mul(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b);
/** Replace a number by its remainder modulo m. M's sign is ignored. The result is a number between 0 and m-1,
even if r was negative. */
static void rustsecp256k1_v0_4_0_num_mod(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *m);
/** Right-shift the passed number by bits bits. */
static void rustsecp256k1_v0_4_0_num_shift(rustsecp256k1_v0_4_0_num *r, int bits);
/** Check whether a number is zero. */
static int rustsecp256k1_v0_4_0_num_is_zero(const rustsecp256k1_v0_4_0_num *a);
/** Check whether a number is one. */
static int rustsecp256k1_v0_4_0_num_is_one(const rustsecp256k1_v0_4_0_num *a);
/** Check whether a number is strictly negative. */
static int rustsecp256k1_v0_4_0_num_is_neg(const rustsecp256k1_v0_4_0_num *a);
/** Change a number's sign. */
static void rustsecp256k1_v0_4_0_num_negate(rustsecp256k1_v0_4_0_num *r);
#endif
#endif /* SECP256K1_NUM_H */

View File

@ -1,20 +0,0 @@
/***********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
#ifndef SECP256K1_NUM_REPR_H
#define SECP256K1_NUM_REPR_H
#include <gmp.h>
#define NUM_LIMBS ((256+GMP_NUMB_BITS-1)/GMP_NUMB_BITS)
typedef struct {
mp_limb_t data[2*NUM_LIMBS];
int neg;
int limbs;
} rustsecp256k1_v0_4_0_num;
#endif /* SECP256K1_NUM_REPR_H */

View File

@ -1,288 +0,0 @@
/***********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
#ifndef SECP256K1_NUM_REPR_IMPL_H
#define SECP256K1_NUM_REPR_IMPL_H
#include <string.h>
#include <stdlib.h>
#include <gmp.h>
#include "util.h"
#include "num.h"
#ifdef VERIFY
static void rustsecp256k1_v0_4_0_num_sanity(const rustsecp256k1_v0_4_0_num *a) {
VERIFY_CHECK(a->limbs == 1 || (a->limbs > 1 && a->data[a->limbs-1] != 0));
}
#else
#define rustsecp256k1_v0_4_0_num_sanity(a) do { } while(0)
#endif
static void rustsecp256k1_v0_4_0_num_copy(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a) {
*r = *a;
}
static void rustsecp256k1_v0_4_0_num_get_bin(unsigned char *r, unsigned int rlen, const rustsecp256k1_v0_4_0_num *a) {
unsigned char tmp[65];
int len = 0;
int shift = 0;
if (a->limbs>1 || a->data[0] != 0) {
len = mpn_get_str(tmp, 256, (mp_limb_t*)a->data, a->limbs);
}
while (shift < len && tmp[shift] == 0) shift++;
VERIFY_CHECK(len-shift <= (int)rlen);
memset(r, 0, rlen - len + shift);
if (len > shift) {
memcpy(r + rlen - len + shift, tmp + shift, len - shift);
}
memset(tmp, 0, sizeof(tmp));
}
static void rustsecp256k1_v0_4_0_num_set_bin(rustsecp256k1_v0_4_0_num *r, const unsigned char *a, unsigned int alen) {
int len;
VERIFY_CHECK(alen > 0);
VERIFY_CHECK(alen <= 64);
len = mpn_set_str(r->data, a, alen, 256);
if (len == 0) {
r->data[0] = 0;
len = 1;
}
VERIFY_CHECK(len <= NUM_LIMBS*2);
r->limbs = len;
r->neg = 0;
while (r->limbs > 1 && r->data[r->limbs-1]==0) {
r->limbs--;
}
}
static void rustsecp256k1_v0_4_0_num_add_abs(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b) {
mp_limb_t c = mpn_add(r->data, a->data, a->limbs, b->data, b->limbs);
r->limbs = a->limbs;
if (c != 0) {
VERIFY_CHECK(r->limbs < 2*NUM_LIMBS);
r->data[r->limbs++] = c;
}
}
static void rustsecp256k1_v0_4_0_num_sub_abs(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b) {
mp_limb_t c = mpn_sub(r->data, a->data, a->limbs, b->data, b->limbs);
(void)c;
VERIFY_CHECK(c == 0);
r->limbs = a->limbs;
while (r->limbs > 1 && r->data[r->limbs-1]==0) {
r->limbs--;
}
}
static void rustsecp256k1_v0_4_0_num_mod(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *m) {
rustsecp256k1_v0_4_0_num_sanity(r);
rustsecp256k1_v0_4_0_num_sanity(m);
if (r->limbs >= m->limbs) {
mp_limb_t t[2*NUM_LIMBS];
mpn_tdiv_qr(t, r->data, 0, r->data, r->limbs, m->data, m->limbs);
memset(t, 0, sizeof(t));
r->limbs = m->limbs;
while (r->limbs > 1 && r->data[r->limbs-1]==0) {
r->limbs--;
}
}
if (r->neg && (r->limbs > 1 || r->data[0] != 0)) {
rustsecp256k1_v0_4_0_num_sub_abs(r, m, r);
r->neg = 0;
}
}
static void rustsecp256k1_v0_4_0_num_mod_inverse(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *m) {
int i;
mp_limb_t g[NUM_LIMBS+1];
mp_limb_t u[NUM_LIMBS+1];
mp_limb_t v[NUM_LIMBS+1];
mp_size_t sn;
mp_size_t gn;
rustsecp256k1_v0_4_0_num_sanity(a);
rustsecp256k1_v0_4_0_num_sanity(m);
/** mpn_gcdext computes: (G,S) = gcdext(U,V), where
* * G = gcd(U,V)
* * G = U*S + V*T
* * U has equal or more limbs than V, and V has no padding
* If we set U to be (a padded version of) a, and V = m:
* G = a*S + m*T
* G = a*S mod m
* Assuming G=1:
* S = 1/a mod m
*/
VERIFY_CHECK(m->limbs <= NUM_LIMBS);
VERIFY_CHECK(m->data[m->limbs-1] != 0);
for (i = 0; i < m->limbs; i++) {
u[i] = (i < a->limbs) ? a->data[i] : 0;
v[i] = m->data[i];
}
sn = NUM_LIMBS+1;
gn = mpn_gcdext(g, r->data, &sn, u, m->limbs, v, m->limbs);
(void)gn;
VERIFY_CHECK(gn == 1);
VERIFY_CHECK(g[0] == 1);
r->neg = a->neg ^ m->neg;
if (sn < 0) {
mpn_sub(r->data, m->data, m->limbs, r->data, -sn);
r->limbs = m->limbs;
while (r->limbs > 1 && r->data[r->limbs-1]==0) {
r->limbs--;
}
} else {
r->limbs = sn;
}
memset(g, 0, sizeof(g));
memset(u, 0, sizeof(u));
memset(v, 0, sizeof(v));
}
static int rustsecp256k1_v0_4_0_num_jacobi(const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b) {
int ret;
mpz_t ga, gb;
rustsecp256k1_v0_4_0_num_sanity(a);
rustsecp256k1_v0_4_0_num_sanity(b);
VERIFY_CHECK(!b->neg && (b->limbs > 0) && (b->data[0] & 1));
mpz_inits(ga, gb, NULL);
mpz_import(gb, b->limbs, -1, sizeof(mp_limb_t), 0, 0, b->data);
mpz_import(ga, a->limbs, -1, sizeof(mp_limb_t), 0, 0, a->data);
if (a->neg) {
mpz_neg(ga, ga);
}
ret = mpz_jacobi(ga, gb);
mpz_clears(ga, gb, NULL);
return ret;
}
static int rustsecp256k1_v0_4_0_num_is_one(const rustsecp256k1_v0_4_0_num *a) {
return (a->limbs == 1 && a->data[0] == 1);
}
static int rustsecp256k1_v0_4_0_num_is_zero(const rustsecp256k1_v0_4_0_num *a) {
return (a->limbs == 1 && a->data[0] == 0);
}
static int rustsecp256k1_v0_4_0_num_is_neg(const rustsecp256k1_v0_4_0_num *a) {
return (a->limbs > 1 || a->data[0] != 0) && a->neg;
}
static int rustsecp256k1_v0_4_0_num_cmp(const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b) {
if (a->limbs > b->limbs) {
return 1;
}
if (a->limbs < b->limbs) {
return -1;
}
return mpn_cmp(a->data, b->data, a->limbs);
}
static int rustsecp256k1_v0_4_0_num_eq(const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b) {
if (a->limbs > b->limbs) {
return 0;
}
if (a->limbs < b->limbs) {
return 0;
}
if ((a->neg && !rustsecp256k1_v0_4_0_num_is_zero(a)) != (b->neg && !rustsecp256k1_v0_4_0_num_is_zero(b))) {
return 0;
}
return mpn_cmp(a->data, b->data, a->limbs) == 0;
}
static void rustsecp256k1_v0_4_0_num_subadd(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b, int bneg) {
if (!(b->neg ^ bneg ^ a->neg)) { /* a and b have the same sign */
r->neg = a->neg;
if (a->limbs >= b->limbs) {
rustsecp256k1_v0_4_0_num_add_abs(r, a, b);
} else {
rustsecp256k1_v0_4_0_num_add_abs(r, b, a);
}
} else {
if (rustsecp256k1_v0_4_0_num_cmp(a, b) > 0) {
r->neg = a->neg;
rustsecp256k1_v0_4_0_num_sub_abs(r, a, b);
} else {
r->neg = b->neg ^ bneg;
rustsecp256k1_v0_4_0_num_sub_abs(r, b, a);
}
}
}
static void rustsecp256k1_v0_4_0_num_add(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b) {
rustsecp256k1_v0_4_0_num_sanity(a);
rustsecp256k1_v0_4_0_num_sanity(b);
rustsecp256k1_v0_4_0_num_subadd(r, a, b, 0);
}
static void rustsecp256k1_v0_4_0_num_sub(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b) {
rustsecp256k1_v0_4_0_num_sanity(a);
rustsecp256k1_v0_4_0_num_sanity(b);
rustsecp256k1_v0_4_0_num_subadd(r, a, b, 1);
}
static void rustsecp256k1_v0_4_0_num_mul(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_num *a, const rustsecp256k1_v0_4_0_num *b) {
mp_limb_t tmp[2*NUM_LIMBS+1];
rustsecp256k1_v0_4_0_num_sanity(a);
rustsecp256k1_v0_4_0_num_sanity(b);
VERIFY_CHECK(a->limbs + b->limbs <= 2*NUM_LIMBS+1);
if ((a->limbs==1 && a->data[0]==0) || (b->limbs==1 && b->data[0]==0)) {
r->limbs = 1;
r->neg = 0;
r->data[0] = 0;
return;
}
if (a->limbs >= b->limbs) {
mpn_mul(tmp, a->data, a->limbs, b->data, b->limbs);
} else {
mpn_mul(tmp, b->data, b->limbs, a->data, a->limbs);
}
r->limbs = a->limbs + b->limbs;
if (r->limbs > 1 && tmp[r->limbs - 1]==0) {
r->limbs--;
}
VERIFY_CHECK(r->limbs <= 2*NUM_LIMBS);
mpn_copyi(r->data, tmp, r->limbs);
r->neg = a->neg ^ b->neg;
memset(tmp, 0, sizeof(tmp));
}
static void rustsecp256k1_v0_4_0_num_shift(rustsecp256k1_v0_4_0_num *r, int bits) {
if (bits % GMP_NUMB_BITS) {
/* Shift within limbs. */
mpn_rshift(r->data, r->data, r->limbs, bits % GMP_NUMB_BITS);
}
if (bits >= GMP_NUMB_BITS) {
int i;
/* Shift full limbs. */
for (i = 0; i < r->limbs; i++) {
int index = i + (bits / GMP_NUMB_BITS);
if (index < r->limbs && index < 2*NUM_LIMBS) {
r->data[i] = r->data[index];
} else {
r->data[i] = 0;
}
}
}
while (r->limbs>1 && r->data[r->limbs-1]==0) {
r->limbs--;
}
}
static void rustsecp256k1_v0_4_0_num_negate(rustsecp256k1_v0_4_0_num *r) {
r->neg ^= 1;
}
#endif /* SECP256K1_NUM_REPR_IMPL_H */

View File

@ -1,24 +0,0 @@
/***********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
#ifndef SECP256K1_NUM_IMPL_H
#define SECP256K1_NUM_IMPL_H
#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
#endif
#include "num.h"
#if defined(USE_NUM_GMP)
#include "num_gmp_impl.h"
#elif defined(USE_NUM_NONE)
/* Nothing. */
#else
#error "Please select num implementation"
#endif
#endif /* SECP256K1_NUM_IMPL_H */

View File

@ -7,7 +7,6 @@
#ifndef SECP256K1_SCALAR_H
#define SECP256K1_SCALAR_H
#include "num.h"
#include "util.h"
#if defined HAVE_CONFIG_H
@ -25,93 +24,82 @@
#endif
/** Clear a scalar to prevent the leak of sensitive data. */
static void rustsecp256k1_v0_4_0_scalar_clear(rustsecp256k1_v0_4_0_scalar *r);
static void rustsecp256k1_v0_4_1_scalar_clear(rustsecp256k1_v0_4_1_scalar *r);
/** Access bits from a scalar. All requested bits must belong to the same 32-bit limb. */
static unsigned int rustsecp256k1_v0_4_0_scalar_get_bits(const rustsecp256k1_v0_4_0_scalar *a, unsigned int offset, unsigned int count);
static unsigned int rustsecp256k1_v0_4_1_scalar_get_bits(const rustsecp256k1_v0_4_1_scalar *a, unsigned int offset, unsigned int count);
/** Access bits from a scalar. Not constant time. */
static unsigned int rustsecp256k1_v0_4_0_scalar_get_bits_var(const rustsecp256k1_v0_4_0_scalar *a, unsigned int offset, unsigned int count);
static unsigned int rustsecp256k1_v0_4_1_scalar_get_bits_var(const rustsecp256k1_v0_4_1_scalar *a, unsigned int offset, unsigned int count);
/** Set a scalar from a big endian byte array. The scalar will be reduced modulo group order `n`.
* In: bin: pointer to a 32-byte array.
* Out: r: scalar to be set.
* overflow: non-zero if the scalar was bigger or equal to `n` before reduction, zero otherwise (can be NULL).
*/
static void rustsecp256k1_v0_4_0_scalar_set_b32(rustsecp256k1_v0_4_0_scalar *r, const unsigned char *bin, int *overflow);
static void rustsecp256k1_v0_4_1_scalar_set_b32(rustsecp256k1_v0_4_1_scalar *r, const unsigned char *bin, int *overflow);
/** Set a scalar from a big endian byte array and returns 1 if it is a valid
* seckey and 0 otherwise. */
static int rustsecp256k1_v0_4_0_scalar_set_b32_seckey(rustsecp256k1_v0_4_0_scalar *r, const unsigned char *bin);
static int rustsecp256k1_v0_4_1_scalar_set_b32_seckey(rustsecp256k1_v0_4_1_scalar *r, const unsigned char *bin);
/** Set a scalar to an unsigned integer. */
static void rustsecp256k1_v0_4_0_scalar_set_int(rustsecp256k1_v0_4_0_scalar *r, unsigned int v);
static void rustsecp256k1_v0_4_1_scalar_set_int(rustsecp256k1_v0_4_1_scalar *r, unsigned int v);
/** Convert a scalar to a byte array. */
static void rustsecp256k1_v0_4_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_4_0_scalar* a);
static void rustsecp256k1_v0_4_1_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_4_1_scalar* a);
/** Add two scalars together (modulo the group order). Returns whether it overflowed. */
static int rustsecp256k1_v0_4_0_scalar_add(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b);
static int rustsecp256k1_v0_4_1_scalar_add(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b);
/** Conditionally add a power of two to a scalar. The result is not allowed to overflow. */
static void rustsecp256k1_v0_4_0_scalar_cadd_bit(rustsecp256k1_v0_4_0_scalar *r, unsigned int bit, int flag);
static void rustsecp256k1_v0_4_1_scalar_cadd_bit(rustsecp256k1_v0_4_1_scalar *r, unsigned int bit, int flag);
/** Multiply two scalars (modulo the group order). */
static void rustsecp256k1_v0_4_0_scalar_mul(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b);
static void rustsecp256k1_v0_4_1_scalar_mul(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b);
/** Shift a scalar right by some amount strictly between 0 and 16, returning
* the low bits that were shifted off */
static int rustsecp256k1_v0_4_0_scalar_shr_int(rustsecp256k1_v0_4_0_scalar *r, int n);
/** Compute the square of a scalar (modulo the group order). */
static void rustsecp256k1_v0_4_0_scalar_sqr(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a);
static int rustsecp256k1_v0_4_1_scalar_shr_int(rustsecp256k1_v0_4_1_scalar *r, int n);
/** Compute the inverse of a scalar (modulo the group order). */
static void rustsecp256k1_v0_4_0_scalar_inverse(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a);
static void rustsecp256k1_v0_4_1_scalar_inverse(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a);
/** Compute the inverse of a scalar (modulo the group order), without constant-time guarantee. */
static void rustsecp256k1_v0_4_0_scalar_inverse_var(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a);
static void rustsecp256k1_v0_4_1_scalar_inverse_var(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a);
/** Compute the complement of a scalar (modulo the group order). */
static void rustsecp256k1_v0_4_0_scalar_negate(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a);
static void rustsecp256k1_v0_4_1_scalar_negate(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a);
/** Check whether a scalar equals zero. */
static int rustsecp256k1_v0_4_0_scalar_is_zero(const rustsecp256k1_v0_4_0_scalar *a);
static int rustsecp256k1_v0_4_1_scalar_is_zero(const rustsecp256k1_v0_4_1_scalar *a);
/** Check whether a scalar equals one. */
static int rustsecp256k1_v0_4_0_scalar_is_one(const rustsecp256k1_v0_4_0_scalar *a);
static int rustsecp256k1_v0_4_1_scalar_is_one(const rustsecp256k1_v0_4_1_scalar *a);
/** Check whether a scalar, considered as a nonnegative integer, is even. */
static int rustsecp256k1_v0_4_0_scalar_is_even(const rustsecp256k1_v0_4_0_scalar *a);
static int rustsecp256k1_v0_4_1_scalar_is_even(const rustsecp256k1_v0_4_1_scalar *a);
/** Check whether a scalar is higher than the group order divided by 2. */
static int rustsecp256k1_v0_4_0_scalar_is_high(const rustsecp256k1_v0_4_0_scalar *a);
static int rustsecp256k1_v0_4_1_scalar_is_high(const rustsecp256k1_v0_4_1_scalar *a);
/** Conditionally negate a number, in constant time.
* Returns -1 if the number was negated, 1 otherwise */
static int rustsecp256k1_v0_4_0_scalar_cond_negate(rustsecp256k1_v0_4_0_scalar *a, int flag);
#ifndef USE_NUM_NONE
/** Convert a scalar to a number. */
static void rustsecp256k1_v0_4_0_scalar_get_num(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_scalar *a);
/** Get the order of the group as a number. */
static void rustsecp256k1_v0_4_0_scalar_order_get_num(rustsecp256k1_v0_4_0_num *r);
#endif
static int rustsecp256k1_v0_4_1_scalar_cond_negate(rustsecp256k1_v0_4_1_scalar *a, int flag);
/** Compare two scalars. */
static int rustsecp256k1_v0_4_0_scalar_eq(const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b);
static int rustsecp256k1_v0_4_1_scalar_eq(const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b);
/** Find r1 and r2 such that r1+r2*2^128 = k. */
static void rustsecp256k1_v0_4_0_scalar_split_128(rustsecp256k1_v0_4_0_scalar *r1, rustsecp256k1_v0_4_0_scalar *r2, const rustsecp256k1_v0_4_0_scalar *k);
static void rustsecp256k1_v0_4_1_scalar_split_128(rustsecp256k1_v0_4_1_scalar *r1, rustsecp256k1_v0_4_1_scalar *r2, const rustsecp256k1_v0_4_1_scalar *k);
/** Find r1 and r2 such that r1+r2*lambda = k,
* where r1 and r2 or their negations are maximum 128 bits long (see rustsecp256k1_v0_4_0_ge_mul_lambda). */
static void rustsecp256k1_v0_4_0_scalar_split_lambda(rustsecp256k1_v0_4_0_scalar *r1, rustsecp256k1_v0_4_0_scalar *r2, const rustsecp256k1_v0_4_0_scalar *k);
* where r1 and r2 or their negations are maximum 128 bits long (see rustsecp256k1_v0_4_1_ge_mul_lambda). */
static void rustsecp256k1_v0_4_1_scalar_split_lambda(rustsecp256k1_v0_4_1_scalar *r1, rustsecp256k1_v0_4_1_scalar *r2, const rustsecp256k1_v0_4_1_scalar *k);
/** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */
static void rustsecp256k1_v0_4_0_scalar_mul_shift_var(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b, unsigned int shift);
static void rustsecp256k1_v0_4_1_scalar_mul_shift_var(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b, unsigned int shift);
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/
static void rustsecp256k1_v0_4_0_scalar_cmov(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, int flag);
static void rustsecp256k1_v0_4_1_scalar_cmov(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, int flag);
#endif /* SECP256K1_SCALAR_H */
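A minimal usage sketch of the declarations above (not part of the header; the byte buffers and function name are hypothetical): parse a 32-byte big-endian value into a scalar with reduction modulo the group order n, negate it, and serialize it back.
static void scalar_roundtrip_sketch(unsigned char *out32, const unsigned char *in32) {
    rustsecp256k1_v0_4_1_scalar s;
    int overflow = 0;
    rustsecp256k1_v0_4_1_scalar_set_b32(&s, in32, &overflow); /* s = in32 mod n; overflow = 1 iff in32 >= n */
    rustsecp256k1_v0_4_1_scalar_negate(&s, &s);               /* s = n - s (0 stays 0) */
    rustsecp256k1_v0_4_1_scalar_get_b32(out32, &s);           /* big-endian serialization */
}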

View File

@ -12,7 +12,7 @@
/** A scalar modulo the group order of the secp256k1 curve. */
typedef struct {
uint64_t d[4];
} rustsecp256k1_v0_4_0_scalar;
} rustsecp256k1_v0_4_1_scalar;
#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{((uint64_t)(d1)) << 32 | (d0), ((uint64_t)(d3)) << 32 | (d2), ((uint64_t)(d5)) << 32 | (d4), ((uint64_t)(d7)) << 32 | (d6)}}

View File

@ -7,6 +7,8 @@
#ifndef SECP256K1_SCALAR_REPR_IMPL_H
#define SECP256K1_SCALAR_REPR_IMPL_H
#include "modinv64_impl.h"
/* Limbs of the secp256k1 order. */
#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
#define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL)
@ -24,37 +26,37 @@
#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_clear(rustsecp256k1_v0_4_0_scalar *r) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_scalar_clear(rustsecp256k1_v0_4_1_scalar *r) {
r->d[0] = 0;
r->d[1] = 0;
r->d[2] = 0;
r->d[3] = 0;
}
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_set_int(rustsecp256k1_v0_4_0_scalar *r, unsigned int v) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_scalar_set_int(rustsecp256k1_v0_4_1_scalar *r, unsigned int v) {
r->d[0] = v;
r->d[1] = 0;
r->d[2] = 0;
r->d[3] = 0;
}
SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_0_scalar_get_bits(const rustsecp256k1_v0_4_0_scalar *a, unsigned int offset, unsigned int count) {
SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_1_scalar_get_bits(const rustsecp256k1_v0_4_1_scalar *a, unsigned int offset, unsigned int count) {
VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
}
SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_0_scalar_get_bits_var(const rustsecp256k1_v0_4_0_scalar *a, unsigned int offset, unsigned int count) {
SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_1_scalar_get_bits_var(const rustsecp256k1_v0_4_1_scalar *a, unsigned int offset, unsigned int count) {
VERIFY_CHECK(count < 32);
VERIFY_CHECK(offset + count <= 256);
if ((offset + count - 1) >> 6 == offset >> 6) {
return rustsecp256k1_v0_4_0_scalar_get_bits(a, offset, count);
return rustsecp256k1_v0_4_1_scalar_get_bits(a, offset, count);
} else {
VERIFY_CHECK((offset >> 6) + 1 < 4);
return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1);
}
}
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_check_overflow(const rustsecp256k1_v0_4_0_scalar *a) {
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_check_overflow(const rustsecp256k1_v0_4_1_scalar *a) {
int yes = 0;
int no = 0;
no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. */
@ -66,7 +68,7 @@ SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_check_overflow(const rus
return yes;
}
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_reduce(rustsecp256k1_v0_4_0_scalar *r, unsigned int overflow) {
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_reduce(rustsecp256k1_v0_4_1_scalar *r, unsigned int overflow) {
uint128_t t;
VERIFY_CHECK(overflow <= 1);
t = (uint128_t)r->d[0] + overflow * SECP256K1_N_C_0;
@ -80,7 +82,7 @@ SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_reduce(rustsecp256k1_v0_
return overflow;
}
static int rustsecp256k1_v0_4_0_scalar_add(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) {
static int rustsecp256k1_v0_4_1_scalar_add(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) {
int overflow;
uint128_t t = (uint128_t)a->d[0] + b->d[0];
r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
@ -90,13 +92,13 @@ static int rustsecp256k1_v0_4_0_scalar_add(rustsecp256k1_v0_4_0_scalar *r, const
r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
t += (uint128_t)a->d[3] + b->d[3];
r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
overflow = t + rustsecp256k1_v0_4_0_scalar_check_overflow(r);
overflow = t + rustsecp256k1_v0_4_1_scalar_check_overflow(r);
VERIFY_CHECK(overflow == 0 || overflow == 1);
rustsecp256k1_v0_4_0_scalar_reduce(r, overflow);
rustsecp256k1_v0_4_1_scalar_reduce(r, overflow);
return overflow;
}
static void rustsecp256k1_v0_4_0_scalar_cadd_bit(rustsecp256k1_v0_4_0_scalar *r, unsigned int bit, int flag) {
static void rustsecp256k1_v0_4_1_scalar_cadd_bit(rustsecp256k1_v0_4_1_scalar *r, unsigned int bit, int flag) {
uint128_t t;
VERIFY_CHECK(bit < 256);
bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */
@ -110,35 +112,35 @@ static void rustsecp256k1_v0_4_0_scalar_cadd_bit(rustsecp256k1_v0_4_0_scalar *r,
r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL;
#ifdef VERIFY
VERIFY_CHECK((t >> 64) == 0);
VERIFY_CHECK(rustsecp256k1_v0_4_0_scalar_check_overflow(r) == 0);
VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_check_overflow(r) == 0);
#endif
}
static void rustsecp256k1_v0_4_0_scalar_set_b32(rustsecp256k1_v0_4_0_scalar *r, const unsigned char *b32, int *overflow) {
static void rustsecp256k1_v0_4_1_scalar_set_b32(rustsecp256k1_v0_4_1_scalar *r, const unsigned char *b32, int *overflow) {
int over;
r->d[0] = (uint64_t)b32[31] | (uint64_t)b32[30] << 8 | (uint64_t)b32[29] << 16 | (uint64_t)b32[28] << 24 | (uint64_t)b32[27] << 32 | (uint64_t)b32[26] << 40 | (uint64_t)b32[25] << 48 | (uint64_t)b32[24] << 56;
r->d[1] = (uint64_t)b32[23] | (uint64_t)b32[22] << 8 | (uint64_t)b32[21] << 16 | (uint64_t)b32[20] << 24 | (uint64_t)b32[19] << 32 | (uint64_t)b32[18] << 40 | (uint64_t)b32[17] << 48 | (uint64_t)b32[16] << 56;
r->d[2] = (uint64_t)b32[15] | (uint64_t)b32[14] << 8 | (uint64_t)b32[13] << 16 | (uint64_t)b32[12] << 24 | (uint64_t)b32[11] << 32 | (uint64_t)b32[10] << 40 | (uint64_t)b32[9] << 48 | (uint64_t)b32[8] << 56;
r->d[3] = (uint64_t)b32[7] | (uint64_t)b32[6] << 8 | (uint64_t)b32[5] << 16 | (uint64_t)b32[4] << 24 | (uint64_t)b32[3] << 32 | (uint64_t)b32[2] << 40 | (uint64_t)b32[1] << 48 | (uint64_t)b32[0] << 56;
over = rustsecp256k1_v0_4_0_scalar_reduce(r, rustsecp256k1_v0_4_0_scalar_check_overflow(r));
over = rustsecp256k1_v0_4_1_scalar_reduce(r, rustsecp256k1_v0_4_1_scalar_check_overflow(r));
if (overflow) {
*overflow = over;
}
}
static void rustsecp256k1_v0_4_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_4_0_scalar* a) {
static void rustsecp256k1_v0_4_1_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_4_1_scalar* a) {
bin[0] = a->d[3] >> 56; bin[1] = a->d[3] >> 48; bin[2] = a->d[3] >> 40; bin[3] = a->d[3] >> 32; bin[4] = a->d[3] >> 24; bin[5] = a->d[3] >> 16; bin[6] = a->d[3] >> 8; bin[7] = a->d[3];
bin[8] = a->d[2] >> 56; bin[9] = a->d[2] >> 48; bin[10] = a->d[2] >> 40; bin[11] = a->d[2] >> 32; bin[12] = a->d[2] >> 24; bin[13] = a->d[2] >> 16; bin[14] = a->d[2] >> 8; bin[15] = a->d[2];
bin[16] = a->d[1] >> 56; bin[17] = a->d[1] >> 48; bin[18] = a->d[1] >> 40; bin[19] = a->d[1] >> 32; bin[20] = a->d[1] >> 24; bin[21] = a->d[1] >> 16; bin[22] = a->d[1] >> 8; bin[23] = a->d[1];
bin[24] = a->d[0] >> 56; bin[25] = a->d[0] >> 48; bin[26] = a->d[0] >> 40; bin[27] = a->d[0] >> 32; bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
}
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_is_zero(const rustsecp256k1_v0_4_0_scalar *a) {
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_is_zero(const rustsecp256k1_v0_4_1_scalar *a) {
return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
}
static void rustsecp256k1_v0_4_0_scalar_negate(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a) {
uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (rustsecp256k1_v0_4_0_scalar_is_zero(a) == 0);
static void rustsecp256k1_v0_4_1_scalar_negate(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a) {
uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (rustsecp256k1_v0_4_1_scalar_is_zero(a) == 0);
uint128_t t = (uint128_t)(~a->d[0]) + SECP256K1_N_0 + 1;
r->d[0] = t & nonzero; t >>= 64;
t += (uint128_t)(~a->d[1]) + SECP256K1_N_1;
@ -149,11 +151,11 @@ static void rustsecp256k1_v0_4_0_scalar_negate(rustsecp256k1_v0_4_0_scalar *r, c
r->d[3] = t & nonzero;
}
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_is_one(const rustsecp256k1_v0_4_0_scalar *a) {
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_is_one(const rustsecp256k1_v0_4_1_scalar *a) {
return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
}
static int rustsecp256k1_v0_4_0_scalar_is_high(const rustsecp256k1_v0_4_0_scalar *a) {
static int rustsecp256k1_v0_4_1_scalar_is_high(const rustsecp256k1_v0_4_1_scalar *a) {
int yes = 0;
int no = 0;
no |= (a->d[3] < SECP256K1_N_H_3);
@ -165,11 +167,11 @@ static int rustsecp256k1_v0_4_0_scalar_is_high(const rustsecp256k1_v0_4_0_scalar
return yes;
}
static int rustsecp256k1_v0_4_0_scalar_cond_negate(rustsecp256k1_v0_4_0_scalar *r, int flag) {
static int rustsecp256k1_v0_4_1_scalar_cond_negate(rustsecp256k1_v0_4_1_scalar *r, int flag) {
/* If we are flag = 0, mask = 00...00 and this is a no-op;
* if we are flag = 1, mask = 11...11 and this is identical to rustsecp256k1_v0_4_0_scalar_negate */
* if we are flag = 1, mask = 11...11 and this is identical to rustsecp256k1_v0_4_1_scalar_negate */
uint64_t mask = !flag - 1;
uint64_t nonzero = (rustsecp256k1_v0_4_0_scalar_is_zero(r) != 0) - 1;
uint64_t nonzero = (rustsecp256k1_v0_4_1_scalar_is_zero(r) != 0) - 1;
uint128_t t = (uint128_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask);
r->d[0] = t & nonzero; t >>= 64;
t += (uint128_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask);
@ -212,28 +214,6 @@ static int rustsecp256k1_v0_4_0_scalar_cond_negate(rustsecp256k1_v0_4_0_scalar *
VERIFY_CHECK(c1 >= th); \
}
/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd2(a,b) { \
uint64_t tl, th, th2, tl2; \
{ \
uint128_t t = (uint128_t)a * b; \
th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \
tl = t; \
} \
th2 = th + th; /* at most 0xFFFFFFFFFFFFFFFE (in case th was 0x7FFFFFFFFFFFFFFF) */ \
c2 += (th2 < th); /* never overflows by contract (verified the next line) */ \
VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
tl2 = tl + tl; /* at most 0xFFFFFFFFFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFFFFFFFFFF) */ \
th2 += (tl2 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \
c0 += tl2; /* overflow is handled on the next line */ \
th2 += (c0 < tl2); /* second overflow is handled on the next line */ \
c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified the next line) */ \
VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
c1 += th2; /* overflow is handled on the next line */ \
c2 += (c1 < th2); /* never overflows by contract (verified the next line) */ \
VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
}
/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
#define sumadd(a) { \
unsigned int over; \
@ -267,7 +247,7 @@ static int rustsecp256k1_v0_4_0_scalar_cond_negate(rustsecp256k1_v0_4_0_scalar *
VERIFY_CHECK(c2 == 0); \
}
static void rustsecp256k1_v0_4_0_scalar_reduce_512(rustsecp256k1_v0_4_0_scalar *r, const uint64_t *l) {
static void rustsecp256k1_v0_4_1_scalar_reduce_512(rustsecp256k1_v0_4_1_scalar *r, const uint64_t *l) {
#ifdef USE_ASM_X86_64
/* Reduce 512 bits into 385. */
uint64_t m0, m1, m2, m3, m4, m5, m6;
@ -573,10 +553,10 @@ static void rustsecp256k1_v0_4_0_scalar_reduce_512(rustsecp256k1_v0_4_0_scalar *
#endif
/* Final reduction of r. */
rustsecp256k1_v0_4_0_scalar_reduce(r, c + rustsecp256k1_v0_4_0_scalar_check_overflow(r));
rustsecp256k1_v0_4_1_scalar_reduce(r, c + rustsecp256k1_v0_4_1_scalar_check_overflow(r));
}
static void rustsecp256k1_v0_4_0_scalar_mul_512(uint64_t l[8], const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) {
static void rustsecp256k1_v0_4_1_scalar_mul_512(uint64_t l[8], const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) {
#ifdef USE_ASM_X86_64
const uint64_t *pb = b->d;
__asm__ __volatile__(
@ -743,158 +723,20 @@ static void rustsecp256k1_v0_4_0_scalar_mul_512(uint64_t l[8], const rustsecp256
#endif
}
static void rustsecp256k1_v0_4_0_scalar_sqr_512(uint64_t l[8], const rustsecp256k1_v0_4_0_scalar *a) {
#ifdef USE_ASM_X86_64
__asm__ __volatile__(
/* Preload */
"movq 0(%%rdi), %%r11\n"
"movq 8(%%rdi), %%r12\n"
"movq 16(%%rdi), %%r13\n"
"movq 24(%%rdi), %%r14\n"
/* (rax,rdx) = a0 * a0 */
"movq %%r11, %%rax\n"
"mulq %%r11\n"
/* Extract l0 */
"movq %%rax, 0(%%rsi)\n"
/* (r8,r9,r10) = (rdx,0) */
"movq %%rdx, %%r8\n"
"xorq %%r9, %%r9\n"
"xorq %%r10, %%r10\n"
/* (r8,r9,r10) += 2 * a0 * a1 */
"movq %%r11, %%rax\n"
"mulq %%r12\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
"adcq $0, %%r10\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
"adcq $0, %%r10\n"
/* Extract l1 */
"movq %%r8, 8(%%rsi)\n"
"xorq %%r8, %%r8\n"
/* (r9,r10,r8) += 2 * a0 * a2 */
"movq %%r11, %%rax\n"
"mulq %%r13\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
/* (r9,r10,r8) += a1 * a1 */
"movq %%r12, %%rax\n"
"mulq %%r12\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
/* Extract l2 */
"movq %%r9, 16(%%rsi)\n"
"xorq %%r9, %%r9\n"
/* (r10,r8,r9) += 2 * a0 * a3 */
"movq %%r11, %%rax\n"
"mulq %%r14\n"
"addq %%rax, %%r10\n"
"adcq %%rdx, %%r8\n"
"adcq $0, %%r9\n"
"addq %%rax, %%r10\n"
"adcq %%rdx, %%r8\n"
"adcq $0, %%r9\n"
/* (r10,r8,r9) += 2 * a1 * a2 */
"movq %%r12, %%rax\n"
"mulq %%r13\n"
"addq %%rax, %%r10\n"
"adcq %%rdx, %%r8\n"
"adcq $0, %%r9\n"
"addq %%rax, %%r10\n"
"adcq %%rdx, %%r8\n"
"adcq $0, %%r9\n"
/* Extract l3 */
"movq %%r10, 24(%%rsi)\n"
"xorq %%r10, %%r10\n"
/* (r8,r9,r10) += 2 * a1 * a3 */
"movq %%r12, %%rax\n"
"mulq %%r14\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
"adcq $0, %%r10\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
"adcq $0, %%r10\n"
/* (r8,r9,r10) += a2 * a2 */
"movq %%r13, %%rax\n"
"mulq %%r13\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
"adcq $0, %%r10\n"
/* Extract l4 */
"movq %%r8, 32(%%rsi)\n"
"xorq %%r8, %%r8\n"
/* (r9,r10,r8) += 2 * a2 * a3 */
"movq %%r13, %%rax\n"
"mulq %%r14\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
/* Extract l5 */
"movq %%r9, 40(%%rsi)\n"
/* (r10,r8) += a3 * a3 */
"movq %%r14, %%rax\n"
"mulq %%r14\n"
"addq %%rax, %%r10\n"
"adcq %%rdx, %%r8\n"
/* Extract l6 */
"movq %%r10, 48(%%rsi)\n"
/* Extract l7 */
"movq %%r8, 56(%%rsi)\n"
:
: "S"(l), "D"(a->d)
: "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc", "memory");
#else
/* 160 bit accumulator. */
uint64_t c0 = 0, c1 = 0;
uint32_t c2 = 0;
/* l[0..7] = a[0..3] * b[0..3]. */
muladd_fast(a->d[0], a->d[0]);
extract_fast(l[0]);
muladd2(a->d[0], a->d[1]);
extract(l[1]);
muladd2(a->d[0], a->d[2]);
muladd(a->d[1], a->d[1]);
extract(l[2]);
muladd2(a->d[0], a->d[3]);
muladd2(a->d[1], a->d[2]);
extract(l[3]);
muladd2(a->d[1], a->d[3]);
muladd(a->d[2], a->d[2]);
extract(l[4]);
muladd2(a->d[2], a->d[3]);
extract(l[5]);
muladd_fast(a->d[3], a->d[3]);
extract_fast(l[6]);
VERIFY_CHECK(c1 == 0);
l[7] = c0;
#endif
}
#undef sumadd
#undef sumadd_fast
#undef muladd
#undef muladd_fast
#undef muladd2
#undef extract
#undef extract_fast
static void rustsecp256k1_v0_4_0_scalar_mul(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) {
static void rustsecp256k1_v0_4_1_scalar_mul(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) {
uint64_t l[8];
rustsecp256k1_v0_4_0_scalar_mul_512(l, a, b);
rustsecp256k1_v0_4_0_scalar_reduce_512(r, l);
rustsecp256k1_v0_4_1_scalar_mul_512(l, a, b);
rustsecp256k1_v0_4_1_scalar_reduce_512(r, l);
}
static int rustsecp256k1_v0_4_0_scalar_shr_int(rustsecp256k1_v0_4_0_scalar *r, int n) {
static int rustsecp256k1_v0_4_1_scalar_shr_int(rustsecp256k1_v0_4_1_scalar *r, int n) {
int ret;
VERIFY_CHECK(n > 0);
VERIFY_CHECK(n < 16);
@ -906,13 +748,7 @@ static int rustsecp256k1_v0_4_0_scalar_shr_int(rustsecp256k1_v0_4_0_scalar *r, i
return ret;
}
static void rustsecp256k1_v0_4_0_scalar_sqr(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a) {
uint64_t l[8];
rustsecp256k1_v0_4_0_scalar_sqr_512(l, a);
rustsecp256k1_v0_4_0_scalar_reduce_512(r, l);
}
static void rustsecp256k1_v0_4_0_scalar_split_128(rustsecp256k1_v0_4_0_scalar *r1, rustsecp256k1_v0_4_0_scalar *r2, const rustsecp256k1_v0_4_0_scalar *k) {
static void rustsecp256k1_v0_4_1_scalar_split_128(rustsecp256k1_v0_4_1_scalar *r1, rustsecp256k1_v0_4_1_scalar *r2, const rustsecp256k1_v0_4_1_scalar *k) {
r1->d[0] = k->d[0];
r1->d[1] = k->d[1];
r1->d[2] = 0;
@ -923,17 +759,17 @@ static void rustsecp256k1_v0_4_0_scalar_split_128(rustsecp256k1_v0_4_0_scalar *r
r2->d[3] = 0;
}
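For the 4x64 representation this split is pure limb selection; written out (a restatement of the assignments above, not additional behavior):
    r1 = k mod 2^128        = d[0] + d[1]*2^64
    r2 = floor(k / 2^128)   = d[2] + d[3]*2^64
    k  = r1 + r2*2^128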
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_eq(const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) {
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_eq(const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) {
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
}
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_mul_shift_var(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b, unsigned int shift) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_scalar_mul_shift_var(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b, unsigned int shift) {
uint64_t l[8];
unsigned int shiftlimbs;
unsigned int shiftlow;
unsigned int shifthigh;
VERIFY_CHECK(shift >= 256);
rustsecp256k1_v0_4_0_scalar_mul_512(l, a, b);
rustsecp256k1_v0_4_1_scalar_mul_512(l, a, b);
shiftlimbs = shift >> 6;
shiftlow = shift & 0x3F;
shifthigh = 64 - shiftlow;
@ -941,10 +777,10 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_mul_shift_var(rustsecp2
r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
rustsecp256k1_v0_4_0_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
rustsecp256k1_v0_4_1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
}
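A note on the final step above (an observation about the existing code, not a change): after r has been set to floor(a*b / 2^shift), the trailing scalar_cadd_bit adds bit (shift - 1) of the 512-bit product, so any discarded fraction of one half or more bumps the result up by one. This is what makes the routine round to the nearest integer, as documented in scalar.h.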
static SECP256K1_INLINE void rustsecp256k1_v0_4_0_scalar_cmov(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, int flag) {
static SECP256K1_INLINE void rustsecp256k1_v0_4_1_scalar_cmov(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, int flag) {
uint64_t mask0, mask1;
VG_CHECK_VERIFY(r->d, sizeof(r->d));
mask0 = flag + ~((uint64_t)0);
@ -955,4 +791,78 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_scalar_cmov(rustsecp256k1_v0_4
r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);
}
static void rustsecp256k1_v0_4_1_scalar_from_signed62(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_modinv64_signed62 *a) {
const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4];
/* The output from rustsecp256k1_v0_4_1_modinv64{_var} should be normalized to range [0,modulus), and
* have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4).
*/
VERIFY_CHECK(a0 >> 62 == 0);
VERIFY_CHECK(a1 >> 62 == 0);
VERIFY_CHECK(a2 >> 62 == 0);
VERIFY_CHECK(a3 >> 62 == 0);
VERIFY_CHECK(a4 >> 8 == 0);
r->d[0] = a0 | a1 << 62;
r->d[1] = a1 >> 2 | a2 << 60;
r->d[2] = a2 >> 4 | a3 << 58;
r->d[3] = a3 >> 6 | a4 << 56;
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_check_overflow(r) == 0);
#endif
}
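The packing above preserves the represented value; in equation form (given the limb bounds enforced by the VERIFY_CHECKs):
    a0 + a1*2^62 + a2*2^124 + a3*2^186 + a4*2^248
      = d[0] + d[1]*2^64 + d[2]*2^128 + d[3]*2^192
i.e. the five 62-bit limbs of the signed62 representation are repacked into four 64-bit limbs with no loss.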
static void rustsecp256k1_v0_4_1_scalar_to_signed62(rustsecp256k1_v0_4_1_modinv64_signed62 *r, const rustsecp256k1_v0_4_1_scalar *a) {
const uint64_t M62 = UINT64_MAX >> 2;
const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3];
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_check_overflow(a) == 0);
#endif
r->v[0] = a0 & M62;
r->v[1] = (a0 >> 62 | a1 << 2) & M62;
r->v[2] = (a1 >> 60 | a2 << 4) & M62;
r->v[3] = (a2 >> 58 | a3 << 6) & M62;
r->v[4] = a3 >> 56;
}
static const rustsecp256k1_v0_4_1_modinv64_modinfo rustsecp256k1_v0_4_1_const_modinfo_scalar = {
{{0x3FD25E8CD0364141LL, 0x2ABB739ABD2280EELL, -0x15LL, 0, 256}},
0x34F20099AA774EC1LL
};
static void rustsecp256k1_v0_4_1_scalar_inverse(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *x) {
rustsecp256k1_v0_4_1_modinv64_signed62 s;
#ifdef VERIFY
int zero_in = rustsecp256k1_v0_4_1_scalar_is_zero(x);
#endif
rustsecp256k1_v0_4_1_scalar_to_signed62(&s, x);
rustsecp256k1_v0_4_1_modinv64(&s, &rustsecp256k1_v0_4_1_const_modinfo_scalar);
rustsecp256k1_v0_4_1_scalar_from_signed62(r, &s);
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_is_zero(r) == zero_in);
#endif
}
static void rustsecp256k1_v0_4_1_scalar_inverse_var(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *x) {
rustsecp256k1_v0_4_1_modinv64_signed62 s;
#ifdef VERIFY
int zero_in = rustsecp256k1_v0_4_1_scalar_is_zero(x);
#endif
rustsecp256k1_v0_4_1_scalar_to_signed62(&s, x);
rustsecp256k1_v0_4_1_modinv64_var(&s, &rustsecp256k1_v0_4_1_const_modinfo_scalar);
rustsecp256k1_v0_4_1_scalar_from_signed62(r, &s);
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_is_zero(r) == zero_in);
#endif
}
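Both inversion routines now go through the safegcd-based modinv64 code; the only difference is the timing contract. A minimal usage sketch (variable names hypothetical):
    rustsecp256k1_v0_4_1_scalar x_inv;
    rustsecp256k1_v0_4_1_scalar_inverse(&x_inv, &x);     /* constant time: use for secret scalars */
    rustsecp256k1_v0_4_1_scalar_inverse_var(&x_inv, &x); /* variable time: only for public data */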
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_is_even(const rustsecp256k1_v0_4_1_scalar *a) {
return !(a->d[0] & 1);
}
#endif /* SECP256K1_SCALAR_REPR_IMPL_H */

View File

@ -12,7 +12,7 @@
/** A scalar modulo the group order of the secp256k1 curve. */
typedef struct {
uint32_t d[8];
} rustsecp256k1_v0_4_0_scalar;
} rustsecp256k1_v0_4_1_scalar;
#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7)}}

View File

@ -7,6 +7,8 @@
#ifndef SECP256K1_SCALAR_REPR_IMPL_H
#define SECP256K1_SCALAR_REPR_IMPL_H
#include "modinv32_impl.h"
/* Limbs of the secp256k1 order. */
#define SECP256K1_N_0 ((uint32_t)0xD0364141UL)
#define SECP256K1_N_1 ((uint32_t)0xBFD25E8CUL)
@ -34,7 +36,7 @@
#define SECP256K1_N_H_6 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_H_7 ((uint32_t)0x7FFFFFFFUL)
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_clear(rustsecp256k1_v0_4_0_scalar *r) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_scalar_clear(rustsecp256k1_v0_4_1_scalar *r) {
r->d[0] = 0;
r->d[1] = 0;
r->d[2] = 0;
@ -45,7 +47,7 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_clear(rustsecp256k1_v0_
r->d[7] = 0;
}
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_set_int(rustsecp256k1_v0_4_0_scalar *r, unsigned int v) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_scalar_set_int(rustsecp256k1_v0_4_1_scalar *r, unsigned int v) {
r->d[0] = v;
r->d[1] = 0;
r->d[2] = 0;
@ -56,23 +58,23 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_set_int(rustsecp256k1_v
r->d[7] = 0;
}
SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_0_scalar_get_bits(const rustsecp256k1_v0_4_0_scalar *a, unsigned int offset, unsigned int count) {
SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_1_scalar_get_bits(const rustsecp256k1_v0_4_1_scalar *a, unsigned int offset, unsigned int count) {
VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5);
return (a->d[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1);
}
SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_0_scalar_get_bits_var(const rustsecp256k1_v0_4_0_scalar *a, unsigned int offset, unsigned int count) {
SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_1_scalar_get_bits_var(const rustsecp256k1_v0_4_1_scalar *a, unsigned int offset, unsigned int count) {
VERIFY_CHECK(count < 32);
VERIFY_CHECK(offset + count <= 256);
if ((offset + count - 1) >> 5 == offset >> 5) {
return rustsecp256k1_v0_4_0_scalar_get_bits(a, offset, count);
return rustsecp256k1_v0_4_1_scalar_get_bits(a, offset, count);
} else {
VERIFY_CHECK((offset >> 5) + 1 < 8);
return ((a->d[offset >> 5] >> (offset & 0x1F)) | (a->d[(offset >> 5) + 1] << (32 - (offset & 0x1F)))) & ((((uint32_t)1) << count) - 1);
}
}
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_check_overflow(const rustsecp256k1_v0_4_0_scalar *a) {
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_check_overflow(const rustsecp256k1_v0_4_1_scalar *a) {
int yes = 0;
int no = 0;
no |= (a->d[7] < SECP256K1_N_7); /* No need for a > check. */
@ -90,7 +92,7 @@ SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_check_overflow(const rus
return yes;
}
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_reduce(rustsecp256k1_v0_4_0_scalar *r, uint32_t overflow) {
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_reduce(rustsecp256k1_v0_4_1_scalar *r, uint32_t overflow) {
uint64_t t;
VERIFY_CHECK(overflow <= 1);
t = (uint64_t)r->d[0] + overflow * SECP256K1_N_C_0;
@ -112,7 +114,7 @@ SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_reduce(rustsecp256k1_v0_
return overflow;
}
static int rustsecp256k1_v0_4_0_scalar_add(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) {
static int rustsecp256k1_v0_4_1_scalar_add(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) {
int overflow;
uint64_t t = (uint64_t)a->d[0] + b->d[0];
r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
@ -130,13 +132,13 @@ static int rustsecp256k1_v0_4_0_scalar_add(rustsecp256k1_v0_4_0_scalar *r, const
r->d[6] = t & 0xFFFFFFFFULL; t >>= 32;
t += (uint64_t)a->d[7] + b->d[7];
r->d[7] = t & 0xFFFFFFFFULL; t >>= 32;
overflow = t + rustsecp256k1_v0_4_0_scalar_check_overflow(r);
overflow = t + rustsecp256k1_v0_4_1_scalar_check_overflow(r);
VERIFY_CHECK(overflow == 0 || overflow == 1);
rustsecp256k1_v0_4_0_scalar_reduce(r, overflow);
rustsecp256k1_v0_4_1_scalar_reduce(r, overflow);
return overflow;
}
static void rustsecp256k1_v0_4_0_scalar_cadd_bit(rustsecp256k1_v0_4_0_scalar *r, unsigned int bit, int flag) {
static void rustsecp256k1_v0_4_1_scalar_cadd_bit(rustsecp256k1_v0_4_1_scalar *r, unsigned int bit, int flag) {
uint64_t t;
VERIFY_CHECK(bit < 256);
bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 5) > 7 makes this a noop */
@ -158,11 +160,11 @@ static void rustsecp256k1_v0_4_0_scalar_cadd_bit(rustsecp256k1_v0_4_0_scalar *r,
r->d[7] = t & 0xFFFFFFFFULL;
#ifdef VERIFY
VERIFY_CHECK((t >> 32) == 0);
VERIFY_CHECK(rustsecp256k1_v0_4_0_scalar_check_overflow(r) == 0);
VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_check_overflow(r) == 0);
#endif
}
static void rustsecp256k1_v0_4_0_scalar_set_b32(rustsecp256k1_v0_4_0_scalar *r, const unsigned char *b32, int *overflow) {
static void rustsecp256k1_v0_4_1_scalar_set_b32(rustsecp256k1_v0_4_1_scalar *r, const unsigned char *b32, int *overflow) {
int over;
r->d[0] = (uint32_t)b32[31] | (uint32_t)b32[30] << 8 | (uint32_t)b32[29] << 16 | (uint32_t)b32[28] << 24;
r->d[1] = (uint32_t)b32[27] | (uint32_t)b32[26] << 8 | (uint32_t)b32[25] << 16 | (uint32_t)b32[24] << 24;
@ -172,13 +174,13 @@ static void rustsecp256k1_v0_4_0_scalar_set_b32(rustsecp256k1_v0_4_0_scalar *r,
r->d[5] = (uint32_t)b32[11] | (uint32_t)b32[10] << 8 | (uint32_t)b32[9] << 16 | (uint32_t)b32[8] << 24;
r->d[6] = (uint32_t)b32[7] | (uint32_t)b32[6] << 8 | (uint32_t)b32[5] << 16 | (uint32_t)b32[4] << 24;
r->d[7] = (uint32_t)b32[3] | (uint32_t)b32[2] << 8 | (uint32_t)b32[1] << 16 | (uint32_t)b32[0] << 24;
over = rustsecp256k1_v0_4_0_scalar_reduce(r, rustsecp256k1_v0_4_0_scalar_check_overflow(r));
over = rustsecp256k1_v0_4_1_scalar_reduce(r, rustsecp256k1_v0_4_1_scalar_check_overflow(r));
if (overflow) {
*overflow = over;
}
}
static void rustsecp256k1_v0_4_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_4_0_scalar* a) {
static void rustsecp256k1_v0_4_1_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_4_1_scalar* a) {
bin[0] = a->d[7] >> 24; bin[1] = a->d[7] >> 16; bin[2] = a->d[7] >> 8; bin[3] = a->d[7];
bin[4] = a->d[6] >> 24; bin[5] = a->d[6] >> 16; bin[6] = a->d[6] >> 8; bin[7] = a->d[6];
bin[8] = a->d[5] >> 24; bin[9] = a->d[5] >> 16; bin[10] = a->d[5] >> 8; bin[11] = a->d[5];
@ -189,12 +191,12 @@ static void rustsecp256k1_v0_4_0_scalar_get_b32(unsigned char *bin, const rustse
bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
}
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_is_zero(const rustsecp256k1_v0_4_0_scalar *a) {
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_is_zero(const rustsecp256k1_v0_4_1_scalar *a) {
return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
}
static void rustsecp256k1_v0_4_0_scalar_negate(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a) {
uint32_t nonzero = 0xFFFFFFFFUL * (rustsecp256k1_v0_4_0_scalar_is_zero(a) == 0);
static void rustsecp256k1_v0_4_1_scalar_negate(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a) {
uint32_t nonzero = 0xFFFFFFFFUL * (rustsecp256k1_v0_4_1_scalar_is_zero(a) == 0);
uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1;
r->d[0] = t & nonzero; t >>= 32;
t += (uint64_t)(~a->d[1]) + SECP256K1_N_1;
@ -213,11 +215,11 @@ static void rustsecp256k1_v0_4_0_scalar_negate(rustsecp256k1_v0_4_0_scalar *r, c
r->d[7] = t & nonzero;
}
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_is_one(const rustsecp256k1_v0_4_0_scalar *a) {
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_is_one(const rustsecp256k1_v0_4_1_scalar *a) {
return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
}
static int rustsecp256k1_v0_4_0_scalar_is_high(const rustsecp256k1_v0_4_0_scalar *a) {
static int rustsecp256k1_v0_4_1_scalar_is_high(const rustsecp256k1_v0_4_1_scalar *a) {
int yes = 0;
int no = 0;
no |= (a->d[7] < SECP256K1_N_H_7);
@ -235,11 +237,11 @@ static int rustsecp256k1_v0_4_0_scalar_is_high(const rustsecp256k1_v0_4_0_scalar
return yes;
}
static int rustsecp256k1_v0_4_0_scalar_cond_negate(rustsecp256k1_v0_4_0_scalar *r, int flag) {
static int rustsecp256k1_v0_4_1_scalar_cond_negate(rustsecp256k1_v0_4_1_scalar *r, int flag) {
/* If we are flag = 0, mask = 00...00 and this is a no-op;
* if we are flag = 1, mask = 11...11 and this is identical to rustsecp256k1_v0_4_0_scalar_negate */
* if we are flag = 1, mask = 11...11 and this is identical to rustsecp256k1_v0_4_1_scalar_negate */
uint32_t mask = !flag - 1;
uint32_t nonzero = 0xFFFFFFFFUL * (rustsecp256k1_v0_4_0_scalar_is_zero(r) == 0);
uint32_t nonzero = 0xFFFFFFFFUL * (rustsecp256k1_v0_4_1_scalar_is_zero(r) == 0);
uint64_t t = (uint64_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask);
r->d[0] = t & nonzero; t >>= 32;
t += (uint64_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask);
@ -291,28 +293,6 @@ static int rustsecp256k1_v0_4_0_scalar_cond_negate(rustsecp256k1_v0_4_0_scalar *
VERIFY_CHECK(c1 >= th); \
}
/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd2(a,b) { \
uint32_t tl, th, th2, tl2; \
{ \
uint64_t t = (uint64_t)a * b; \
th = t >> 32; /* at most 0xFFFFFFFE */ \
tl = t; \
} \
th2 = th + th; /* at most 0xFFFFFFFE (in case th was 0x7FFFFFFF) */ \
c2 += (th2 < th); /* never overflows by contract (verified the next line) */ \
VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
tl2 = tl + tl; /* at most 0xFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFF) */ \
th2 += (tl2 < tl); /* at most 0xFFFFFFFF */ \
c0 += tl2; /* overflow is handled on the next line */ \
th2 += (c0 < tl2); /* second overflow is handled on the next line */ \
c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified the next line) */ \
VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
c1 += th2; /* overflow is handled on the next line */ \
c2 += (c1 < th2); /* never overflows by contract (verified the next line) */ \
VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
}
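The triple (c0, c1, c2) threaded through these muladd/extract macros acts as a 96-bit accumulator. A minimal standalone sketch with hypothetical helper names, assuming only plain C99 integer arithmetic:
#include <stdint.h>
#include <assert.h>
static void acc_muladd(uint32_t *c0, uint32_t *c1, uint32_t *c2, uint32_t a, uint32_t b) {
    uint64_t t = (uint64_t)a * b;
    uint32_t tl = (uint32_t)t, th = (uint32_t)(t >> 32);
    uint64_t low = (uint64_t)*c0 + tl;
    *c0 = (uint32_t)low;
    uint64_t mid = (uint64_t)*c1 + th + (uint32_t)(low >> 32);
    *c1 = (uint32_t)mid;
    *c2 += (uint32_t)(mid >> 32);            /* must never overflow, as in the macros */
}
static uint32_t acc_extract(uint32_t *c0, uint32_t *c1, uint32_t *c2) {
    uint32_t n = *c0;                        /* emit the lowest 32 bits ... */
    *c0 = *c1; *c1 = *c2; *c2 = 0;           /* ... and shift the accumulator down */
    return n;
}
int main(void) {
    uint32_t c0 = 0, c1 = 0, c2 = 0;
    acc_muladd(&c0, &c1, &c2, 0xFFFFFFFFu, 0xFFFFFFFFu);   /* (2^32-1)^2, added once */
    acc_muladd(&c0, &c1, &c2, 0xFFFFFFFFu, 0xFFFFFFFFu);   /* and again, like muladd2 */
    assert(acc_extract(&c0, &c1, &c2) == 0x00000002u);     /* low word of 2*(2^32-1)^2 */
    return 0;
}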
/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
#define sumadd(a) { \
unsigned int over; \
@ -346,7 +326,7 @@ static int rustsecp256k1_v0_4_0_scalar_cond_negate(rustsecp256k1_v0_4_0_scalar *
VERIFY_CHECK(c2 == 0); \
}
static void rustsecp256k1_v0_4_0_scalar_reduce_512(rustsecp256k1_v0_4_0_scalar *r, const uint32_t *l) {
static void rustsecp256k1_v0_4_1_scalar_reduce_512(rustsecp256k1_v0_4_1_scalar *r, const uint32_t *l) {
uint64_t c;
uint32_t n0 = l[8], n1 = l[9], n2 = l[10], n3 = l[11], n4 = l[12], n5 = l[13], n6 = l[14], n7 = l[15];
uint32_t m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12;
@ -485,10 +465,10 @@ static void rustsecp256k1_v0_4_0_scalar_reduce_512(rustsecp256k1_v0_4_0_scalar *
r->d[7] = c & 0xFFFFFFFFUL; c >>= 32;
/* Final reduction of r. */
rustsecp256k1_v0_4_0_scalar_reduce(r, c + rustsecp256k1_v0_4_0_scalar_check_overflow(r));
rustsecp256k1_v0_4_1_scalar_reduce(r, c + rustsecp256k1_v0_4_1_scalar_check_overflow(r));
}
static void rustsecp256k1_v0_4_0_scalar_mul_512(uint32_t *l, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) {
static void rustsecp256k1_v0_4_1_scalar_mul_512(uint32_t *l, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) {
/* 96 bit accumulator. */
uint32_t c0 = 0, c1 = 0, c2 = 0;
@ -576,81 +556,20 @@ static void rustsecp256k1_v0_4_0_scalar_mul_512(uint32_t *l, const rustsecp256k1
l[15] = c0;
}
static void rustsecp256k1_v0_4_0_scalar_sqr_512(uint32_t *l, const rustsecp256k1_v0_4_0_scalar *a) {
/* 96 bit accumulator. */
uint32_t c0 = 0, c1 = 0, c2 = 0;
/* l[0..15] = a[0..7]^2. */
muladd_fast(a->d[0], a->d[0]);
extract_fast(l[0]);
muladd2(a->d[0], a->d[1]);
extract(l[1]);
muladd2(a->d[0], a->d[2]);
muladd(a->d[1], a->d[1]);
extract(l[2]);
muladd2(a->d[0], a->d[3]);
muladd2(a->d[1], a->d[2]);
extract(l[3]);
muladd2(a->d[0], a->d[4]);
muladd2(a->d[1], a->d[3]);
muladd(a->d[2], a->d[2]);
extract(l[4]);
muladd2(a->d[0], a->d[5]);
muladd2(a->d[1], a->d[4]);
muladd2(a->d[2], a->d[3]);
extract(l[5]);
muladd2(a->d[0], a->d[6]);
muladd2(a->d[1], a->d[5]);
muladd2(a->d[2], a->d[4]);
muladd(a->d[3], a->d[3]);
extract(l[6]);
muladd2(a->d[0], a->d[7]);
muladd2(a->d[1], a->d[6]);
muladd2(a->d[2], a->d[5]);
muladd2(a->d[3], a->d[4]);
extract(l[7]);
muladd2(a->d[1], a->d[7]);
muladd2(a->d[2], a->d[6]);
muladd2(a->d[3], a->d[5]);
muladd(a->d[4], a->d[4]);
extract(l[8]);
muladd2(a->d[2], a->d[7]);
muladd2(a->d[3], a->d[6]);
muladd2(a->d[4], a->d[5]);
extract(l[9]);
muladd2(a->d[3], a->d[7]);
muladd2(a->d[4], a->d[6]);
muladd(a->d[5], a->d[5]);
extract(l[10]);
muladd2(a->d[4], a->d[7]);
muladd2(a->d[5], a->d[6]);
extract(l[11]);
muladd2(a->d[5], a->d[7]);
muladd(a->d[6], a->d[6]);
extract(l[12]);
muladd2(a->d[6], a->d[7]);
extract(l[13]);
muladd_fast(a->d[7], a->d[7]);
extract_fast(l[14]);
VERIFY_CHECK(c1 == 0);
l[15] = c0;
}
#undef sumadd
#undef sumadd_fast
#undef muladd
#undef muladd_fast
#undef muladd2
#undef extract
#undef extract_fast
static void rustsecp256k1_v0_4_0_scalar_mul(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) {
static void rustsecp256k1_v0_4_1_scalar_mul(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) {
uint32_t l[16];
rustsecp256k1_v0_4_0_scalar_mul_512(l, a, b);
rustsecp256k1_v0_4_0_scalar_reduce_512(r, l);
rustsecp256k1_v0_4_1_scalar_mul_512(l, a, b);
rustsecp256k1_v0_4_1_scalar_reduce_512(r, l);
}
static int rustsecp256k1_v0_4_0_scalar_shr_int(rustsecp256k1_v0_4_0_scalar *r, int n) {
static int rustsecp256k1_v0_4_1_scalar_shr_int(rustsecp256k1_v0_4_1_scalar *r, int n) {
int ret;
VERIFY_CHECK(n > 0);
VERIFY_CHECK(n < 16);
@ -666,13 +585,7 @@ static int rustsecp256k1_v0_4_0_scalar_shr_int(rustsecp256k1_v0_4_0_scalar *r, i
return ret;
}
static void rustsecp256k1_v0_4_0_scalar_sqr(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a) {
uint32_t l[16];
rustsecp256k1_v0_4_0_scalar_sqr_512(l, a);
rustsecp256k1_v0_4_0_scalar_reduce_512(r, l);
}
static void rustsecp256k1_v0_4_0_scalar_split_128(rustsecp256k1_v0_4_0_scalar *r1, rustsecp256k1_v0_4_0_scalar *r2, const rustsecp256k1_v0_4_0_scalar *k) {
static void rustsecp256k1_v0_4_1_scalar_split_128(rustsecp256k1_v0_4_1_scalar *r1, rustsecp256k1_v0_4_1_scalar *r2, const rustsecp256k1_v0_4_1_scalar *k) {
r1->d[0] = k->d[0];
r1->d[1] = k->d[1];
r1->d[2] = k->d[2];
@ -691,17 +604,17 @@ static void rustsecp256k1_v0_4_0_scalar_split_128(rustsecp256k1_v0_4_0_scalar *r
r2->d[7] = 0;
}
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_eq(const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) {
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_eq(const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) {
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0;
}
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_mul_shift_var(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b, unsigned int shift) {
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_scalar_mul_shift_var(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b, unsigned int shift) {
uint32_t l[16];
unsigned int shiftlimbs;
unsigned int shiftlow;
unsigned int shifthigh;
VERIFY_CHECK(shift >= 256);
rustsecp256k1_v0_4_0_scalar_mul_512(l, a, b);
rustsecp256k1_v0_4_1_scalar_mul_512(l, a, b);
shiftlimbs = shift >> 5;
shiftlow = shift & 0x1F;
shifthigh = 32 - shiftlow;
@ -713,10 +626,10 @@ SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_mul_shift_var(rustsecp2
r->d[5] = shift < 352 ? (l[5 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[6 + shiftlimbs] << shifthigh) : 0)) : 0;
r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0;
r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0;
rustsecp256k1_v0_4_0_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1);
rustsecp256k1_v0_4_1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1);
}
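The final _cadd_bit call adds bit (shift - 1) of the 512-bit product back into the result, so the function returns the product divided by 2^shift rounded half-up to the nearest integer rather than simply truncated.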
static SECP256K1_INLINE void rustsecp256k1_v0_4_0_scalar_cmov(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, int flag) {
static SECP256K1_INLINE void rustsecp256k1_v0_4_1_scalar_cmov(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, int flag) {
uint32_t mask0, mask1;
VG_CHECK_VERIFY(r->d, sizeof(r->d));
mask0 = flag + ~((uint32_t)0);
@ -731,4 +644,92 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_scalar_cmov(rustsecp256k1_v0_4
r->d[7] = (r->d[7] & mask0) | (a->d[7] & mask1);
}
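A standalone illustration of the branch-free select that _cmov builds from `flag` (hypothetical helper operating on a single word):
#include <stdint.h>
#include <assert.h>
static uint32_t cmov32(uint32_t r, uint32_t a, int flag) {
    uint32_t mask0 = (uint32_t)flag + ~((uint32_t)0);  /* flag=0 -> all ones, flag=1 -> 0 */
    uint32_t mask1 = ~mask0;                           /* flag=0 -> 0, flag=1 -> all ones */
    return (r & mask0) | (a & mask1);
}
int main(void) {
    assert(cmov32(0x1111u, 0x2222u, 0) == 0x1111u);    /* keep r */
    assert(cmov32(0x1111u, 0x2222u, 1) == 0x2222u);    /* take a */
    return 0;
}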
static void rustsecp256k1_v0_4_1_scalar_from_signed30(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_modinv32_signed30 *a) {
const uint32_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4],
a5 = a->v[5], a6 = a->v[6], a7 = a->v[7], a8 = a->v[8];
/* The output from rustsecp256k1_v0_4_1_modinv32{_var} should be normalized to range [0,modulus), and
* have limbs in [0,2^30). The modulus is < 2^256, so the top limb must be below 2^(256-30*8).
*/
VERIFY_CHECK(a0 >> 30 == 0);
VERIFY_CHECK(a1 >> 30 == 0);
VERIFY_CHECK(a2 >> 30 == 0);
VERIFY_CHECK(a3 >> 30 == 0);
VERIFY_CHECK(a4 >> 30 == 0);
VERIFY_CHECK(a5 >> 30 == 0);
VERIFY_CHECK(a6 >> 30 == 0);
VERIFY_CHECK(a7 >> 30 == 0);
VERIFY_CHECK(a8 >> 16 == 0);
r->d[0] = a0 | a1 << 30;
r->d[1] = a1 >> 2 | a2 << 28;
r->d[2] = a2 >> 4 | a3 << 26;
r->d[3] = a3 >> 6 | a4 << 24;
r->d[4] = a4 >> 8 | a5 << 22;
r->d[5] = a5 >> 10 | a6 << 20;
r->d[6] = a6 >> 12 | a7 << 18;
r->d[7] = a7 >> 14 | a8 << 16;
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_check_overflow(r) == 0);
#endif
}
static void rustsecp256k1_v0_4_1_scalar_to_signed30(rustsecp256k1_v0_4_1_modinv32_signed30 *r, const rustsecp256k1_v0_4_1_scalar *a) {
const uint32_t M30 = UINT32_MAX >> 2;
const uint32_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3],
a4 = a->d[4], a5 = a->d[5], a6 = a->d[6], a7 = a->d[7];
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_check_overflow(a) == 0);
#endif
r->v[0] = a0 & M30;
r->v[1] = (a0 >> 30 | a1 << 2) & M30;
r->v[2] = (a1 >> 28 | a2 << 4) & M30;
r->v[3] = (a2 >> 26 | a3 << 6) & M30;
r->v[4] = (a3 >> 24 | a4 << 8) & M30;
r->v[5] = (a4 >> 22 | a5 << 10) & M30;
r->v[6] = (a5 >> 20 | a6 << 12) & M30;
r->v[7] = (a6 >> 18 | a7 << 14) & M30;
r->v[8] = a7 >> 16;
}
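The two conversions above are pure bit repackings and exact inverses of each other, which a small standalone round-trip check makes concrete (hypothetical helper names, arbitrary 256-bit test value):
#include <stdint.h>
#include <string.h>
#include <assert.h>
static void to30(uint32_t v[9], const uint32_t d[8]) {
    const uint32_t M30 = UINT32_MAX >> 2;
    v[0] =  d[0]                     & M30;
    v[1] = (d[0] >> 30 | d[1] <<  2) & M30;
    v[2] = (d[1] >> 28 | d[2] <<  4) & M30;
    v[3] = (d[2] >> 26 | d[3] <<  6) & M30;
    v[4] = (d[3] >> 24 | d[4] <<  8) & M30;
    v[5] = (d[4] >> 22 | d[5] << 10) & M30;
    v[6] = (d[5] >> 20 | d[6] << 12) & M30;
    v[7] = (d[6] >> 18 | d[7] << 14) & M30;
    v[8] =  d[7] >> 16;
}
static void from30(uint32_t d[8], const uint32_t v[9]) {
    d[0] = v[0]       | v[1] << 30;
    d[1] = v[1] >>  2 | v[2] << 28;
    d[2] = v[2] >>  4 | v[3] << 26;
    d[3] = v[3] >>  6 | v[4] << 24;
    d[4] = v[4] >>  8 | v[5] << 22;
    d[5] = v[5] >> 10 | v[6] << 20;
    d[6] = v[6] >> 12 | v[7] << 18;
    d[7] = v[7] >> 14 | v[8] << 16;
}
int main(void) {
    uint32_t d[8] = {0x10364141u, 0x3F497A32u, 0x348A03BBu, 0xBAAEDCE6u,
                     0xFFFFFFFEu, 0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu};
    uint32_t v[9], back[8];
    to30(v, d);
    from30(back, v);
    assert(memcmp(back, d, sizeof(d)) == 0);   /* the 8x32 <-> 9x30 repacking is lossless */
    return 0;
}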
static const rustsecp256k1_v0_4_1_modinv32_modinfo rustsecp256k1_v0_4_1_const_modinfo_scalar = {
{{0x10364141L, 0x3F497A33L, 0x348A03BBL, 0x2BB739ABL, -0x146L, 0, 0, 0, 65536}},
0x2A774EC1L
};
static void rustsecp256k1_v0_4_1_scalar_inverse(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *x) {
rustsecp256k1_v0_4_1_modinv32_signed30 s;
#ifdef VERIFY
int zero_in = rustsecp256k1_v0_4_1_scalar_is_zero(x);
#endif
rustsecp256k1_v0_4_1_scalar_to_signed30(&s, x);
rustsecp256k1_v0_4_1_modinv32(&s, &rustsecp256k1_v0_4_1_const_modinfo_scalar);
rustsecp256k1_v0_4_1_scalar_from_signed30(r, &s);
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_is_zero(r) == zero_in);
#endif
}
static void rustsecp256k1_v0_4_1_scalar_inverse_var(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *x) {
rustsecp256k1_v0_4_1_modinv32_signed30 s;
#ifdef VERIFY
int zero_in = rustsecp256k1_v0_4_1_scalar_is_zero(x);
#endif
rustsecp256k1_v0_4_1_scalar_to_signed30(&s, x);
rustsecp256k1_v0_4_1_modinv32_var(&s, &rustsecp256k1_v0_4_1_const_modinfo_scalar);
rustsecp256k1_v0_4_1_scalar_from_signed30(r, &s);
#ifdef VERIFY
VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_is_zero(r) == zero_in);
#endif
}
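A hypothetical sanity check that exercises the new modinv-based path end to end, using only functions defined in this file (sketch only, assuming a nonzero input scalar):
static void check_inverse_roundtrip(const rustsecp256k1_v0_4_1_scalar *x) {
    rustsecp256k1_v0_4_1_scalar inv, chk;
    rustsecp256k1_v0_4_1_scalar_inverse(&inv, x);
    rustsecp256k1_v0_4_1_scalar_mul(&chk, &inv, x);
    VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_is_one(&chk));   /* x * x^-1 == 1 for x != 0 */
}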
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_is_even(const rustsecp256k1_v0_4_1_scalar *a) {
return !(a->d[0] & 1);
}
#endif /* SECP256K1_SCALAR_REPR_IMPL_H */

View File

@ -28,232 +28,13 @@
#error "Please select wide multiplication implementation"
#endif
static const rustsecp256k1_v0_4_0_scalar rustsecp256k1_v0_4_0_scalar_one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
static const rustsecp256k1_v0_4_0_scalar rustsecp256k1_v0_4_0_scalar_zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
static const rustsecp256k1_v0_4_1_scalar rustsecp256k1_v0_4_1_scalar_one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
static const rustsecp256k1_v0_4_1_scalar rustsecp256k1_v0_4_1_scalar_zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
#ifndef USE_NUM_NONE
static void rustsecp256k1_v0_4_0_scalar_get_num(rustsecp256k1_v0_4_0_num *r, const rustsecp256k1_v0_4_0_scalar *a) {
unsigned char c[32];
rustsecp256k1_v0_4_0_scalar_get_b32(c, a);
rustsecp256k1_v0_4_0_num_set_bin(r, c, 32);
}
/** secp256k1 curve order, see rustsecp256k1_v0_4_0_ecdsa_const_order_as_fe in ecdsa_impl.h */
static void rustsecp256k1_v0_4_0_scalar_order_get_num(rustsecp256k1_v0_4_0_num *r) {
#if defined(EXHAUSTIVE_TEST_ORDER)
static const unsigned char order[32] = {
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,EXHAUSTIVE_TEST_ORDER
};
#else
static const unsigned char order[32] = {
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,
0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41
};
#endif
rustsecp256k1_v0_4_0_num_set_bin(r, order, 32);
}
#endif
static int rustsecp256k1_v0_4_0_scalar_set_b32_seckey(rustsecp256k1_v0_4_0_scalar *r, const unsigned char *bin) {
static int rustsecp256k1_v0_4_1_scalar_set_b32_seckey(rustsecp256k1_v0_4_1_scalar *r, const unsigned char *bin) {
int overflow;
rustsecp256k1_v0_4_0_scalar_set_b32(r, bin, &overflow);
return (!overflow) & (!rustsecp256k1_v0_4_0_scalar_is_zero(r));
}
static void rustsecp256k1_v0_4_0_scalar_inverse(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *x) {
#if defined(EXHAUSTIVE_TEST_ORDER)
int i;
*r = 0;
for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1)
*r = i;
/* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
* have a composite group order; fix it in exhaustive_tests.c). */
VERIFY_CHECK(*r != 0);
}
#else
rustsecp256k1_v0_4_0_scalar *t;
int i;
/* First compute xN as x ^ (2^N - 1) for some values of N,
* and uM as x ^ M for some values of M. */
rustsecp256k1_v0_4_0_scalar x2, x3, x6, x8, x14, x28, x56, x112, x126;
rustsecp256k1_v0_4_0_scalar u2, u5, u9, u11, u13;
rustsecp256k1_v0_4_0_scalar_sqr(&u2, x);
rustsecp256k1_v0_4_0_scalar_mul(&x2, &u2, x);
rustsecp256k1_v0_4_0_scalar_mul(&u5, &u2, &x2);
rustsecp256k1_v0_4_0_scalar_mul(&x3, &u5, &u2);
rustsecp256k1_v0_4_0_scalar_mul(&u9, &x3, &u2);
rustsecp256k1_v0_4_0_scalar_mul(&u11, &u9, &u2);
rustsecp256k1_v0_4_0_scalar_mul(&u13, &u11, &u2);
rustsecp256k1_v0_4_0_scalar_sqr(&x6, &u13);
rustsecp256k1_v0_4_0_scalar_sqr(&x6, &x6);
rustsecp256k1_v0_4_0_scalar_mul(&x6, &x6, &u11);
rustsecp256k1_v0_4_0_scalar_sqr(&x8, &x6);
rustsecp256k1_v0_4_0_scalar_sqr(&x8, &x8);
rustsecp256k1_v0_4_0_scalar_mul(&x8, &x8, &x2);
rustsecp256k1_v0_4_0_scalar_sqr(&x14, &x8);
for (i = 0; i < 5; i++) {
rustsecp256k1_v0_4_0_scalar_sqr(&x14, &x14);
}
rustsecp256k1_v0_4_0_scalar_mul(&x14, &x14, &x6);
rustsecp256k1_v0_4_0_scalar_sqr(&x28, &x14);
for (i = 0; i < 13; i++) {
rustsecp256k1_v0_4_0_scalar_sqr(&x28, &x28);
}
rustsecp256k1_v0_4_0_scalar_mul(&x28, &x28, &x14);
rustsecp256k1_v0_4_0_scalar_sqr(&x56, &x28);
for (i = 0; i < 27; i++) {
rustsecp256k1_v0_4_0_scalar_sqr(&x56, &x56);
}
rustsecp256k1_v0_4_0_scalar_mul(&x56, &x56, &x28);
rustsecp256k1_v0_4_0_scalar_sqr(&x112, &x56);
for (i = 0; i < 55; i++) {
rustsecp256k1_v0_4_0_scalar_sqr(&x112, &x112);
}
rustsecp256k1_v0_4_0_scalar_mul(&x112, &x112, &x56);
rustsecp256k1_v0_4_0_scalar_sqr(&x126, &x112);
for (i = 0; i < 13; i++) {
rustsecp256k1_v0_4_0_scalar_sqr(&x126, &x126);
}
rustsecp256k1_v0_4_0_scalar_mul(&x126, &x126, &x14);
/* Then accumulate the final result (t starts at x126). */
t = &x126;
for (i = 0; i < 3; i++) {
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &u5); /* 101 */
for (i = 0; i < 4; i++) { /* 0 */
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 4; i++) { /* 0 */
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &u5); /* 101 */
for (i = 0; i < 5; i++) { /* 0 */
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &u11); /* 1011 */
for (i = 0; i < 4; i++) {
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &u11); /* 1011 */
for (i = 0; i < 4; i++) { /* 0 */
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 5; i++) { /* 00 */
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 6; i++) { /* 00 */
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &u13); /* 1101 */
for (i = 0; i < 4; i++) { /* 0 */
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &u5); /* 101 */
for (i = 0; i < 3; i++) {
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 5; i++) { /* 0 */
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &u9); /* 1001 */
for (i = 0; i < 6; i++) { /* 000 */
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &u5); /* 101 */
for (i = 0; i < 10; i++) { /* 0000000 */
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 4; i++) { /* 0 */
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 9; i++) { /* 0 */
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &x8); /* 11111111 */
for (i = 0; i < 5; i++) { /* 0 */
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &u9); /* 1001 */
for (i = 0; i < 6; i++) { /* 00 */
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &u11); /* 1011 */
for (i = 0; i < 4; i++) {
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &u13); /* 1101 */
for (i = 0; i < 5; i++) {
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &x2); /* 11 */
for (i = 0; i < 6; i++) { /* 00 */
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &u13); /* 1101 */
for (i = 0; i < 10; i++) { /* 000000 */
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &u13); /* 1101 */
for (i = 0; i < 4; i++) {
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, &u9); /* 1001 */
for (i = 0; i < 6; i++) { /* 00000 */
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 8; i++) { /* 00 */
rustsecp256k1_v0_4_0_scalar_sqr(t, t);
}
rustsecp256k1_v0_4_0_scalar_mul(r, t, &x6); /* 111111 */
}
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_is_even(const rustsecp256k1_v0_4_0_scalar *a) {
return !(a->d[0] & 1);
}
#endif
static void rustsecp256k1_v0_4_0_scalar_inverse_var(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *x) {
#if defined(USE_SCALAR_INV_BUILTIN)
rustsecp256k1_v0_4_0_scalar_inverse(r, x);
#elif defined(USE_SCALAR_INV_NUM)
unsigned char b[32];
rustsecp256k1_v0_4_0_num n, m;
rustsecp256k1_v0_4_0_scalar t = *x;
rustsecp256k1_v0_4_0_scalar_get_b32(b, &t);
rustsecp256k1_v0_4_0_num_set_bin(&n, b, 32);
rustsecp256k1_v0_4_0_scalar_order_get_num(&m);
rustsecp256k1_v0_4_0_num_mod_inverse(&n, &n, &m);
rustsecp256k1_v0_4_0_num_get_bin(b, 32, &n);
rustsecp256k1_v0_4_0_scalar_set_b32(r, b, NULL);
/* Verify that the inverse was computed correctly, without GMP code. */
rustsecp256k1_v0_4_0_scalar_mul(&t, &t, r);
CHECK(rustsecp256k1_v0_4_0_scalar_is_one(&t));
#else
#error "Please select scalar inverse implementation"
#endif
rustsecp256k1_v0_4_1_scalar_set_b32(r, bin, &overflow);
return (!overflow) & (!rustsecp256k1_v0_4_1_scalar_is_zero(r));
}
/* These parameters are generated using sage/gen_exhaustive_groups.sage. */
@ -272,7 +53,7 @@ static void rustsecp256k1_v0_4_0_scalar_inverse_var(rustsecp256k1_v0_4_0_scalar
* nontrivial to get full test coverage for the exhaustive tests. We therefore
* (arbitrarily) set r2 = k + 5 (mod n) and r1 = k - r2 * lambda (mod n).
*/
static void rustsecp256k1_v0_4_0_scalar_split_lambda(rustsecp256k1_v0_4_0_scalar *r1, rustsecp256k1_v0_4_0_scalar *r2, const rustsecp256k1_v0_4_0_scalar *k) {
static void rustsecp256k1_v0_4_1_scalar_split_lambda(rustsecp256k1_v0_4_1_scalar *r1, rustsecp256k1_v0_4_1_scalar *r2, const rustsecp256k1_v0_4_1_scalar *k) {
*r2 = (*k + 5) % EXHAUSTIVE_TEST_ORDER;
*r1 = (*k + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
}
@ -280,13 +61,13 @@ static void rustsecp256k1_v0_4_0_scalar_split_lambda(rustsecp256k1_v0_4_0_scalar
/**
* The Secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where
* lambda is: */
static const rustsecp256k1_v0_4_0_scalar rustsecp256k1_v0_4_0_const_lambda = SECP256K1_SCALAR_CONST(
static const rustsecp256k1_v0_4_1_scalar rustsecp256k1_v0_4_1_const_lambda = SECP256K1_SCALAR_CONST(
0x5363AD4CUL, 0xC05C30E0UL, 0xA5261C02UL, 0x8812645AUL,
0x122E22EAUL, 0x20816678UL, 0xDF02967CUL, 0x1B23BD72UL
);
#ifdef VERIFY
static void rustsecp256k1_v0_4_0_scalar_split_lambda_verify(const rustsecp256k1_v0_4_0_scalar *r1, const rustsecp256k1_v0_4_0_scalar *r2, const rustsecp256k1_v0_4_0_scalar *k);
static void rustsecp256k1_v0_4_1_scalar_split_lambda_verify(const rustsecp256k1_v0_4_1_scalar *r1, const rustsecp256k1_v0_4_1_scalar *r2, const rustsecp256k1_v0_4_1_scalar *k);
#endif
/*
@ -339,44 +120,44 @@ static void rustsecp256k1_v0_4_0_scalar_split_lambda_verify(const rustsecp256k1_
*
* See proof below.
*/
static void rustsecp256k1_v0_4_0_scalar_split_lambda(rustsecp256k1_v0_4_0_scalar *r1, rustsecp256k1_v0_4_0_scalar *r2, const rustsecp256k1_v0_4_0_scalar *k) {
rustsecp256k1_v0_4_0_scalar c1, c2;
static const rustsecp256k1_v0_4_0_scalar minus_b1 = SECP256K1_SCALAR_CONST(
static void rustsecp256k1_v0_4_1_scalar_split_lambda(rustsecp256k1_v0_4_1_scalar *r1, rustsecp256k1_v0_4_1_scalar *r2, const rustsecp256k1_v0_4_1_scalar *k) {
rustsecp256k1_v0_4_1_scalar c1, c2;
static const rustsecp256k1_v0_4_1_scalar minus_b1 = SECP256K1_SCALAR_CONST(
0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL,
0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL
);
static const rustsecp256k1_v0_4_0_scalar minus_b2 = SECP256K1_SCALAR_CONST(
static const rustsecp256k1_v0_4_1_scalar minus_b2 = SECP256K1_SCALAR_CONST(
0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL,
0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL
);
static const rustsecp256k1_v0_4_0_scalar g1 = SECP256K1_SCALAR_CONST(
static const rustsecp256k1_v0_4_1_scalar g1 = SECP256K1_SCALAR_CONST(
0x3086D221UL, 0xA7D46BCDUL, 0xE86C90E4UL, 0x9284EB15UL,
0x3DAA8A14UL, 0x71E8CA7FUL, 0xE893209AUL, 0x45DBB031UL
);
static const rustsecp256k1_v0_4_0_scalar g2 = SECP256K1_SCALAR_CONST(
static const rustsecp256k1_v0_4_1_scalar g2 = SECP256K1_SCALAR_CONST(
0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C4UL,
0x221208ACUL, 0x9DF506C6UL, 0x1571B4AEUL, 0x8AC47F71UL
);
VERIFY_CHECK(r1 != k);
VERIFY_CHECK(r2 != k);
/* these _var calls are constant time since the shift amount is constant */
rustsecp256k1_v0_4_0_scalar_mul_shift_var(&c1, k, &g1, 384);
rustsecp256k1_v0_4_0_scalar_mul_shift_var(&c2, k, &g2, 384);
rustsecp256k1_v0_4_0_scalar_mul(&c1, &c1, &minus_b1);
rustsecp256k1_v0_4_0_scalar_mul(&c2, &c2, &minus_b2);
rustsecp256k1_v0_4_0_scalar_add(r2, &c1, &c2);
rustsecp256k1_v0_4_0_scalar_mul(r1, r2, &rustsecp256k1_v0_4_0_const_lambda);
rustsecp256k1_v0_4_0_scalar_negate(r1, r1);
rustsecp256k1_v0_4_0_scalar_add(r1, r1, k);
rustsecp256k1_v0_4_1_scalar_mul_shift_var(&c1, k, &g1, 384);
rustsecp256k1_v0_4_1_scalar_mul_shift_var(&c2, k, &g2, 384);
rustsecp256k1_v0_4_1_scalar_mul(&c1, &c1, &minus_b1);
rustsecp256k1_v0_4_1_scalar_mul(&c2, &c2, &minus_b2);
rustsecp256k1_v0_4_1_scalar_add(r2, &c1, &c2);
rustsecp256k1_v0_4_1_scalar_mul(r1, r2, &rustsecp256k1_v0_4_1_const_lambda);
rustsecp256k1_v0_4_1_scalar_negate(r1, r1);
rustsecp256k1_v0_4_1_scalar_add(r1, r1, k);
#ifdef VERIFY
rustsecp256k1_v0_4_0_scalar_split_lambda_verify(r1, r2, k);
rustsecp256k1_v0_4_1_scalar_split_lambda_verify(r1, r2, k);
#endif
}
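Concretely, the point of the split is that k*P can then be evaluated as r1*P + r2*(lambda*P), and since lambda*(x, y) = (beta*x, y) the second base point is obtained from P with a single field multiplication; with r1 and r2 both bounded near 128 bits (see the proof below), the two half-width scalar multiplications enable the usual GLV-style speedup.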
#ifdef VERIFY
/*
* Proof for rustsecp256k1_v0_4_0_scalar_split_lambda's bounds.
* Proof for rustsecp256k1_v0_4_1_scalar_split_lambda's bounds.
*
* Let
* - epsilon1 = 2^256 * |g1/2^384 - b2/d|
@ -479,8 +260,8 @@ static void rustsecp256k1_v0_4_0_scalar_split_lambda(rustsecp256k1_v0_4_0_scalar
*
* Q.E.D.
*/
static void rustsecp256k1_v0_4_0_scalar_split_lambda_verify(const rustsecp256k1_v0_4_0_scalar *r1, const rustsecp256k1_v0_4_0_scalar *r2, const rustsecp256k1_v0_4_0_scalar *k) {
rustsecp256k1_v0_4_0_scalar s;
static void rustsecp256k1_v0_4_1_scalar_split_lambda_verify(const rustsecp256k1_v0_4_1_scalar *r1, const rustsecp256k1_v0_4_1_scalar *r2, const rustsecp256k1_v0_4_1_scalar *k) {
rustsecp256k1_v0_4_1_scalar s;
unsigned char buf1[32];
unsigned char buf2[32];
@ -496,19 +277,19 @@ static void rustsecp256k1_v0_4_0_scalar_split_lambda_verify(const rustsecp256k1_
0x8a, 0x65, 0x28, 0x7b, 0xd4, 0x71, 0x79, 0xfb, 0x2b, 0xe0, 0x88, 0x46, 0xce, 0xa2, 0x67, 0xed
};
rustsecp256k1_v0_4_0_scalar_mul(&s, &rustsecp256k1_v0_4_0_const_lambda, r2);
rustsecp256k1_v0_4_0_scalar_add(&s, &s, r1);
VERIFY_CHECK(rustsecp256k1_v0_4_0_scalar_eq(&s, k));
rustsecp256k1_v0_4_1_scalar_mul(&s, &rustsecp256k1_v0_4_1_const_lambda, r2);
rustsecp256k1_v0_4_1_scalar_add(&s, &s, r1);
VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_eq(&s, k));
rustsecp256k1_v0_4_0_scalar_negate(&s, r1);
rustsecp256k1_v0_4_0_scalar_get_b32(buf1, r1);
rustsecp256k1_v0_4_0_scalar_get_b32(buf2, &s);
VERIFY_CHECK(rustsecp256k1_v0_4_0_memcmp_var(buf1, k1_bound, 32) < 0 || rustsecp256k1_v0_4_0_memcmp_var(buf2, k1_bound, 32) < 0);
rustsecp256k1_v0_4_1_scalar_negate(&s, r1);
rustsecp256k1_v0_4_1_scalar_get_b32(buf1, r1);
rustsecp256k1_v0_4_1_scalar_get_b32(buf2, &s);
VERIFY_CHECK(rustsecp256k1_v0_4_1_memcmp_var(buf1, k1_bound, 32) < 0 || rustsecp256k1_v0_4_1_memcmp_var(buf2, k1_bound, 32) < 0);
rustsecp256k1_v0_4_0_scalar_negate(&s, r2);
rustsecp256k1_v0_4_0_scalar_get_b32(buf1, r2);
rustsecp256k1_v0_4_0_scalar_get_b32(buf2, &s);
VERIFY_CHECK(rustsecp256k1_v0_4_0_memcmp_var(buf1, k2_bound, 32) < 0 || rustsecp256k1_v0_4_0_memcmp_var(buf2, k2_bound, 32) < 0);
rustsecp256k1_v0_4_1_scalar_negate(&s, r2);
rustsecp256k1_v0_4_1_scalar_get_b32(buf1, r2);
rustsecp256k1_v0_4_1_scalar_get_b32(buf2, &s);
VERIFY_CHECK(rustsecp256k1_v0_4_1_memcmp_var(buf1, k2_bound, 32) < 0 || rustsecp256k1_v0_4_1_memcmp_var(buf2, k2_bound, 32) < 0);
}
#endif /* VERIFY */
#endif /* !defined(EXHAUSTIVE_TEST_ORDER) */

View File

@ -10,7 +10,7 @@
#include <stdint.h>
/** A scalar modulo the group order of the secp256k1 curve. */
typedef uint32_t rustsecp256k1_v0_4_0_scalar;
typedef uint32_t rustsecp256k1_v0_4_1_scalar;
#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) (d0)

View File

@ -11,43 +11,43 @@
#include <string.h>
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_is_even(const rustsecp256k1_v0_4_0_scalar *a) {
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_is_even(const rustsecp256k1_v0_4_1_scalar *a) {
return !(*a & 1);
}
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_clear(rustsecp256k1_v0_4_0_scalar *r) { *r = 0; }
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_scalar_set_int(rustsecp256k1_v0_4_0_scalar *r, unsigned int v) { *r = v; }
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_scalar_clear(rustsecp256k1_v0_4_1_scalar *r) { *r = 0; }
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_scalar_set_int(rustsecp256k1_v0_4_1_scalar *r, unsigned int v) { *r = v; }
SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_0_scalar_get_bits(const rustsecp256k1_v0_4_0_scalar *a, unsigned int offset, unsigned int count) {
SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_1_scalar_get_bits(const rustsecp256k1_v0_4_1_scalar *a, unsigned int offset, unsigned int count) {
if (offset < 32)
return ((*a >> offset) & ((((uint32_t)1) << count) - 1));
else
return 0;
}
SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_0_scalar_get_bits_var(const rustsecp256k1_v0_4_0_scalar *a, unsigned int offset, unsigned int count) {
return rustsecp256k1_v0_4_0_scalar_get_bits(a, offset, count);
SECP256K1_INLINE static unsigned int rustsecp256k1_v0_4_1_scalar_get_bits_var(const rustsecp256k1_v0_4_1_scalar *a, unsigned int offset, unsigned int count) {
return rustsecp256k1_v0_4_1_scalar_get_bits(a, offset, count);
}
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_check_overflow(const rustsecp256k1_v0_4_0_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; }
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_check_overflow(const rustsecp256k1_v0_4_1_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; }
static int rustsecp256k1_v0_4_0_scalar_add(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) {
static int rustsecp256k1_v0_4_1_scalar_add(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) {
*r = (*a + *b) % EXHAUSTIVE_TEST_ORDER;
return *r < *b;
}
static void rustsecp256k1_v0_4_0_scalar_cadd_bit(rustsecp256k1_v0_4_0_scalar *r, unsigned int bit, int flag) {
static void rustsecp256k1_v0_4_1_scalar_cadd_bit(rustsecp256k1_v0_4_1_scalar *r, unsigned int bit, int flag) {
if (flag && bit < 32)
*r += ((uint32_t)1 << bit);
#ifdef VERIFY
VERIFY_CHECK(bit < 32);
/* Verify that adding (1 << bit) will not overflow any in-range scalar *r by overflowing the underlying uint32_t. */
VERIFY_CHECK(((uint32_t)1 << bit) - 1 <= UINT32_MAX - EXHAUSTIVE_TEST_ORDER);
VERIFY_CHECK(rustsecp256k1_v0_4_0_scalar_check_overflow(r) == 0);
VERIFY_CHECK(rustsecp256k1_v0_4_1_scalar_check_overflow(r) == 0);
#endif
}
static void rustsecp256k1_v0_4_0_scalar_set_b32(rustsecp256k1_v0_4_0_scalar *r, const unsigned char *b32, int *overflow) {
static void rustsecp256k1_v0_4_1_scalar_set_b32(rustsecp256k1_v0_4_1_scalar *r, const unsigned char *b32, int *overflow) {
int i;
int over = 0;
*r = 0;
@ -61,16 +61,16 @@ static void rustsecp256k1_v0_4_0_scalar_set_b32(rustsecp256k1_v0_4_0_scalar *r,
if (overflow) *overflow = over;
}
static void rustsecp256k1_v0_4_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_4_0_scalar* a) {
static void rustsecp256k1_v0_4_1_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_4_1_scalar* a) {
memset(bin, 0, 32);
bin[28] = *a >> 24; bin[29] = *a >> 16; bin[30] = *a >> 8; bin[31] = *a;
}
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_is_zero(const rustsecp256k1_v0_4_0_scalar *a) {
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_is_zero(const rustsecp256k1_v0_4_1_scalar *a) {
return *a == 0;
}
static void rustsecp256k1_v0_4_0_scalar_negate(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a) {
static void rustsecp256k1_v0_4_1_scalar_negate(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a) {
if (*a == 0) {
*r = 0;
} else {
@ -78,24 +78,24 @@ static void rustsecp256k1_v0_4_0_scalar_negate(rustsecp256k1_v0_4_0_scalar *r, c
}
}
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_is_one(const rustsecp256k1_v0_4_0_scalar *a) {
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_is_one(const rustsecp256k1_v0_4_1_scalar *a) {
return *a == 1;
}
static int rustsecp256k1_v0_4_0_scalar_is_high(const rustsecp256k1_v0_4_0_scalar *a) {
static int rustsecp256k1_v0_4_1_scalar_is_high(const rustsecp256k1_v0_4_1_scalar *a) {
return *a > EXHAUSTIVE_TEST_ORDER / 2;
}
static int rustsecp256k1_v0_4_0_scalar_cond_negate(rustsecp256k1_v0_4_0_scalar *r, int flag) {
if (flag) rustsecp256k1_v0_4_0_scalar_negate(r, r);
static int rustsecp256k1_v0_4_1_scalar_cond_negate(rustsecp256k1_v0_4_1_scalar *r, int flag) {
if (flag) rustsecp256k1_v0_4_1_scalar_negate(r, r);
return flag ? -1 : 1;
}
static void rustsecp256k1_v0_4_0_scalar_mul(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) {
static void rustsecp256k1_v0_4_1_scalar_mul(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) {
*r = (*a * *b) % EXHAUSTIVE_TEST_ORDER;
}
static int rustsecp256k1_v0_4_0_scalar_shr_int(rustsecp256k1_v0_4_0_scalar *r, int n) {
static int rustsecp256k1_v0_4_1_scalar_shr_int(rustsecp256k1_v0_4_1_scalar *r, int n) {
int ret;
VERIFY_CHECK(n > 0);
VERIFY_CHECK(n < 16);
@ -104,20 +104,16 @@ static int rustsecp256k1_v0_4_0_scalar_shr_int(rustsecp256k1_v0_4_0_scalar *r, i
return ret;
}
static void rustsecp256k1_v0_4_0_scalar_sqr(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a) {
*r = (*a * *a) % EXHAUSTIVE_TEST_ORDER;
}
static void rustsecp256k1_v0_4_0_scalar_split_128(rustsecp256k1_v0_4_0_scalar *r1, rustsecp256k1_v0_4_0_scalar *r2, const rustsecp256k1_v0_4_0_scalar *a) {
static void rustsecp256k1_v0_4_1_scalar_split_128(rustsecp256k1_v0_4_1_scalar *r1, rustsecp256k1_v0_4_1_scalar *r2, const rustsecp256k1_v0_4_1_scalar *a) {
*r1 = *a;
*r2 = 0;
}
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_scalar_eq(const rustsecp256k1_v0_4_0_scalar *a, const rustsecp256k1_v0_4_0_scalar *b) {
SECP256K1_INLINE static int rustsecp256k1_v0_4_1_scalar_eq(const rustsecp256k1_v0_4_1_scalar *a, const rustsecp256k1_v0_4_1_scalar *b) {
return *a == *b;
}
static SECP256K1_INLINE void rustsecp256k1_v0_4_0_scalar_cmov(rustsecp256k1_v0_4_0_scalar *r, const rustsecp256k1_v0_4_0_scalar *a, int flag) {
static SECP256K1_INLINE void rustsecp256k1_v0_4_1_scalar_cmov(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *a, int flag) {
uint32_t mask0, mask1;
VG_CHECK_VERIFY(r, sizeof(*r));
mask0 = flag + ~((uint32_t)0);
@ -125,4 +121,19 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_scalar_cmov(rustsecp256k1_v0_4
*r = (*r & mask0) | (*a & mask1);
}
static void rustsecp256k1_v0_4_1_scalar_inverse(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *x) {
int i;
*r = 0;
for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1)
*r = i;
/* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
* have a composite group order; fix it in exhaustive_tests.c). */
VERIFY_CHECK(*r != 0);
}
static void rustsecp256k1_v0_4_1_scalar_inverse_var(rustsecp256k1_v0_4_1_scalar *r, const rustsecp256k1_v0_4_1_scalar *x) {
rustsecp256k1_v0_4_1_scalar_inverse(r, x);
}
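A standalone illustration of the brute-force inverse used for the exhaustive tests, with a tiny hypothetical group order of 13:
#include <stdint.h>
#include <assert.h>
int main(void) {
    const uint32_t order = 13, x = 5;
    uint32_t r = 0, i;
    for (i = 0; i < order; i++)
        if ((i * x) % order == 1)
            r = i;
    assert(r == 8);            /* 5 * 8 = 40 = 3*13 + 1 */
    return 0;
}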
#endif /* SECP256K1_SCALAR_REPR_IMPL_H */

View File

@ -4,12 +4,12 @@
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
#ifndef _SECP256K1_SCRATCH_
#define _SECP256K1_SCRATCH_
#ifndef SECP256K1_SCRATCH_H
#define SECP256K1_SCRATCH_H
/* The typedef is used internally; the struct name is used in the public API
* (where it is exposed as a different typedef) */
typedef struct rustsecp256k1_v0_4_0_scratch_space_struct {
typedef struct rustsecp256k1_v0_4_1_scratch_space_struct {
/** guard against interpreting this object as other types */
unsigned char magic[8];
/** actual allocated data */
@ -19,24 +19,24 @@ typedef struct rustsecp256k1_v0_4_0_scratch_space_struct {
size_t alloc_size;
/** maximum size available to allocate */
size_t max_size;
} rustsecp256k1_v0_4_0_scratch;
} rustsecp256k1_v0_4_1_scratch;
static rustsecp256k1_v0_4_0_scratch* rustsecp256k1_v0_4_0_scratch_create(const rustsecp256k1_v0_4_0_callback* error_callback, size_t max_size);
static rustsecp256k1_v0_4_1_scratch* rustsecp256k1_v0_4_1_scratch_create(const rustsecp256k1_v0_4_1_callback* error_callback, size_t max_size);
static void rustsecp256k1_v0_4_0_scratch_destroy(const rustsecp256k1_v0_4_0_callback* error_callback, rustsecp256k1_v0_4_0_scratch* scratch);
static void rustsecp256k1_v0_4_1_scratch_destroy(const rustsecp256k1_v0_4_1_callback* error_callback, rustsecp256k1_v0_4_1_scratch* scratch);
/** Returns an opaque object used to "checkpoint" a scratch space. Used
* with `rustsecp256k1_v0_4_0_scratch_apply_checkpoint` to undo allocations. */
static size_t rustsecp256k1_v0_4_0_scratch_checkpoint(const rustsecp256k1_v0_4_0_callback* error_callback, const rustsecp256k1_v0_4_0_scratch* scratch);
* with `rustsecp256k1_v0_4_1_scratch_apply_checkpoint` to undo allocations. */
static size_t rustsecp256k1_v0_4_1_scratch_checkpoint(const rustsecp256k1_v0_4_1_callback* error_callback, const rustsecp256k1_v0_4_1_scratch* scratch);
/** Applies a check point received from `rustsecp256k1_v0_4_0_scratch_checkpoint`,
/** Applies a check point received from `rustsecp256k1_v0_4_1_scratch_checkpoint`,
* undoing all allocations since that point. */
static void rustsecp256k1_v0_4_0_scratch_apply_checkpoint(const rustsecp256k1_v0_4_0_callback* error_callback, rustsecp256k1_v0_4_0_scratch* scratch, size_t checkpoint);
static void rustsecp256k1_v0_4_1_scratch_apply_checkpoint(const rustsecp256k1_v0_4_1_callback* error_callback, rustsecp256k1_v0_4_1_scratch* scratch, size_t checkpoint);
/** Returns the maximum allocation the scratch space will allow */
static size_t rustsecp256k1_v0_4_0_scratch_max_allocation(const rustsecp256k1_v0_4_0_callback* error_callback, const rustsecp256k1_v0_4_0_scratch* scratch, size_t n_objects);
static size_t rustsecp256k1_v0_4_1_scratch_max_allocation(const rustsecp256k1_v0_4_1_callback* error_callback, const rustsecp256k1_v0_4_1_scratch* scratch, size_t n_objects);
/** Returns a pointer into the most recently allocated frame, or NULL if there is insufficient available space */
static void *rustsecp256k1_v0_4_0_scratch_alloc(const rustsecp256k1_v0_4_0_callback* error_callback, rustsecp256k1_v0_4_0_scratch* scratch, size_t n);
static void *rustsecp256k1_v0_4_1_scratch_alloc(const rustsecp256k1_v0_4_1_callback* error_callback, rustsecp256k1_v0_4_1_scratch* scratch, size_t n);
#endif
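A minimal usage sketch of this interface, assuming `error_cb` and `scratch` are a valid callback and an already-created scratch space: allocations made after a checkpoint can be released as a group by re-applying it.
size_t cp = rustsecp256k1_v0_4_1_scratch_checkpoint(error_cb, scratch);
unsigned char *tmp = (unsigned char *)rustsecp256k1_v0_4_1_scratch_alloc(error_cb, scratch, 64);
if (tmp != NULL) {
    /* ... use the 64 temporary bytes ... */
}
/* roll back every allocation made since the checkpoint */
rustsecp256k1_v0_4_1_scratch_apply_checkpoint(error_cb, scratch, cp);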

View File

@ -4,35 +4,35 @@
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
#ifndef _SECP256K1_SCRATCH_IMPL_H_
#define _SECP256K1_SCRATCH_IMPL_H_
#ifndef SECP256K1_SCRATCH_IMPL_H
#define SECP256K1_SCRATCH_IMPL_H
#include "util.h"
#include "scratch.h"
static size_t rustsecp256k1_v0_4_0_scratch_checkpoint(const rustsecp256k1_v0_4_0_callback* error_callback, const rustsecp256k1_v0_4_0_scratch* scratch) {
if (rustsecp256k1_v0_4_0_memcmp_var(scratch->magic, "scratch", 8) != 0) {
rustsecp256k1_v0_4_0_callback_call(error_callback, "invalid scratch space");
static size_t rustsecp256k1_v0_4_1_scratch_checkpoint(const rustsecp256k1_v0_4_1_callback* error_callback, const rustsecp256k1_v0_4_1_scratch* scratch) {
if (rustsecp256k1_v0_4_1_memcmp_var(scratch->magic, "scratch", 8) != 0) {
rustsecp256k1_v0_4_1_callback_call(error_callback, "invalid scratch space");
return 0;
}
return scratch->alloc_size;
}
static void rustsecp256k1_v0_4_0_scratch_apply_checkpoint(const rustsecp256k1_v0_4_0_callback* error_callback, rustsecp256k1_v0_4_0_scratch* scratch, size_t checkpoint) {
if (rustsecp256k1_v0_4_0_memcmp_var(scratch->magic, "scratch", 8) != 0) {
rustsecp256k1_v0_4_0_callback_call(error_callback, "invalid scratch space");
static void rustsecp256k1_v0_4_1_scratch_apply_checkpoint(const rustsecp256k1_v0_4_1_callback* error_callback, rustsecp256k1_v0_4_1_scratch* scratch, size_t checkpoint) {
if (rustsecp256k1_v0_4_1_memcmp_var(scratch->magic, "scratch", 8) != 0) {
rustsecp256k1_v0_4_1_callback_call(error_callback, "invalid scratch space");
return;
}
if (checkpoint > scratch->alloc_size) {
rustsecp256k1_v0_4_0_callback_call(error_callback, "invalid checkpoint");
rustsecp256k1_v0_4_1_callback_call(error_callback, "invalid checkpoint");
return;
}
scratch->alloc_size = checkpoint;
}
static size_t rustsecp256k1_v0_4_0_scratch_max_allocation(const rustsecp256k1_v0_4_0_callback* error_callback, const rustsecp256k1_v0_4_0_scratch* scratch, size_t objects) {
if (rustsecp256k1_v0_4_0_memcmp_var(scratch->magic, "scratch", 8) != 0) {
rustsecp256k1_v0_4_0_callback_call(error_callback, "invalid scratch space");
static size_t rustsecp256k1_v0_4_1_scratch_max_allocation(const rustsecp256k1_v0_4_1_callback* error_callback, const rustsecp256k1_v0_4_1_scratch* scratch, size_t objects) {
if (rustsecp256k1_v0_4_1_memcmp_var(scratch->magic, "scratch", 8) != 0) {
rustsecp256k1_v0_4_1_callback_call(error_callback, "invalid scratch space");
return 0;
}
/* Ensure that multiplication will not wrap around */
@ -45,7 +45,7 @@ static size_t rustsecp256k1_v0_4_0_scratch_max_allocation(const rustsecp256k1_v0
return scratch->max_size - scratch->alloc_size - objects * (ALIGNMENT - 1);
}
static void *rustsecp256k1_v0_4_0_scratch_alloc(const rustsecp256k1_v0_4_0_callback* error_callback, rustsecp256k1_v0_4_0_scratch* scratch, size_t size) {
static void *rustsecp256k1_v0_4_1_scratch_alloc(const rustsecp256k1_v0_4_1_callback* error_callback, rustsecp256k1_v0_4_1_scratch* scratch, size_t size) {
void *ret;
size_t rounded_size;
@ -56,8 +56,8 @@ static void *rustsecp256k1_v0_4_0_scratch_alloc(const rustsecp256k1_v0_4_0_callb
}
size = rounded_size;
if (rustsecp256k1_v0_4_0_memcmp_var(scratch->magic, "scratch", 8) != 0) {
rustsecp256k1_v0_4_0_callback_call(error_callback, "invalid scratch space");
if (rustsecp256k1_v0_4_1_memcmp_var(scratch->magic, "scratch", 8) != 0) {
rustsecp256k1_v0_4_1_callback_call(error_callback, "invalid scratch space");
return NULL;
}

View File

@ -4,12 +4,13 @@
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
#include "include/secp256k1.h"
#include "include/secp256k1_preallocated.h"
#define SECP256K1_BUILD
#include "../include/secp256k1.h"
#include "../include/secp256k1_preallocated.h"
#include "assumptions.h"
#include "util.h"
#include "num_impl.h"
#include "field_impl.h"
#include "scalar_impl.h"
#include "group_impl.h"
@ -22,75 +23,79 @@
#include "scratch_impl.h"
#include "selftest.h"
#ifdef SECP256K1_NO_BUILD
# error "secp256k1.h processed without SECP256K1_BUILD defined while building secp256k1.c"
#endif
#if defined(VALGRIND)
# include <valgrind/memcheck.h>
#endif
#define ARG_CHECK(cond) do { \
if (EXPECT(!(cond), 0)) { \
rustsecp256k1_v0_4_0_callback_call(&ctx->illegal_callback, #cond); \
rustsecp256k1_v0_4_1_callback_call(&ctx->illegal_callback, #cond); \
return 0; \
} \
} while(0)
#define ARG_CHECK_NO_RETURN(cond) do { \
if (EXPECT(!(cond), 0)) { \
rustsecp256k1_v0_4_0_callback_call(&ctx->illegal_callback, #cond); \
rustsecp256k1_v0_4_1_callback_call(&ctx->illegal_callback, #cond); \
} \
} while(0)
#ifndef USE_EXTERNAL_DEFAULT_CALLBACKS
#include <stdlib.h>
#include <stdio.h>
static void rustsecp256k1_v0_4_0_default_illegal_callback_fn(const char* str, void* data) {
static void rustsecp256k1_v0_4_1_default_illegal_callback_fn(const char* str, void* data) {
(void)data;
fprintf(stderr, "[libsecp256k1] illegal argument: %s\n", str);
abort();
}
static void rustsecp256k1_v0_4_0_default_error_callback_fn(const char* str, void* data) {
static void rustsecp256k1_v0_4_1_default_error_callback_fn(const char* str, void* data) {
(void)data;
fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str);
abort();
}
#else
void rustsecp256k1_v0_4_0_default_illegal_callback_fn(const char* str, void* data);
void rustsecp256k1_v0_4_0_default_error_callback_fn(const char* str, void* data);
void rustsecp256k1_v0_4_1_default_illegal_callback_fn(const char* str, void* data);
void rustsecp256k1_v0_4_1_default_error_callback_fn(const char* str, void* data);
#endif
static const rustsecp256k1_v0_4_0_callback default_illegal_callback = {
rustsecp256k1_v0_4_0_default_illegal_callback_fn,
static const rustsecp256k1_v0_4_1_callback default_illegal_callback = {
rustsecp256k1_v0_4_1_default_illegal_callback_fn,
NULL
};
static const rustsecp256k1_v0_4_0_callback default_error_callback = {
rustsecp256k1_v0_4_0_default_error_callback_fn,
static const rustsecp256k1_v0_4_1_callback default_error_callback = {
rustsecp256k1_v0_4_1_default_error_callback_fn,
NULL
};
struct rustsecp256k1_v0_4_0_context_struct {
rustsecp256k1_v0_4_0_ecmult_context ecmult_ctx;
rustsecp256k1_v0_4_0_ecmult_gen_context ecmult_gen_ctx;
rustsecp256k1_v0_4_0_callback illegal_callback;
rustsecp256k1_v0_4_0_callback error_callback;
struct rustsecp256k1_v0_4_1_context_struct {
rustsecp256k1_v0_4_1_ecmult_context ecmult_ctx;
rustsecp256k1_v0_4_1_ecmult_gen_context ecmult_gen_ctx;
rustsecp256k1_v0_4_1_callback illegal_callback;
rustsecp256k1_v0_4_1_callback error_callback;
int declassify;
};
static const rustsecp256k1_v0_4_0_context rustsecp256k1_v0_4_0_context_no_precomp_ = {
static const rustsecp256k1_v0_4_1_context rustsecp256k1_v0_4_1_context_no_precomp_ = {
{ 0 },
{ 0 },
{ rustsecp256k1_v0_4_0_default_illegal_callback_fn, 0 },
{ rustsecp256k1_v0_4_0_default_error_callback_fn, 0 },
{ rustsecp256k1_v0_4_1_default_illegal_callback_fn, 0 },
{ rustsecp256k1_v0_4_1_default_error_callback_fn, 0 },
0
};
const rustsecp256k1_v0_4_0_context *rustsecp256k1_v0_4_0_context_no_precomp = &rustsecp256k1_v0_4_0_context_no_precomp_;
const rustsecp256k1_v0_4_1_context *rustsecp256k1_v0_4_1_context_no_precomp = &rustsecp256k1_v0_4_1_context_no_precomp_;
size_t rustsecp256k1_v0_4_0_context_preallocated_size(unsigned int flags) {
size_t ret = ROUND_TO_ALIGN(sizeof(rustsecp256k1_v0_4_0_context));
size_t rustsecp256k1_v0_4_1_context_preallocated_size(unsigned int flags) {
size_t ret = ROUND_TO_ALIGN(sizeof(rustsecp256k1_v0_4_1_context));
/* A return value of 0 is reserved as an indicator for errors when we call this function internally. */
VERIFY_CHECK(ret != 0);
if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) {
rustsecp256k1_v0_4_0_callback_call(&default_illegal_callback,
rustsecp256k1_v0_4_1_callback_call(&default_illegal_callback,
"Invalid flags");
return 0;
}
@ -104,87 +109,87 @@ size_t rustsecp256k1_v0_4_0_context_preallocated_size(unsigned int flags) {
return ret;
}
size_t rustsecp256k1_v0_4_0_context_preallocated_clone_size(const rustsecp256k1_v0_4_0_context* ctx) {
size_t ret = ROUND_TO_ALIGN(sizeof(rustsecp256k1_v0_4_0_context));
size_t rustsecp256k1_v0_4_1_context_preallocated_clone_size(const rustsecp256k1_v0_4_1_context* ctx) {
size_t ret = ROUND_TO_ALIGN(sizeof(rustsecp256k1_v0_4_1_context));
VERIFY_CHECK(ctx != NULL);
if (rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) {
if (rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) {
ret += SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
}
if (rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)) {
if (rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx)) {
ret += SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
}
return ret;
}
rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_preallocated_create(void* prealloc, unsigned int flags) {
rustsecp256k1_v0_4_1_context* rustsecp256k1_v0_4_1_context_preallocated_create(void* prealloc, unsigned int flags) {
void* const base = prealloc;
size_t prealloc_size;
rustsecp256k1_v0_4_0_context* ret;
rustsecp256k1_v0_4_1_context* ret;
if (!rustsecp256k1_v0_4_0_selftest()) {
rustsecp256k1_v0_4_0_callback_call(&default_error_callback, "self test failed");
if (!rustsecp256k1_v0_4_1_selftest()) {
rustsecp256k1_v0_4_1_callback_call(&default_error_callback, "self test failed");
}
prealloc_size = rustsecp256k1_v0_4_0_context_preallocated_size(flags);
prealloc_size = rustsecp256k1_v0_4_1_context_preallocated_size(flags);
if (prealloc_size == 0) {
return NULL;
}
VERIFY_CHECK(prealloc != NULL);
ret = (rustsecp256k1_v0_4_0_context*)manual_alloc(&prealloc, sizeof(rustsecp256k1_v0_4_0_context), base, prealloc_size);
ret = (rustsecp256k1_v0_4_1_context*)manual_alloc(&prealloc, sizeof(rustsecp256k1_v0_4_1_context), base, prealloc_size);
ret->illegal_callback = default_illegal_callback;
ret->error_callback = default_error_callback;
rustsecp256k1_v0_4_0_ecmult_context_init(&ret->ecmult_ctx);
rustsecp256k1_v0_4_0_ecmult_gen_context_init(&ret->ecmult_gen_ctx);
rustsecp256k1_v0_4_1_ecmult_context_init(&ret->ecmult_ctx);
rustsecp256k1_v0_4_1_ecmult_gen_context_init(&ret->ecmult_gen_ctx);
/* Flags have been checked by rustsecp256k1_v0_4_0_context_preallocated_size. */
/* Flags have been checked by rustsecp256k1_v0_4_1_context_preallocated_size. */
VERIFY_CHECK((flags & SECP256K1_FLAGS_TYPE_MASK) == SECP256K1_FLAGS_TYPE_CONTEXT);
if (flags & SECP256K1_FLAGS_BIT_CONTEXT_SIGN) {
rustsecp256k1_v0_4_0_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &prealloc);
rustsecp256k1_v0_4_1_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &prealloc);
}
if (flags & SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) {
rustsecp256k1_v0_4_0_ecmult_context_build(&ret->ecmult_ctx, &prealloc);
rustsecp256k1_v0_4_1_ecmult_context_build(&ret->ecmult_ctx, &prealloc);
}
ret->declassify = !!(flags & SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY);
return (rustsecp256k1_v0_4_0_context*) ret;
return (rustsecp256k1_v0_4_1_context*) ret;
}
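A minimal usage sketch of the preallocated-context flow, assuming SECP256K1_CONTEXT_SIGN is the usual public flag from include/secp256k1.h and malloc/free come from <stdlib.h>: size the buffer first, then build the context in place.
size_t sz = rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_CONTEXT_SIGN);
void *mem = (sz != 0) ? malloc(sz) : NULL;
if (mem != NULL) {
    rustsecp256k1_v0_4_1_context *my_ctx =
        rustsecp256k1_v0_4_1_context_preallocated_create(mem, SECP256K1_CONTEXT_SIGN);
    /* ... use my_ctx ... */
    rustsecp256k1_v0_4_1_context_preallocated_destroy(my_ctx);
    free(mem);
}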
rustsecp256k1_v0_4_0_context* rustsecp256k1_v0_4_0_context_preallocated_clone(const rustsecp256k1_v0_4_0_context* ctx, void* prealloc) {
rustsecp256k1_v0_4_1_context* rustsecp256k1_v0_4_1_context_preallocated_clone(const rustsecp256k1_v0_4_1_context* ctx, void* prealloc) {
size_t prealloc_size;
rustsecp256k1_v0_4_0_context* ret;
rustsecp256k1_v0_4_1_context* ret;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(prealloc != NULL);
prealloc_size = rustsecp256k1_v0_4_0_context_preallocated_clone_size(ctx);
ret = (rustsecp256k1_v0_4_0_context*)prealloc;
prealloc_size = rustsecp256k1_v0_4_1_context_preallocated_clone_size(ctx);
ret = (rustsecp256k1_v0_4_1_context*)prealloc;
memcpy(ret, ctx, prealloc_size);
rustsecp256k1_v0_4_0_ecmult_gen_context_finalize_memcpy(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx);
rustsecp256k1_v0_4_0_ecmult_context_finalize_memcpy(&ret->ecmult_ctx, &ctx->ecmult_ctx);
rustsecp256k1_v0_4_1_ecmult_gen_context_finalize_memcpy(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx);
rustsecp256k1_v0_4_1_ecmult_context_finalize_memcpy(&ret->ecmult_ctx, &ctx->ecmult_ctx);
return ret;
}
void rustsecp256k1_v0_4_0_context_preallocated_destroy(rustsecp256k1_v0_4_0_context* ctx) {
ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_4_0_context_no_precomp);
void rustsecp256k1_v0_4_1_context_preallocated_destroy(rustsecp256k1_v0_4_1_context* ctx) {
ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_4_1_context_no_precomp);
if (ctx != NULL) {
rustsecp256k1_v0_4_0_ecmult_context_clear(&ctx->ecmult_ctx);
rustsecp256k1_v0_4_0_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx);
rustsecp256k1_v0_4_1_ecmult_context_clear(&ctx->ecmult_ctx);
rustsecp256k1_v0_4_1_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx);
}
}
void rustsecp256k1_v0_4_0_context_set_illegal_callback(rustsecp256k1_v0_4_0_context* ctx, void (*fun)(const char* message, void* data), const void* data) {
ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_4_0_context_no_precomp);
void rustsecp256k1_v0_4_1_context_set_illegal_callback(rustsecp256k1_v0_4_1_context* ctx, void (*fun)(const char* message, void* data), const void* data) {
ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_4_1_context_no_precomp);
if (fun == NULL) {
fun = rustsecp256k1_v0_4_0_default_illegal_callback_fn;
fun = rustsecp256k1_v0_4_1_default_illegal_callback_fn;
}
ctx->illegal_callback.fn = fun;
ctx->illegal_callback.data = data;
}
void rustsecp256k1_v0_4_0_context_set_error_callback(rustsecp256k1_v0_4_0_context* ctx, void (*fun)(const char* message, void* data), const void* data) {
ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_4_0_context_no_precomp);
void rustsecp256k1_v0_4_1_context_set_error_callback(rustsecp256k1_v0_4_1_context* ctx, void (*fun)(const char* message, void* data), const void* data) {
ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_4_1_context_no_precomp);
if (fun == NULL) {
fun = rustsecp256k1_v0_4_0_default_error_callback_fn;
fun = rustsecp256k1_v0_4_1_default_error_callback_fn;
}
ctx->error_callback.fn = fun;
ctx->error_callback.data = data;
@ -194,7 +199,7 @@ void rustsecp256k1_v0_4_0_context_set_error_callback(rustsecp256k1_v0_4_0_contex
* of the software. This is set up for use with valgrind but could be substituted with
* the appropriate instrumentation for other analysis tools.
*/
static SECP256K1_INLINE void rustsecp256k1_v0_4_0_declassify(const rustsecp256k1_v0_4_0_context* ctx, const void *p, size_t len) {
static SECP256K1_INLINE void rustsecp256k1_v0_4_1_declassify(const rustsecp256k1_v0_4_1_context* ctx, const void *p, size_t len) {
#if defined(VALGRIND)
if (EXPECT(ctx->declassify,0)) VALGRIND_MAKE_MEM_DEFINED(p, len);
#else
@ -204,59 +209,59 @@ static SECP256K1_INLINE void rustsecp256k1_v0_4_0_declassify(const rustsecp256k1
#endif
}
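/* Sketch of the intended flow, as exercised by the constant-time test harness
 * (an assumption based on the VALGRIND path above): secret inputs are marked
 * undefined for valgrind, so every value computed from them stays "tainted",
 * and declassify is called only at the points where a secret-derived value is
 * allowed to become public (e.g. the success flag of signing). Any other
 * branch or memory index that depends on tainted data is then reported as a
 * potential timing leak. */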
static int rustsecp256k1_v0_4_0_pubkey_load(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ge* ge, const rustsecp256k1_v0_4_0_pubkey* pubkey) {
if (sizeof(rustsecp256k1_v0_4_0_ge_storage) == 64) {
/* When the rustsecp256k1_v0_4_0_ge_storage type is exactly 64 byte, use its
* representation inside rustsecp256k1_v0_4_0_pubkey, as conversion is very fast.
* Note that rustsecp256k1_v0_4_0_pubkey_save must use the same representation. */
rustsecp256k1_v0_4_0_ge_storage s;
static int rustsecp256k1_v0_4_1_pubkey_load(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ge* ge, const rustsecp256k1_v0_4_1_pubkey* pubkey) {
if (sizeof(rustsecp256k1_v0_4_1_ge_storage) == 64) {
/* When the rustsecp256k1_v0_4_1_ge_storage type is exactly 64 byte, use its
* representation inside rustsecp256k1_v0_4_1_pubkey, as conversion is very fast.
* Note that rustsecp256k1_v0_4_1_pubkey_save must use the same representation. */
rustsecp256k1_v0_4_1_ge_storage s;
memcpy(&s, &pubkey->data[0], sizeof(s));
rustsecp256k1_v0_4_0_ge_from_storage(ge, &s);
rustsecp256k1_v0_4_1_ge_from_storage(ge, &s);
} else {
/* Otherwise, fall back to 32-byte big endian for X and Y. */
rustsecp256k1_v0_4_0_fe x, y;
rustsecp256k1_v0_4_0_fe_set_b32(&x, pubkey->data);
rustsecp256k1_v0_4_0_fe_set_b32(&y, pubkey->data + 32);
rustsecp256k1_v0_4_0_ge_set_xy(ge, &x, &y);
rustsecp256k1_v0_4_1_fe x, y;
rustsecp256k1_v0_4_1_fe_set_b32(&x, pubkey->data);
rustsecp256k1_v0_4_1_fe_set_b32(&y, pubkey->data + 32);
rustsecp256k1_v0_4_1_ge_set_xy(ge, &x, &y);
}
ARG_CHECK(!rustsecp256k1_v0_4_0_fe_is_zero(&ge->x));
ARG_CHECK(!rustsecp256k1_v0_4_1_fe_is_zero(&ge->x));
return 1;
}
static void rustsecp256k1_v0_4_0_pubkey_save(rustsecp256k1_v0_4_0_pubkey* pubkey, rustsecp256k1_v0_4_0_ge* ge) {
if (sizeof(rustsecp256k1_v0_4_0_ge_storage) == 64) {
rustsecp256k1_v0_4_0_ge_storage s;
rustsecp256k1_v0_4_0_ge_to_storage(&s, ge);
static void rustsecp256k1_v0_4_1_pubkey_save(rustsecp256k1_v0_4_1_pubkey* pubkey, rustsecp256k1_v0_4_1_ge* ge) {
if (sizeof(rustsecp256k1_v0_4_1_ge_storage) == 64) {
rustsecp256k1_v0_4_1_ge_storage s;
rustsecp256k1_v0_4_1_ge_to_storage(&s, ge);
memcpy(&pubkey->data[0], &s, sizeof(s));
} else {
VERIFY_CHECK(!rustsecp256k1_v0_4_0_ge_is_infinity(ge));
rustsecp256k1_v0_4_0_fe_normalize_var(&ge->x);
rustsecp256k1_v0_4_0_fe_normalize_var(&ge->y);
rustsecp256k1_v0_4_0_fe_get_b32(pubkey->data, &ge->x);
rustsecp256k1_v0_4_0_fe_get_b32(pubkey->data + 32, &ge->y);
VERIFY_CHECK(!rustsecp256k1_v0_4_1_ge_is_infinity(ge));
rustsecp256k1_v0_4_1_fe_normalize_var(&ge->x);
rustsecp256k1_v0_4_1_fe_normalize_var(&ge->y);
rustsecp256k1_v0_4_1_fe_get_b32(pubkey->data, &ge->x);
rustsecp256k1_v0_4_1_fe_get_b32(pubkey->data + 32, &ge->y);
}
}
int rustsecp256k1_v0_4_0_ec_pubkey_parse(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey* pubkey, const unsigned char *input, size_t inputlen) {
rustsecp256k1_v0_4_0_ge Q;
int rustsecp256k1_v0_4_1_ec_pubkey_parse(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey* pubkey, const unsigned char *input, size_t inputlen) {
rustsecp256k1_v0_4_1_ge Q;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(pubkey != NULL);
memset(pubkey, 0, sizeof(*pubkey));
ARG_CHECK(input != NULL);
if (!rustsecp256k1_v0_4_0_eckey_pubkey_parse(&Q, input, inputlen)) {
if (!rustsecp256k1_v0_4_1_eckey_pubkey_parse(&Q, input, inputlen)) {
return 0;
}
if (!rustsecp256k1_v0_4_0_ge_is_in_correct_subgroup(&Q)) {
if (!rustsecp256k1_v0_4_1_ge_is_in_correct_subgroup(&Q)) {
return 0;
}
rustsecp256k1_v0_4_0_pubkey_save(pubkey, &Q);
rustsecp256k1_v0_4_0_ge_clear(&Q);
rustsecp256k1_v0_4_1_pubkey_save(pubkey, &Q);
rustsecp256k1_v0_4_1_ge_clear(&Q);
return 1;
}
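/* Illustrative round trip (sketch; input33 is assumed to hold a valid 33-byte
 * compressed encoding):
 *
 *     rustsecp256k1_v0_4_1_pubkey pk;
 *     unsigned char out[33];
 *     size_t outlen = sizeof(out);
 *     if (rustsecp256k1_v0_4_1_ec_pubkey_parse(ctx, &pk, input33, 33) &&
 *         rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, out, &outlen, &pk,
 *                                                  SECP256K1_EC_COMPRESSED)) {
 *         ...out now matches input33 and outlen == 33...
 *     }
 */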
int rustsecp256k1_v0_4_0_ec_pubkey_serialize(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_4_0_pubkey* pubkey, unsigned int flags) {
rustsecp256k1_v0_4_0_ge Q;
int rustsecp256k1_v0_4_1_ec_pubkey_serialize(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_4_1_pubkey* pubkey, unsigned int flags) {
rustsecp256k1_v0_4_1_ge Q;
size_t len;
int ret = 0;
@@ -269,8 +274,8 @@ int rustsecp256k1_v0_4_0_ec_pubkey_serialize(const rustsecp256k1_v0_4_0_context*
memset(output, 0, len);
ARG_CHECK(pubkey != NULL);
ARG_CHECK((flags & SECP256K1_FLAGS_TYPE_MASK) == SECP256K1_FLAGS_TYPE_COMPRESSION);
if (rustsecp256k1_v0_4_0_pubkey_load(ctx, &Q, pubkey)) {
ret = rustsecp256k1_v0_4_0_eckey_pubkey_serialize(&Q, output, &len, flags & SECP256K1_FLAGS_BIT_COMPRESSION);
if (rustsecp256k1_v0_4_1_pubkey_load(ctx, &Q, pubkey)) {
ret = rustsecp256k1_v0_4_1_eckey_pubkey_serialize(&Q, output, &len, flags & SECP256K1_FLAGS_BIT_COMPRESSION);
if (ret) {
*outputlen = len;
}
@@ -278,39 +283,65 @@ int rustsecp256k1_v0_4_0_ec_pubkey_serialize(const rustsecp256k1_v0_4_0_context*
return ret;
}
static void rustsecp256k1_v0_4_0_ecdsa_signature_load(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_scalar* r, rustsecp256k1_v0_4_0_scalar* s, const rustsecp256k1_v0_4_0_ecdsa_signature* sig) {
int rustsecp256k1_v0_4_1_ec_pubkey_cmp(const rustsecp256k1_v0_4_1_context* ctx, const rustsecp256k1_v0_4_1_pubkey* pubkey0, const rustsecp256k1_v0_4_1_pubkey* pubkey1) {
unsigned char out[2][33];
const rustsecp256k1_v0_4_1_pubkey* pk[2];
int i;
VERIFY_CHECK(ctx != NULL);
pk[0] = pubkey0; pk[1] = pubkey1;
for (i = 0; i < 2; i++) {
size_t out_size = sizeof(out[i]);
/* If the public key is NULL or invalid, ec_pubkey_serialize will call
* the illegal_callback and return 0. In that case we will serialize the
* key as all zeros which is less than any valid public key. This
* results in consistent comparisons even if NULL or invalid pubkeys are
* involved and prevents edge cases such as sorting algorithms that use
* this function and do not terminate as a result. */
if (!rustsecp256k1_v0_4_1_ec_pubkey_serialize(ctx, out[i], &out_size, pk[i], SECP256K1_EC_COMPRESSED)) {
/* Note that ec_pubkey_serialize should already set the output to
* zero in that case, but it's not guaranteed by the API, we can't
* test it and writing a VERIFY_CHECK is more complex than
* explicitly memsetting (again). */
memset(out[i], 0, sizeof(out[i]));
}
}
return rustsecp256k1_v0_4_1_memcmp_var(out[0], out[1], sizeof(out[0]));
}
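/* Consequence of the above: the induced ordering is the lexicographic order of
 * the 33-byte compressed serializations, with NULL/invalid keys sorting first.
 * A qsort() comparator built on top of it needs access to a context, e.g. via
 * a file-scope variable (illustrative sketch, hypothetical names):
 *
 *     static const rustsecp256k1_v0_4_1_context *cmp_ctx;
 *     static int pubkey_qsort_cmp(const void *a, const void *b) {
 *         return rustsecp256k1_v0_4_1_ec_pubkey_cmp(cmp_ctx,
 *             (const rustsecp256k1_v0_4_1_pubkey *)a,
 *             (const rustsecp256k1_v0_4_1_pubkey *)b);
 *     }
 */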
static void rustsecp256k1_v0_4_1_ecdsa_signature_load(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_scalar* r, rustsecp256k1_v0_4_1_scalar* s, const rustsecp256k1_v0_4_1_ecdsa_signature* sig) {
(void)ctx;
if (sizeof(rustsecp256k1_v0_4_0_scalar) == 32) {
/* When the rustsecp256k1_v0_4_0_scalar type is exactly 32 byte, use its
* representation inside rustsecp256k1_v0_4_0_ecdsa_signature, as conversion is very fast.
* Note that rustsecp256k1_v0_4_0_ecdsa_signature_save must use the same representation. */
if (sizeof(rustsecp256k1_v0_4_1_scalar) == 32) {
/* When the rustsecp256k1_v0_4_1_scalar type is exactly 32 byte, use its
* representation inside rustsecp256k1_v0_4_1_ecdsa_signature, as conversion is very fast.
* Note that rustsecp256k1_v0_4_1_ecdsa_signature_save must use the same representation. */
memcpy(r, &sig->data[0], 32);
memcpy(s, &sig->data[32], 32);
} else {
rustsecp256k1_v0_4_0_scalar_set_b32(r, &sig->data[0], NULL);
rustsecp256k1_v0_4_0_scalar_set_b32(s, &sig->data[32], NULL);
rustsecp256k1_v0_4_1_scalar_set_b32(r, &sig->data[0], NULL);
rustsecp256k1_v0_4_1_scalar_set_b32(s, &sig->data[32], NULL);
}
}
static void rustsecp256k1_v0_4_0_ecdsa_signature_save(rustsecp256k1_v0_4_0_ecdsa_signature* sig, const rustsecp256k1_v0_4_0_scalar* r, const rustsecp256k1_v0_4_0_scalar* s) {
if (sizeof(rustsecp256k1_v0_4_0_scalar) == 32) {
static void rustsecp256k1_v0_4_1_ecdsa_signature_save(rustsecp256k1_v0_4_1_ecdsa_signature* sig, const rustsecp256k1_v0_4_1_scalar* r, const rustsecp256k1_v0_4_1_scalar* s) {
if (sizeof(rustsecp256k1_v0_4_1_scalar) == 32) {
memcpy(&sig->data[0], r, 32);
memcpy(&sig->data[32], s, 32);
} else {
rustsecp256k1_v0_4_0_scalar_get_b32(&sig->data[0], r);
rustsecp256k1_v0_4_0_scalar_get_b32(&sig->data[32], s);
rustsecp256k1_v0_4_1_scalar_get_b32(&sig->data[0], r);
rustsecp256k1_v0_4_1_scalar_get_b32(&sig->data[32], s);
}
}
int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) {
rustsecp256k1_v0_4_0_scalar r, s;
int rustsecp256k1_v0_4_1_ecdsa_signature_parse_der(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) {
rustsecp256k1_v0_4_1_scalar r, s;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(sig != NULL);
ARG_CHECK(input != NULL);
if (rustsecp256k1_v0_4_0_ecdsa_sig_parse(&r, &s, input, inputlen)) {
rustsecp256k1_v0_4_0_ecdsa_signature_save(sig, &r, &s);
if (rustsecp256k1_v0_4_1_ecdsa_sig_parse(&r, &s, input, inputlen)) {
rustsecp256k1_v0_4_1_ecdsa_signature_save(sig, &r, &s);
return 1;
} else {
memset(sig, 0, sizeof(*sig));
@@ -318,8 +349,8 @@ int rustsecp256k1_v0_4_0_ecdsa_signature_parse_der(const rustsecp256k1_v0_4_0_co
}
}
int rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_signature* sig, const unsigned char *input64) {
rustsecp256k1_v0_4_0_scalar r, s;
int rustsecp256k1_v0_4_1_ecdsa_signature_parse_compact(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_signature* sig, const unsigned char *input64) {
rustsecp256k1_v0_4_1_scalar r, s;
int ret = 1;
int overflow = 0;
@@ -327,77 +358,77 @@ int rustsecp256k1_v0_4_0_ecdsa_signature_parse_compact(const rustsecp256k1_v0_4_
ARG_CHECK(sig != NULL);
ARG_CHECK(input64 != NULL);
rustsecp256k1_v0_4_0_scalar_set_b32(&r, &input64[0], &overflow);
rustsecp256k1_v0_4_1_scalar_set_b32(&r, &input64[0], &overflow);
ret &= !overflow;
rustsecp256k1_v0_4_0_scalar_set_b32(&s, &input64[32], &overflow);
rustsecp256k1_v0_4_1_scalar_set_b32(&s, &input64[32], &overflow);
ret &= !overflow;
if (ret) {
rustsecp256k1_v0_4_0_ecdsa_signature_save(sig, &r, &s);
rustsecp256k1_v0_4_1_ecdsa_signature_save(sig, &r, &s);
} else {
memset(sig, 0, sizeof(*sig));
}
return ret;
}
int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_der(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_4_0_ecdsa_signature* sig) {
rustsecp256k1_v0_4_0_scalar r, s;
int rustsecp256k1_v0_4_1_ecdsa_signature_serialize_der(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_4_1_ecdsa_signature* sig) {
rustsecp256k1_v0_4_1_scalar r, s;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(output != NULL);
ARG_CHECK(outputlen != NULL);
ARG_CHECK(sig != NULL);
rustsecp256k1_v0_4_0_ecdsa_signature_load(ctx, &r, &s, sig);
return rustsecp256k1_v0_4_0_ecdsa_sig_serialize(output, outputlen, &r, &s);
rustsecp256k1_v0_4_1_ecdsa_signature_load(ctx, &r, &s, sig);
return rustsecp256k1_v0_4_1_ecdsa_sig_serialize(output, outputlen, &r, &s);
}
int rustsecp256k1_v0_4_0_ecdsa_signature_serialize_compact(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *output64, const rustsecp256k1_v0_4_0_ecdsa_signature* sig) {
rustsecp256k1_v0_4_0_scalar r, s;
int rustsecp256k1_v0_4_1_ecdsa_signature_serialize_compact(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *output64, const rustsecp256k1_v0_4_1_ecdsa_signature* sig) {
rustsecp256k1_v0_4_1_scalar r, s;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(output64 != NULL);
ARG_CHECK(sig != NULL);
rustsecp256k1_v0_4_0_ecdsa_signature_load(ctx, &r, &s, sig);
rustsecp256k1_v0_4_0_scalar_get_b32(&output64[0], &r);
rustsecp256k1_v0_4_0_scalar_get_b32(&output64[32], &s);
rustsecp256k1_v0_4_1_ecdsa_signature_load(ctx, &r, &s, sig);
rustsecp256k1_v0_4_1_scalar_get_b32(&output64[0], &r);
rustsecp256k1_v0_4_1_scalar_get_b32(&output64[32], &s);
return 1;
}
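/* Note: the compact form is a fixed 64-byte encoding, the 32-byte big-endian r
 * followed by the 32-byte big-endian s, whereas the DER form produced above is
 * variable-length (at most 72 bytes including the ASN.1 framing). */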
int rustsecp256k1_v0_4_0_ecdsa_signature_normalize(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_signature *sigout, const rustsecp256k1_v0_4_0_ecdsa_signature *sigin) {
rustsecp256k1_v0_4_0_scalar r, s;
int rustsecp256k1_v0_4_1_ecdsa_signature_normalize(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_signature *sigout, const rustsecp256k1_v0_4_1_ecdsa_signature *sigin) {
rustsecp256k1_v0_4_1_scalar r, s;
int ret = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(sigin != NULL);
rustsecp256k1_v0_4_0_ecdsa_signature_load(ctx, &r, &s, sigin);
ret = rustsecp256k1_v0_4_0_scalar_is_high(&s);
rustsecp256k1_v0_4_1_ecdsa_signature_load(ctx, &r, &s, sigin);
ret = rustsecp256k1_v0_4_1_scalar_is_high(&s);
if (sigout != NULL) {
if (ret) {
rustsecp256k1_v0_4_0_scalar_negate(&s, &s);
rustsecp256k1_v0_4_1_scalar_negate(&s, &s);
}
rustsecp256k1_v0_4_0_ecdsa_signature_save(sigout, &r, &s);
rustsecp256k1_v0_4_1_ecdsa_signature_save(sigout, &r, &s);
}
return ret;
}
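/* For any valid signature (r, s), (r, n - s) verifies as well, so this
 * function canonicalizes to the lower of the two s values and reports whether
 * a flip was needed. Since rustsecp256k1_v0_4_1_ecdsa_verify below rejects
 * high-S signatures outright, signatures obtained from sources that do not
 * enforce low S should be normalized first (illustrative):
 *
 *     rustsecp256k1_v0_4_1_ecdsa_signature_normalize(ctx, &sig, &sig);
 *     ok = rustsecp256k1_v0_4_1_ecdsa_verify(ctx, &sig, msghash32, &pk);
 */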
int rustsecp256k1_v0_4_0_ecdsa_verify(const rustsecp256k1_v0_4_0_context* ctx, const rustsecp256k1_v0_4_0_ecdsa_signature *sig, const unsigned char *msghash32, const rustsecp256k1_v0_4_0_pubkey *pubkey) {
rustsecp256k1_v0_4_0_ge q;
rustsecp256k1_v0_4_0_scalar r, s;
rustsecp256k1_v0_4_0_scalar m;
int rustsecp256k1_v0_4_1_ecdsa_verify(const rustsecp256k1_v0_4_1_context* ctx, const rustsecp256k1_v0_4_1_ecdsa_signature *sig, const unsigned char *msghash32, const rustsecp256k1_v0_4_1_pubkey *pubkey) {
rustsecp256k1_v0_4_1_ge q;
rustsecp256k1_v0_4_1_scalar r, s;
rustsecp256k1_v0_4_1_scalar m;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(msghash32 != NULL);
ARG_CHECK(sig != NULL);
ARG_CHECK(pubkey != NULL);
rustsecp256k1_v0_4_0_scalar_set_b32(&m, msghash32, NULL);
rustsecp256k1_v0_4_0_ecdsa_signature_load(ctx, &r, &s, sig);
return (!rustsecp256k1_v0_4_0_scalar_is_high(&s) &&
rustsecp256k1_v0_4_0_pubkey_load(ctx, &q, pubkey) &&
rustsecp256k1_v0_4_0_ecdsa_sig_verify(&ctx->ecmult_ctx, &r, &s, &q, &m));
rustsecp256k1_v0_4_1_scalar_set_b32(&m, msghash32, NULL);
rustsecp256k1_v0_4_1_ecdsa_signature_load(ctx, &r, &s, sig);
return (!rustsecp256k1_v0_4_1_scalar_is_high(&s) &&
rustsecp256k1_v0_4_1_pubkey_load(ctx, &q, pubkey) &&
rustsecp256k1_v0_4_1_ecdsa_sig_verify(&ctx->ecmult_ctx, &r, &s, &q, &m));
}
static SECP256K1_INLINE void buffer_append(unsigned char *buf, unsigned int *offset, const void *data, unsigned int len) {
@@ -408,7 +439,7 @@ static SECP256K1_INLINE void buffer_append(unsigned char *buf, unsigned int *off
static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) {
unsigned char keydata[112];
unsigned int offset = 0;
rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 rng;
rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 rng;
unsigned int i;
/* We feed a byte array to the PRNG as input, consisting of:
* - the private key (32 bytes) and message (32 bytes), see RFC 6979 3.2d.
@@ -426,51 +457,51 @@ static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *m
if (algo16 != NULL) {
buffer_append(keydata, &offset, algo16, 16);
}
rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_initialize(&rng, keydata, offset);
rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_initialize(&rng, keydata, offset);
memset(keydata, 0, sizeof(keydata));
for (i = 0; i <= counter; i++) {
rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
}
rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_finalize(&rng);
rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_finalize(&rng);
return 1;
}
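/* The counter argument exists to support retries: the loop above draws
 * counter + 1 successive outputs from the same RFC 6979 HMAC-DRBG state and
 * returns the last one, so a caller that rejects a nonce (e.g. because signing
 * failed) gets a fresh but still deterministic value on the next attempt. */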
const rustsecp256k1_v0_4_0_nonce_function rustsecp256k1_v0_4_0_nonce_function_rfc6979 = nonce_function_rfc6979;
const rustsecp256k1_v0_4_0_nonce_function rustsecp256k1_v0_4_0_nonce_function_default = nonce_function_rfc6979;
const rustsecp256k1_v0_4_1_nonce_function rustsecp256k1_v0_4_1_nonce_function_rfc6979 = nonce_function_rfc6979;
const rustsecp256k1_v0_4_1_nonce_function rustsecp256k1_v0_4_1_nonce_function_default = nonce_function_rfc6979;
static int rustsecp256k1_v0_4_0_ecdsa_sign_inner(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_scalar* r, rustsecp256k1_v0_4_0_scalar* s, int* recid, const unsigned char *msg32, const unsigned char *seckey, rustsecp256k1_v0_4_0_nonce_function noncefp, const void* noncedata) {
rustsecp256k1_v0_4_0_scalar sec, non, msg;
static int rustsecp256k1_v0_4_1_ecdsa_sign_inner(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_scalar* r, rustsecp256k1_v0_4_1_scalar* s, int* recid, const unsigned char *msg32, const unsigned char *seckey, rustsecp256k1_v0_4_1_nonce_function noncefp, const void* noncedata) {
rustsecp256k1_v0_4_1_scalar sec, non, msg;
int ret = 0;
int is_sec_valid;
unsigned char nonce32[32];
unsigned int count = 0;
/* Default initialization here is important so we won't pass uninit values to the cmov in the end */
*r = rustsecp256k1_v0_4_0_scalar_zero;
*s = rustsecp256k1_v0_4_0_scalar_zero;
*r = rustsecp256k1_v0_4_1_scalar_zero;
*s = rustsecp256k1_v0_4_1_scalar_zero;
if (recid) {
*recid = 0;
}
if (noncefp == NULL) {
noncefp = rustsecp256k1_v0_4_0_nonce_function_default;
noncefp = rustsecp256k1_v0_4_1_nonce_function_default;
}
/* Fail if the secret key is invalid. */
is_sec_valid = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&sec, seckey);
rustsecp256k1_v0_4_0_scalar_cmov(&sec, &rustsecp256k1_v0_4_0_scalar_one, !is_sec_valid);
rustsecp256k1_v0_4_0_scalar_set_b32(&msg, msg32, NULL);
is_sec_valid = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&sec, seckey);
rustsecp256k1_v0_4_1_scalar_cmov(&sec, &rustsecp256k1_v0_4_1_scalar_one, !is_sec_valid);
rustsecp256k1_v0_4_1_scalar_set_b32(&msg, msg32, NULL);
while (1) {
int is_nonce_valid;
ret = !!noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count);
if (!ret) {
break;
}
is_nonce_valid = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&non, nonce32);
is_nonce_valid = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&non, nonce32);
/* The nonce is still secret here, but it being invalid is less likely than 1:2^255. */
rustsecp256k1_v0_4_0_declassify(ctx, &is_nonce_valid, sizeof(is_nonce_valid));
rustsecp256k1_v0_4_1_declassify(ctx, &is_nonce_valid, sizeof(is_nonce_valid));
if (is_nonce_valid) {
ret = rustsecp256k1_v0_4_0_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, r, s, &sec, &msg, &non, recid);
ret = rustsecp256k1_v0_4_1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, r, s, &sec, &msg, &non, recid);
/* The final signature is no longer a secret, nor is the fact that we were successful or not. */
rustsecp256k1_v0_4_0_declassify(ctx, &ret, sizeof(ret));
rustsecp256k1_v0_4_1_declassify(ctx, &ret, sizeof(ret));
if (ret) {
break;
}
@@ -482,204 +513,204 @@ static int rustsecp256k1_v0_4_0_ecdsa_sign_inner(const rustsecp256k1_v0_4_0_cont
* used as a branching variable. */
ret &= is_sec_valid;
memset(nonce32, 0, 32);
rustsecp256k1_v0_4_0_scalar_clear(&msg);
rustsecp256k1_v0_4_0_scalar_clear(&non);
rustsecp256k1_v0_4_0_scalar_clear(&sec);
rustsecp256k1_v0_4_0_scalar_cmov(r, &rustsecp256k1_v0_4_0_scalar_zero, !ret);
rustsecp256k1_v0_4_0_scalar_cmov(s, &rustsecp256k1_v0_4_0_scalar_zero, !ret);
rustsecp256k1_v0_4_1_scalar_clear(&msg);
rustsecp256k1_v0_4_1_scalar_clear(&non);
rustsecp256k1_v0_4_1_scalar_clear(&sec);
rustsecp256k1_v0_4_1_scalar_cmov(r, &rustsecp256k1_v0_4_1_scalar_zero, !ret);
rustsecp256k1_v0_4_1_scalar_cmov(s, &rustsecp256k1_v0_4_1_scalar_zero, !ret);
if (recid) {
const int zero = 0;
rustsecp256k1_v0_4_0_int_cmov(recid, &zero, !ret);
rustsecp256k1_v0_4_1_int_cmov(recid, &zero, !ret);
}
return ret;
}
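/* Note on the structure above: failures are never propagated through early
 * returns on secret-dependent conditions. Instead the secret key is replaced
 * by 1 when invalid, the signing loop still runs, and r, s and recid are
 * forced back to zero with constant-time cmov operations when ret is 0, so the
 * observable control flow does not depend on the validity of the secret
 * inputs. */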
int rustsecp256k1_v0_4_0_ecdsa_sign(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_ecdsa_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_4_0_nonce_function noncefp, const void* noncedata) {
rustsecp256k1_v0_4_0_scalar r, s;
int rustsecp256k1_v0_4_1_ecdsa_sign(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_ecdsa_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_4_1_nonce_function noncefp, const void* noncedata) {
rustsecp256k1_v0_4_1_scalar r, s;
int ret;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
ARG_CHECK(msghash32 != NULL);
ARG_CHECK(signature != NULL);
ARG_CHECK(seckey != NULL);
ret = rustsecp256k1_v0_4_0_ecdsa_sign_inner(ctx, &r, &s, NULL, msghash32, seckey, noncefp, noncedata);
rustsecp256k1_v0_4_0_ecdsa_signature_save(signature, &r, &s);
ret = rustsecp256k1_v0_4_1_ecdsa_sign_inner(ctx, &r, &s, NULL, msghash32, seckey, noncefp, noncedata);
rustsecp256k1_v0_4_1_ecdsa_signature_save(signature, &r, &s);
return ret;
}
int rustsecp256k1_v0_4_0_ec_seckey_verify(const rustsecp256k1_v0_4_0_context* ctx, const unsigned char *seckey) {
rustsecp256k1_v0_4_0_scalar sec;
int rustsecp256k1_v0_4_1_ec_seckey_verify(const rustsecp256k1_v0_4_1_context* ctx, const unsigned char *seckey) {
rustsecp256k1_v0_4_1_scalar sec;
int ret;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(seckey != NULL);
ret = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&sec, seckey);
rustsecp256k1_v0_4_0_scalar_clear(&sec);
ret = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&sec, seckey);
rustsecp256k1_v0_4_1_scalar_clear(&sec);
return ret;
}
static int rustsecp256k1_v0_4_0_ec_pubkey_create_helper(const rustsecp256k1_v0_4_0_ecmult_gen_context *ecmult_gen_ctx, rustsecp256k1_v0_4_0_scalar *seckey_scalar, rustsecp256k1_v0_4_0_ge *p, const unsigned char *seckey) {
rustsecp256k1_v0_4_0_gej pj;
static int rustsecp256k1_v0_4_1_ec_pubkey_create_helper(const rustsecp256k1_v0_4_1_ecmult_gen_context *ecmult_gen_ctx, rustsecp256k1_v0_4_1_scalar *seckey_scalar, rustsecp256k1_v0_4_1_ge *p, const unsigned char *seckey) {
rustsecp256k1_v0_4_1_gej pj;
int ret;
ret = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(seckey_scalar, seckey);
rustsecp256k1_v0_4_0_scalar_cmov(seckey_scalar, &rustsecp256k1_v0_4_0_scalar_one, !ret);
ret = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(seckey_scalar, seckey);
rustsecp256k1_v0_4_1_scalar_cmov(seckey_scalar, &rustsecp256k1_v0_4_1_scalar_one, !ret);
rustsecp256k1_v0_4_0_ecmult_gen(ecmult_gen_ctx, &pj, seckey_scalar);
rustsecp256k1_v0_4_0_ge_set_gej(p, &pj);
rustsecp256k1_v0_4_1_ecmult_gen(ecmult_gen_ctx, &pj, seckey_scalar);
rustsecp256k1_v0_4_1_ge_set_gej(p, &pj);
return ret;
}
int rustsecp256k1_v0_4_0_ec_pubkey_create(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *pubkey, const unsigned char *seckey) {
rustsecp256k1_v0_4_0_ge p;
rustsecp256k1_v0_4_0_scalar seckey_scalar;
int rustsecp256k1_v0_4_1_ec_pubkey_create(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *pubkey, const unsigned char *seckey) {
rustsecp256k1_v0_4_1_ge p;
rustsecp256k1_v0_4_1_scalar seckey_scalar;
int ret = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(pubkey != NULL);
memset(pubkey, 0, sizeof(*pubkey));
ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
ARG_CHECK(seckey != NULL);
ret = rustsecp256k1_v0_4_0_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &seckey_scalar, &p, seckey);
rustsecp256k1_v0_4_0_pubkey_save(pubkey, &p);
rustsecp256k1_v0_4_0_memczero(pubkey, sizeof(*pubkey), !ret);
ret = rustsecp256k1_v0_4_1_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &seckey_scalar, &p, seckey);
rustsecp256k1_v0_4_1_pubkey_save(pubkey, &p);
rustsecp256k1_v0_4_1_memczero(pubkey, sizeof(*pubkey), !ret);
rustsecp256k1_v0_4_0_scalar_clear(&seckey_scalar);
rustsecp256k1_v0_4_1_scalar_clear(&seckey_scalar);
return ret;
}
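/* Illustrative sketch (assumes seckey32 comes from a cryptographically secure
 * RNG and that ctx was built with ecmult_gen/signing support):
 *
 *     rustsecp256k1_v0_4_1_pubkey pk;
 *     if (rustsecp256k1_v0_4_1_ec_seckey_verify(ctx, seckey32) &&
 *         rustsecp256k1_v0_4_1_ec_pubkey_create(ctx, &pk, seckey32)) {
 *         ...pk is the public key corresponding to seckey32...
 *     }
 */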
int rustsecp256k1_v0_4_0_ec_seckey_negate(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *seckey) {
rustsecp256k1_v0_4_0_scalar sec;
int rustsecp256k1_v0_4_1_ec_seckey_negate(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey) {
rustsecp256k1_v0_4_1_scalar sec;
int ret = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(seckey != NULL);
ret = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&sec, seckey);
rustsecp256k1_v0_4_0_scalar_cmov(&sec, &rustsecp256k1_v0_4_0_scalar_zero, !ret);
rustsecp256k1_v0_4_0_scalar_negate(&sec, &sec);
rustsecp256k1_v0_4_0_scalar_get_b32(seckey, &sec);
ret = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&sec, seckey);
rustsecp256k1_v0_4_1_scalar_cmov(&sec, &rustsecp256k1_v0_4_1_scalar_zero, !ret);
rustsecp256k1_v0_4_1_scalar_negate(&sec, &sec);
rustsecp256k1_v0_4_1_scalar_get_b32(seckey, &sec);
rustsecp256k1_v0_4_0_scalar_clear(&sec);
rustsecp256k1_v0_4_1_scalar_clear(&sec);
return ret;
}
int rustsecp256k1_v0_4_0_ec_privkey_negate(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *seckey) {
return rustsecp256k1_v0_4_0_ec_seckey_negate(ctx, seckey);
int rustsecp256k1_v0_4_1_ec_privkey_negate(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey) {
return rustsecp256k1_v0_4_1_ec_seckey_negate(ctx, seckey);
}
int rustsecp256k1_v0_4_0_ec_pubkey_negate(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *pubkey) {
int rustsecp256k1_v0_4_1_ec_pubkey_negate(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *pubkey) {
int ret = 0;
rustsecp256k1_v0_4_0_ge p;
rustsecp256k1_v0_4_1_ge p;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(pubkey != NULL);
ret = rustsecp256k1_v0_4_0_pubkey_load(ctx, &p, pubkey);
ret = rustsecp256k1_v0_4_1_pubkey_load(ctx, &p, pubkey);
memset(pubkey, 0, sizeof(*pubkey));
if (ret) {
rustsecp256k1_v0_4_0_ge_neg(&p, &p);
rustsecp256k1_v0_4_0_pubkey_save(pubkey, &p);
rustsecp256k1_v0_4_1_ge_neg(&p, &p);
rustsecp256k1_v0_4_1_pubkey_save(pubkey, &p);
}
return ret;
}
static int rustsecp256k1_v0_4_0_ec_seckey_tweak_add_helper(rustsecp256k1_v0_4_0_scalar *sec, const unsigned char *tweak32) {
rustsecp256k1_v0_4_0_scalar term;
static int rustsecp256k1_v0_4_1_ec_seckey_tweak_add_helper(rustsecp256k1_v0_4_1_scalar *sec, const unsigned char *tweak32) {
rustsecp256k1_v0_4_1_scalar term;
int overflow = 0;
int ret = 0;
rustsecp256k1_v0_4_0_scalar_set_b32(&term, tweak32, &overflow);
ret = (!overflow) & rustsecp256k1_v0_4_0_eckey_privkey_tweak_add(sec, &term);
rustsecp256k1_v0_4_0_scalar_clear(&term);
rustsecp256k1_v0_4_1_scalar_set_b32(&term, tweak32, &overflow);
ret = (!overflow) & rustsecp256k1_v0_4_1_eckey_privkey_tweak_add(sec, &term);
rustsecp256k1_v0_4_1_scalar_clear(&term);
return ret;
}
int rustsecp256k1_v0_4_0_ec_seckey_tweak_add(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) {
rustsecp256k1_v0_4_0_scalar sec;
int rustsecp256k1_v0_4_1_ec_seckey_tweak_add(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32) {
rustsecp256k1_v0_4_1_scalar sec;
int ret = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(seckey != NULL);
ARG_CHECK(tweak32 != NULL);
ret = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&sec, seckey);
ret &= rustsecp256k1_v0_4_0_ec_seckey_tweak_add_helper(&sec, tweak32);
rustsecp256k1_v0_4_0_scalar_cmov(&sec, &rustsecp256k1_v0_4_0_scalar_zero, !ret);
rustsecp256k1_v0_4_0_scalar_get_b32(seckey, &sec);
ret = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&sec, seckey);
ret &= rustsecp256k1_v0_4_1_ec_seckey_tweak_add_helper(&sec, tweak32);
rustsecp256k1_v0_4_1_scalar_cmov(&sec, &rustsecp256k1_v0_4_1_scalar_zero, !ret);
rustsecp256k1_v0_4_1_scalar_get_b32(seckey, &sec);
rustsecp256k1_v0_4_0_scalar_clear(&sec);
rustsecp256k1_v0_4_1_scalar_clear(&sec);
return ret;
}
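/* In scalar terms this computes seckey' = (seckey + tweak32) mod n and fails
 * (zeroing the output) if the original key is invalid, the tweak is not below
 * the group order n, or the sum is zero. The matching
 * rustsecp256k1_v0_4_1_ec_pubkey_tweak_add below adds tweak32*G to the point,
 * so tweaked secret and public keys stay consistent with each other, which is
 * what BIP-32 style derivation relies on. */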
int rustsecp256k1_v0_4_0_ec_privkey_tweak_add(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) {
return rustsecp256k1_v0_4_0_ec_seckey_tweak_add(ctx, seckey, tweak32);
int rustsecp256k1_v0_4_1_ec_privkey_tweak_add(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32) {
return rustsecp256k1_v0_4_1_ec_seckey_tweak_add(ctx, seckey, tweak32);
}
static int rustsecp256k1_v0_4_0_ec_pubkey_tweak_add_helper(const rustsecp256k1_v0_4_0_ecmult_context* ecmult_ctx, rustsecp256k1_v0_4_0_ge *p, const unsigned char *tweak32) {
rustsecp256k1_v0_4_0_scalar term;
static int rustsecp256k1_v0_4_1_ec_pubkey_tweak_add_helper(const rustsecp256k1_v0_4_1_ecmult_context* ecmult_ctx, rustsecp256k1_v0_4_1_ge *p, const unsigned char *tweak32) {
rustsecp256k1_v0_4_1_scalar term;
int overflow = 0;
rustsecp256k1_v0_4_0_scalar_set_b32(&term, tweak32, &overflow);
return !overflow && rustsecp256k1_v0_4_0_eckey_pubkey_tweak_add(ecmult_ctx, p, &term);
rustsecp256k1_v0_4_1_scalar_set_b32(&term, tweak32, &overflow);
return !overflow && rustsecp256k1_v0_4_1_eckey_pubkey_tweak_add(ecmult_ctx, p, &term);
}
int rustsecp256k1_v0_4_0_ec_pubkey_tweak_add(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *pubkey, const unsigned char *tweak32) {
rustsecp256k1_v0_4_0_ge p;
int rustsecp256k1_v0_4_1_ec_pubkey_tweak_add(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *pubkey, const unsigned char *tweak32) {
rustsecp256k1_v0_4_1_ge p;
int ret = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(pubkey != NULL);
ARG_CHECK(tweak32 != NULL);
ret = rustsecp256k1_v0_4_0_pubkey_load(ctx, &p, pubkey);
ret = rustsecp256k1_v0_4_1_pubkey_load(ctx, &p, pubkey);
memset(pubkey, 0, sizeof(*pubkey));
ret = ret && rustsecp256k1_v0_4_0_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &p, tweak32);
ret = ret && rustsecp256k1_v0_4_1_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &p, tweak32);
if (ret) {
rustsecp256k1_v0_4_0_pubkey_save(pubkey, &p);
rustsecp256k1_v0_4_1_pubkey_save(pubkey, &p);
}
return ret;
}
int rustsecp256k1_v0_4_0_ec_seckey_tweak_mul(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) {
rustsecp256k1_v0_4_0_scalar factor;
rustsecp256k1_v0_4_0_scalar sec;
int rustsecp256k1_v0_4_1_ec_seckey_tweak_mul(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32) {
rustsecp256k1_v0_4_1_scalar factor;
rustsecp256k1_v0_4_1_scalar sec;
int ret = 0;
int overflow = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(seckey != NULL);
ARG_CHECK(tweak32 != NULL);
rustsecp256k1_v0_4_0_scalar_set_b32(&factor, tweak32, &overflow);
ret = rustsecp256k1_v0_4_0_scalar_set_b32_seckey(&sec, seckey);
ret &= (!overflow) & rustsecp256k1_v0_4_0_eckey_privkey_tweak_mul(&sec, &factor);
rustsecp256k1_v0_4_0_scalar_cmov(&sec, &rustsecp256k1_v0_4_0_scalar_zero, !ret);
rustsecp256k1_v0_4_0_scalar_get_b32(seckey, &sec);
rustsecp256k1_v0_4_1_scalar_set_b32(&factor, tweak32, &overflow);
ret = rustsecp256k1_v0_4_1_scalar_set_b32_seckey(&sec, seckey);
ret &= (!overflow) & rustsecp256k1_v0_4_1_eckey_privkey_tweak_mul(&sec, &factor);
rustsecp256k1_v0_4_1_scalar_cmov(&sec, &rustsecp256k1_v0_4_1_scalar_zero, !ret);
rustsecp256k1_v0_4_1_scalar_get_b32(seckey, &sec);
rustsecp256k1_v0_4_0_scalar_clear(&sec);
rustsecp256k1_v0_4_0_scalar_clear(&factor);
rustsecp256k1_v0_4_1_scalar_clear(&sec);
rustsecp256k1_v0_4_1_scalar_clear(&factor);
return ret;
}
int rustsecp256k1_v0_4_0_ec_privkey_tweak_mul(const rustsecp256k1_v0_4_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) {
return rustsecp256k1_v0_4_0_ec_seckey_tweak_mul(ctx, seckey, tweak32);
int rustsecp256k1_v0_4_1_ec_privkey_tweak_mul(const rustsecp256k1_v0_4_1_context* ctx, unsigned char *seckey, const unsigned char *tweak32) {
return rustsecp256k1_v0_4_1_ec_seckey_tweak_mul(ctx, seckey, tweak32);
}
int rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *pubkey, const unsigned char *tweak32) {
rustsecp256k1_v0_4_0_ge p;
rustsecp256k1_v0_4_0_scalar factor;
int rustsecp256k1_v0_4_1_ec_pubkey_tweak_mul(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *pubkey, const unsigned char *tweak32) {
rustsecp256k1_v0_4_1_ge p;
rustsecp256k1_v0_4_1_scalar factor;
int ret = 0;
int overflow = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(rustsecp256k1_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(rustsecp256k1_v0_4_1_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(pubkey != NULL);
ARG_CHECK(tweak32 != NULL);
rustsecp256k1_v0_4_0_scalar_set_b32(&factor, tweak32, &overflow);
ret = !overflow && rustsecp256k1_v0_4_0_pubkey_load(ctx, &p, pubkey);
rustsecp256k1_v0_4_1_scalar_set_b32(&factor, tweak32, &overflow);
ret = !overflow && rustsecp256k1_v0_4_1_pubkey_load(ctx, &p, pubkey);
memset(pubkey, 0, sizeof(*pubkey));
if (ret) {
if (rustsecp256k1_v0_4_0_eckey_pubkey_tweak_mul(&ctx->ecmult_ctx, &p, &factor)) {
rustsecp256k1_v0_4_0_pubkey_save(pubkey, &p);
if (rustsecp256k1_v0_4_1_eckey_pubkey_tweak_mul(&ctx->ecmult_ctx, &p, &factor)) {
rustsecp256k1_v0_4_1_pubkey_save(pubkey, &p);
} else {
ret = 0;
}
@@ -688,35 +719,35 @@ int rustsecp256k1_v0_4_0_ec_pubkey_tweak_mul(const rustsecp256k1_v0_4_0_context*
return ret;
}
int rustsecp256k1_v0_4_0_context_randomize(rustsecp256k1_v0_4_0_context* ctx, const unsigned char *seed32) {
int rustsecp256k1_v0_4_1_context_randomize(rustsecp256k1_v0_4_1_context* ctx, const unsigned char *seed32) {
VERIFY_CHECK(ctx != NULL);
if (rustsecp256k1_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) {
rustsecp256k1_v0_4_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32);
if (rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) {
rustsecp256k1_v0_4_1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32);
}
return 1;
}
int rustsecp256k1_v0_4_0_ec_pubkey_combine(const rustsecp256k1_v0_4_0_context* ctx, rustsecp256k1_v0_4_0_pubkey *pubnonce, const rustsecp256k1_v0_4_0_pubkey * const *pubnonces, size_t n) {
int rustsecp256k1_v0_4_1_ec_pubkey_combine(const rustsecp256k1_v0_4_1_context* ctx, rustsecp256k1_v0_4_1_pubkey *pubnonce, const rustsecp256k1_v0_4_1_pubkey * const *pubnonces, size_t n) {
size_t i;
rustsecp256k1_v0_4_0_gej Qj;
rustsecp256k1_v0_4_0_ge Q;
rustsecp256k1_v0_4_1_gej Qj;
rustsecp256k1_v0_4_1_ge Q;
ARG_CHECK(pubnonce != NULL);
memset(pubnonce, 0, sizeof(*pubnonce));
ARG_CHECK(n >= 1);
ARG_CHECK(pubnonces != NULL);
rustsecp256k1_v0_4_0_gej_set_infinity(&Qj);
rustsecp256k1_v0_4_1_gej_set_infinity(&Qj);
for (i = 0; i < n; i++) {
rustsecp256k1_v0_4_0_pubkey_load(ctx, &Q, pubnonces[i]);
rustsecp256k1_v0_4_0_gej_add_ge(&Qj, &Qj, &Q);
rustsecp256k1_v0_4_1_pubkey_load(ctx, &Q, pubnonces[i]);
rustsecp256k1_v0_4_1_gej_add_ge(&Qj, &Qj, &Q);
}
if (rustsecp256k1_v0_4_0_gej_is_infinity(&Qj)) {
if (rustsecp256k1_v0_4_1_gej_is_infinity(&Qj)) {
return 0;
}
rustsecp256k1_v0_4_0_ge_set_gej(&Q, &Qj);
rustsecp256k1_v0_4_0_pubkey_save(pubnonce, &Q);
rustsecp256k1_v0_4_1_ge_set_gej(&Q, &Qj);
rustsecp256k1_v0_4_1_pubkey_save(pubnonce, &Q);
return 1;
}

File diff suppressed because it is too large

View File

@@ -11,22 +11,22 @@
#include <string.h>
static int rustsecp256k1_v0_4_0_selftest_sha256(void) {
static int rustsecp256k1_v0_4_1_selftest_sha256(void) {
static const char *input63 = "For this sample, this 63-byte string will be used as input data";
static const unsigned char output32[32] = {
0xf0, 0x8a, 0x78, 0xcb, 0xba, 0xee, 0x08, 0x2b, 0x05, 0x2a, 0xe0, 0x70, 0x8f, 0x32, 0xfa, 0x1e,
0x50, 0xc5, 0xc4, 0x21, 0xaa, 0x77, 0x2b, 0xa5, 0xdb, 0xb4, 0x06, 0xa2, 0xea, 0x6b, 0xe3, 0x42,
};
unsigned char out[32];
rustsecp256k1_v0_4_0_sha256 hasher;
rustsecp256k1_v0_4_0_sha256_initialize(&hasher);
rustsecp256k1_v0_4_0_sha256_write(&hasher, (const unsigned char*)input63, 63);
rustsecp256k1_v0_4_0_sha256_finalize(&hasher, out);
return rustsecp256k1_v0_4_0_memcmp_var(out, output32, 32) == 0;
rustsecp256k1_v0_4_1_sha256 hasher;
rustsecp256k1_v0_4_1_sha256_initialize(&hasher);
rustsecp256k1_v0_4_1_sha256_write(&hasher, (const unsigned char*)input63, 63);
rustsecp256k1_v0_4_1_sha256_finalize(&hasher, out);
return rustsecp256k1_v0_4_1_memcmp_var(out, output32, 32) == 0;
}
static int rustsecp256k1_v0_4_0_selftest(void) {
return rustsecp256k1_v0_4_0_selftest_sha256();
static int rustsecp256k1_v0_4_1_selftest(void) {
return rustsecp256k1_v0_4_1_selftest_sha256();
}
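/* This self test hashes the fixed 63-byte vector above and compares against a
 * known SHA-256 digest; it is intended to run once when a context is created,
 * and a mismatch points at a miscompiled or corrupted build rather than at
 * bad user input. */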
#endif /* SECP256K1_SELFTEST_H */

View File

@@ -14,34 +14,34 @@
/* A non-cryptographic RNG used only for test infrastructure. */
/** Seed the pseudorandom number generator for testing. */
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_testrand_seed(const unsigned char *seed16);
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_testrand_seed(const unsigned char *seed16);
/** Generate a pseudorandom number in the range [0..2**32-1]. */
static uint32_t rustsecp256k1_v0_4_0_testrand32(void);
static uint32_t rustsecp256k1_v0_4_1_testrand32(void);
/** Generate a pseudorandom number in the range [0..2**bits-1]. Bits must be 1 or
* more. */
static uint32_t rustsecp256k1_v0_4_0_testrand_bits(int bits);
static uint32_t rustsecp256k1_v0_4_1_testrand_bits(int bits);
/** Generate a pseudorandom number in the range [0..range-1]. */
static uint32_t rustsecp256k1_v0_4_0_testrand_int(uint32_t range);
static uint32_t rustsecp256k1_v0_4_1_testrand_int(uint32_t range);
/** Generate a pseudorandom 32-byte array. */
static void rustsecp256k1_v0_4_0_testrand256(unsigned char *b32);
static void rustsecp256k1_v0_4_1_testrand256(unsigned char *b32);
/** Generate a pseudorandom 32-byte array with long sequences of zero and one bits. */
static void rustsecp256k1_v0_4_0_testrand256_test(unsigned char *b32);
static void rustsecp256k1_v0_4_1_testrand256_test(unsigned char *b32);
/** Generate pseudorandom bytes with long sequences of zero and one bits. */
static void rustsecp256k1_v0_4_0_testrand_bytes_test(unsigned char *bytes, size_t len);
static void rustsecp256k1_v0_4_1_testrand_bytes_test(unsigned char *bytes, size_t len);
/** Flip a single random bit in a byte array */
static void rustsecp256k1_v0_4_0_testrand_flip(unsigned char *b, size_t len);
static void rustsecp256k1_v0_4_1_testrand_flip(unsigned char *b, size_t len);
/** Initialize the test RNG using (hex encoded) array up to 16 bytes, or randomly if hexseed is NULL. */
static void rustsecp256k1_v0_4_0_testrand_init(const char* hexseed);
static void rustsecp256k1_v0_4_1_testrand_init(const char* hexseed);
/** Print final test information. */
static void rustsecp256k1_v0_4_0_testrand_finish(void);
static void rustsecp256k1_v0_4_1_testrand_finish(void);
#endif /* SECP256K1_TESTRAND_H */

View File

@@ -14,38 +14,38 @@
#include "testrand.h"
#include "hash.h"
static rustsecp256k1_v0_4_0_rfc6979_hmac_sha256 rustsecp256k1_v0_4_0_test_rng;
static uint32_t rustsecp256k1_v0_4_0_test_rng_precomputed[8];
static int rustsecp256k1_v0_4_0_test_rng_precomputed_used = 8;
static uint64_t rustsecp256k1_v0_4_0_test_rng_integer;
static int rustsecp256k1_v0_4_0_test_rng_integer_bits_left = 0;
static rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 rustsecp256k1_v0_4_1_test_rng;
static uint32_t rustsecp256k1_v0_4_1_test_rng_precomputed[8];
static int rustsecp256k1_v0_4_1_test_rng_precomputed_used = 8;
static uint64_t rustsecp256k1_v0_4_1_test_rng_integer;
static int rustsecp256k1_v0_4_1_test_rng_integer_bits_left = 0;
SECP256K1_INLINE static void rustsecp256k1_v0_4_0_testrand_seed(const unsigned char *seed16) {
rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_initialize(&rustsecp256k1_v0_4_0_test_rng, seed16, 16);
SECP256K1_INLINE static void rustsecp256k1_v0_4_1_testrand_seed(const unsigned char *seed16) {
rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_initialize(&rustsecp256k1_v0_4_1_test_rng, seed16, 16);
}
SECP256K1_INLINE static uint32_t rustsecp256k1_v0_4_0_testrand32(void) {
if (rustsecp256k1_v0_4_0_test_rng_precomputed_used == 8) {
rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(&rustsecp256k1_v0_4_0_test_rng, (unsigned char*)(&rustsecp256k1_v0_4_0_test_rng_precomputed[0]), sizeof(rustsecp256k1_v0_4_0_test_rng_precomputed));
rustsecp256k1_v0_4_0_test_rng_precomputed_used = 0;
SECP256K1_INLINE static uint32_t rustsecp256k1_v0_4_1_testrand32(void) {
if (rustsecp256k1_v0_4_1_test_rng_precomputed_used == 8) {
rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(&rustsecp256k1_v0_4_1_test_rng, (unsigned char*)(&rustsecp256k1_v0_4_1_test_rng_precomputed[0]), sizeof(rustsecp256k1_v0_4_1_test_rng_precomputed));
rustsecp256k1_v0_4_1_test_rng_precomputed_used = 0;
}
return rustsecp256k1_v0_4_0_test_rng_precomputed[rustsecp256k1_v0_4_0_test_rng_precomputed_used++];
return rustsecp256k1_v0_4_1_test_rng_precomputed[rustsecp256k1_v0_4_1_test_rng_precomputed_used++];
}
static uint32_t rustsecp256k1_v0_4_0_testrand_bits(int bits) {
static uint32_t rustsecp256k1_v0_4_1_testrand_bits(int bits) {
uint32_t ret;
if (rustsecp256k1_v0_4_0_test_rng_integer_bits_left < bits) {
rustsecp256k1_v0_4_0_test_rng_integer |= (((uint64_t)rustsecp256k1_v0_4_0_testrand32()) << rustsecp256k1_v0_4_0_test_rng_integer_bits_left);
rustsecp256k1_v0_4_0_test_rng_integer_bits_left += 32;
if (rustsecp256k1_v0_4_1_test_rng_integer_bits_left < bits) {
rustsecp256k1_v0_4_1_test_rng_integer |= (((uint64_t)rustsecp256k1_v0_4_1_testrand32()) << rustsecp256k1_v0_4_1_test_rng_integer_bits_left);
rustsecp256k1_v0_4_1_test_rng_integer_bits_left += 32;
}
ret = rustsecp256k1_v0_4_0_test_rng_integer;
rustsecp256k1_v0_4_0_test_rng_integer >>= bits;
rustsecp256k1_v0_4_0_test_rng_integer_bits_left -= bits;
ret = rustsecp256k1_v0_4_1_test_rng_integer;
rustsecp256k1_v0_4_1_test_rng_integer >>= bits;
rustsecp256k1_v0_4_1_test_rng_integer_bits_left -= bits;
ret &= ((~((uint32_t)0)) >> (32 - bits));
return ret;
}
static uint32_t rustsecp256k1_v0_4_0_testrand_int(uint32_t range) {
static uint32_t rustsecp256k1_v0_4_1_testrand_int(uint32_t range) {
/* We want a uniform integer between 0 and range-1, inclusive.
* B is the smallest number such that range <= 2**B.
* two mechanisms implemented here:
@@ -77,25 +77,25 @@ static uint32_t rustsecp256k1_v0_4_0_testrand_int(uint32_t range) {
mult = 1;
}
while(1) {
uint32_t x = rustsecp256k1_v0_4_0_testrand_bits(bits);
uint32_t x = rustsecp256k1_v0_4_1_testrand_bits(bits);
if (x < trange) {
return (mult == 1) ? x : (x % range);
}
}
}
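/* The loop above is plain rejection sampling: draw a bits-wide value, retry
 * until it falls below trange (which is a multiple of range), then reduce,
 * keeping the result uniform instead of introducing modulo bias. */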
static void rustsecp256k1_v0_4_0_testrand256(unsigned char *b32) {
rustsecp256k1_v0_4_0_rfc6979_hmac_sha256_generate(&rustsecp256k1_v0_4_0_test_rng, b32, 32);
static void rustsecp256k1_v0_4_1_testrand256(unsigned char *b32) {
rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(&rustsecp256k1_v0_4_1_test_rng, b32, 32);
}
static void rustsecp256k1_v0_4_0_testrand_bytes_test(unsigned char *bytes, size_t len) {
static void rustsecp256k1_v0_4_1_testrand_bytes_test(unsigned char *bytes, size_t len) {
size_t bits = 0;
memset(bytes, 0, len);
while (bits < len * 8) {
int now;
uint32_t val;
now = 1 + (rustsecp256k1_v0_4_0_testrand_bits(6) * rustsecp256k1_v0_4_0_testrand_bits(5) + 16) / 31;
val = rustsecp256k1_v0_4_0_testrand_bits(1);
now = 1 + (rustsecp256k1_v0_4_1_testrand_bits(6) * rustsecp256k1_v0_4_1_testrand_bits(5) + 16) / 31;
val = rustsecp256k1_v0_4_1_testrand_bits(1);
while (now > 0 && bits < len * 8) {
bytes[bits / 8] |= val << (bits % 8);
now--;
@@ -104,15 +104,15 @@ static void rustsecp256k1_v0_4_0_testrand_bytes_test(unsigned char *bytes, size_
}
}
static void rustsecp256k1_v0_4_0_testrand256_test(unsigned char *b32) {
rustsecp256k1_v0_4_0_testrand_bytes_test(b32, 32);
static void rustsecp256k1_v0_4_1_testrand256_test(unsigned char *b32) {
rustsecp256k1_v0_4_1_testrand_bytes_test(b32, 32);
}
static void rustsecp256k1_v0_4_0_testrand_flip(unsigned char *b, size_t len) {
b[rustsecp256k1_v0_4_0_testrand_int(len)] ^= (1 << rustsecp256k1_v0_4_0_testrand_int(8));
static void rustsecp256k1_v0_4_1_testrand_flip(unsigned char *b, size_t len) {
b[rustsecp256k1_v0_4_1_testrand_int(len)] ^= (1 << rustsecp256k1_v0_4_1_testrand_int(8));
}
static void rustsecp256k1_v0_4_0_testrand_init(const char* hexseed) {
static void rustsecp256k1_v0_4_1_testrand_init(const char* hexseed) {
unsigned char seed16[16] = {0};
if (hexseed && strlen(hexseed) != 0) {
int pos = 0;
@@ -127,7 +127,7 @@ static void rustsecp256k1_v0_4_0_testrand_init(const char* hexseed) {
pos++;
}
} else {
FILE *frand = fopen("/dev/urandom", "r");
FILE *frand = fopen("/dev/urandom", "rb");
if ((frand == NULL) || fread(&seed16, 1, sizeof(seed16), frand) != sizeof(seed16)) {
uint64_t t = time(NULL) * (uint64_t)1337;
fprintf(stderr, "WARNING: could not read 16 bytes from /dev/urandom; falling back to insecure PRNG\n");
@@ -146,12 +146,12 @@ static void rustsecp256k1_v0_4_0_testrand_init(const char* hexseed) {
}
printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], seed16[13], seed16[14], seed16[15]);
rustsecp256k1_v0_4_0_testrand_seed(seed16);
rustsecp256k1_v0_4_1_testrand_seed(seed16);
}
static void rustsecp256k1_v0_4_0_testrand_finish(void) {
static void rustsecp256k1_v0_4_1_testrand_finish(void) {
unsigned char run32[32];
rustsecp256k1_v0_4_0_testrand256(run32);
rustsecp256k1_v0_4_1_testrand256(run32);
printf("random run = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", run32[0], run32[1], run32[2], run32[3], run32[4], run32[5], run32[6], run32[7], run32[8], run32[9], run32[10], run32[11], run32[12], run32[13], run32[14], run32[15]);
}

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff