Update libsecp256k1 upstream to 143dc6e9ee31852a60321b23eea407d2006171da

Elichai Turkel 2019-05-28 15:23:28 +03:00
parent 0fbc0f99ab
commit ce9efb0441
32 changed files with 1244 additions and 453 deletions

View File

@ -1,5 +1,5 @@
language: c
sudo: false
os: linux
addons:
apt:
packages: libgmp-dev
@ -66,4 +66,3 @@ script:
- if [ -n "$HOST" ]; then export USE_HOST="--host=$HOST"; fi
- if [ "x$HOST" = "xi686-linux-gnu" ]; then export CC="$CC -m32"; fi
- ./configure --enable-experimental=$EXPERIMENTAL --enable-endomorphism=$ENDOMORPHISM --with-field=$FIELD --with-bignum=$BIGNUM --with-scalar=$SCALAR --enable-ecmult-static-precomputation=$STATICPRECOMPUTATION --enable-module-ecdh=$ECDH --enable-module-recovery=$RECOVERY --enable-jni=$JNI $EXTRAFLAGS $USE_HOST && make -j2 $BUILD
os: linux

View File

@ -8,6 +8,7 @@ else
JNI_LIB =
endif
include_HEADERS = include/secp256k1.h
include_HEADERS += include/secp256k1_preallocated.h
noinst_HEADERS =
noinst_HEADERS += src/scalar.h
noinst_HEADERS += src/scalar_4x64.h
@ -114,7 +115,7 @@ exhaustive_tests_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/src $(SECP_INCLUDE
if !ENABLE_COVERAGE
exhaustive_tests_CPPFLAGS += -DVERIFY
endif
exhaustive_tests_LDADD = $(SECP_LIBS)
exhaustive_tests_LDADD = $(SECP_LIBS) $(COMMON_LIB)
exhaustive_tests_LDFLAGS = -static
TESTS += exhaustive_tests
endif
@ -151,7 +152,6 @@ endif
if USE_ECMULT_STATIC_PRECOMPUTATION
CPPFLAGS_FOR_BUILD +=-I$(top_srcdir)
CFLAGS_FOR_BUILD += -Wall -Wextra -Wno-unused-function
gen_context_OBJECTS = gen_context.o
gen_context_BIN = gen_context$(BUILD_EXEEXT)
@ -159,7 +159,7 @@ gen_%.o: src/gen_%.c
$(CC_FOR_BUILD) $(CPPFLAGS_FOR_BUILD) $(CFLAGS_FOR_BUILD) -c $< -o $@
$(gen_context_BIN): $(gen_context_OBJECTS)
$(CC_FOR_BUILD) $^ -o $@
$(CC_FOR_BUILD) $(CFLAGS_FOR_BUILD) $(LDFLAGS_FOR_BUILD) $^ -o $@
$(libsecp256k1_la_OBJECTS): src/ecmult_static_context.h
$(tests_OBJECTS): src/ecmult_static_context.h

View File

@ -85,42 +85,42 @@ AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])],
])
AC_ARG_ENABLE(benchmark,
AS_HELP_STRING([--enable-benchmark],[compile benchmark (default is yes)]),
AS_HELP_STRING([--enable-benchmark],[compile benchmark [default=yes]]),
[use_benchmark=$enableval],
[use_benchmark=yes])
AC_ARG_ENABLE(coverage,
AS_HELP_STRING([--enable-coverage],[enable compiler flags to support kcov coverage analysis]),
AS_HELP_STRING([--enable-coverage],[enable compiler flags to support kcov coverage analysis [default=no]]),
[enable_coverage=$enableval],
[enable_coverage=no])
AC_ARG_ENABLE(tests,
AS_HELP_STRING([--enable-tests],[compile tests (default is yes)]),
AS_HELP_STRING([--enable-tests],[compile tests [default=yes]]),
[use_tests=$enableval],
[use_tests=yes])
AC_ARG_ENABLE(openssl_tests,
AS_HELP_STRING([--enable-openssl-tests],[enable OpenSSL tests, if OpenSSL is available (default is auto)]),
AS_HELP_STRING([--enable-openssl-tests],[enable OpenSSL tests [default=auto]]),
[enable_openssl_tests=$enableval],
[enable_openssl_tests=auto])
AC_ARG_ENABLE(experimental,
AS_HELP_STRING([--enable-experimental],[allow experimental configure options (default is no)]),
AS_HELP_STRING([--enable-experimental],[allow experimental configure options [default=no]]),
[use_experimental=$enableval],
[use_experimental=no])
AC_ARG_ENABLE(exhaustive_tests,
AS_HELP_STRING([--enable-exhaustive-tests],[compile exhaustive tests (default is yes)]),
AS_HELP_STRING([--enable-exhaustive-tests],[compile exhaustive tests [default=yes]]),
[use_exhaustive_tests=$enableval],
[use_exhaustive_tests=yes])
AC_ARG_ENABLE(endomorphism,
AS_HELP_STRING([--enable-endomorphism],[enable endomorphism (default is no)]),
AS_HELP_STRING([--enable-endomorphism],[enable endomorphism [default=no]]),
[use_endomorphism=$enableval],
[use_endomorphism=no])
AC_ARG_ENABLE(ecmult_static_precomputation,
AS_HELP_STRING([--enable-ecmult-static-precomputation],[enable precomputed ecmult table for signing (default is yes)]),
AS_HELP_STRING([--enable-ecmult-static-precomputation],[enable precomputed ecmult table for signing [default=auto]]),
[use_ecmult_static_precomputation=$enableval],
[use_ecmult_static_precomputation=auto])
@ -130,65 +130,100 @@ AC_ARG_ENABLE(module_ecdh,
[enable_module_ecdh=no])
AC_ARG_ENABLE(module_recovery,
AS_HELP_STRING([--enable-module-recovery],[enable ECDSA pubkey recovery module (default is no)]),
AS_HELP_STRING([--enable-module-recovery],[enable ECDSA pubkey recovery module [default=no]]),
[enable_module_recovery=$enableval],
[enable_module_recovery=no])
AC_ARG_ENABLE(external_default_callbacks,
AS_HELP_STRING([--enable-external-default-callbacks],[enable external default callback functions (default is no)]),
[use_external_default_callbacks=$enableval],
[use_external_default_callbacks=no])
AC_ARG_ENABLE(jni,
AS_HELP_STRING([--enable-jni],[enable libsecp256k1_jni (default is no)]),
AS_HELP_STRING([--enable-jni],[enable libsecp256k1_jni [default=no]]),
[use_jni=$enableval],
[use_jni=no])
AC_ARG_WITH([field], [AS_HELP_STRING([--with-field=64bit|32bit|auto],
[Specify Field Implementation. Default is auto])],[req_field=$withval], [req_field=auto])
[finite field implementation to use [default=auto]])],[req_field=$withval], [req_field=auto])
AC_ARG_WITH([bignum], [AS_HELP_STRING([--with-bignum=gmp|no|auto],
[Specify Bignum Implementation. Default is auto])],[req_bignum=$withval], [req_bignum=auto])
[bignum implementation to use [default=auto]])],[req_bignum=$withval], [req_bignum=auto])
AC_ARG_WITH([scalar], [AS_HELP_STRING([--with-scalar=64bit|32bit|auto],
[Specify scalar implementation. Default is auto])],[req_scalar=$withval], [req_scalar=auto])
[scalar implementation to use [default=auto]])],[req_scalar=$withval], [req_scalar=auto])
AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto]
[Specify assembly optimizations to use. Default is auto (experimental: arm)])],[req_asm=$withval], [req_asm=auto])
AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto],
[assembly optimizations to use (experimental: arm) [default=auto]])],[req_asm=$withval], [req_asm=auto])
AC_ARG_WITH([ecmult-window], [AS_HELP_STRING([--with-ecmult-window=SIZE|auto],
[window size for ecmult precomputation for verification, specified as integer in range [2..24].]
[Larger values result in possibly better performance at the cost of an exponentially larger precomputed table.]
[The table will store 2^(SIZE-2) * 64 bytes of data but can be larger in memory due to platform-specific padding and alignment.]
[If the endomorphism optimization is enabled, two tables of this size are used instead of only one.]
["auto" is a reasonable setting for desktop machines (currently 15). [default=auto]]
)],
[req_ecmult_window=$withval], [req_ecmult_window=auto])
AC_CHECK_TYPES([__int128])
AC_MSG_CHECKING([for __builtin_expect])
AC_COMPILE_IFELSE([AC_LANG_SOURCE([[void myfunc() {__builtin_expect(0,0);}]])],
[ AC_MSG_RESULT([yes]);AC_DEFINE(HAVE_BUILTIN_EXPECT,1,[Define this symbol if __builtin_expect is available]) ],
[ AC_MSG_RESULT([no])
])
if test x"$enable_coverage" = x"yes"; then
AC_DEFINE(COVERAGE, 1, [Define this symbol to compile out all VERIFY code])
CFLAGS="$CFLAGS -O0 --coverage"
LDFLAGS="--coverage"
LDFLAGS="$LDFLAGS --coverage"
else
CFLAGS="$CFLAGS -O3"
fi
if test x"$use_ecmult_static_precomputation" != x"no"; then
# Temporarily switch to an environment for the native compiler
save_cross_compiling=$cross_compiling
cross_compiling=no
TEMP_CC="$CC"
SAVE_CC="$CC"
CC="$CC_FOR_BUILD"
AC_MSG_CHECKING([native compiler: ${CC_FOR_BUILD}])
SAVE_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS_FOR_BUILD"
SAVE_CPPFLAGS="$CPPFLAGS"
CPPFLAGS="$CPPFLAGS_FOR_BUILD"
SAVE_LDFLAGS="$LDFLAGS"
LDFLAGS="$LDFLAGS_FOR_BUILD"
warn_CFLAGS_FOR_BUILD="-Wall -Wextra -Wno-unused-function"
saved_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS $warn_CFLAGS_FOR_BUILD"
AC_MSG_CHECKING([if native ${CC_FOR_BUILD} supports ${warn_CFLAGS_FOR_BUILD}])
AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])],
[ AC_MSG_RESULT([yes]) ],
[ AC_MSG_RESULT([no])
CFLAGS="$saved_CFLAGS"
])
AC_MSG_CHECKING([for working native compiler: ${CC_FOR_BUILD}])
AC_RUN_IFELSE(
[AC_LANG_PROGRAM([], [return 0])],
[AC_LANG_PROGRAM([], [])],
[working_native_cc=yes],
[working_native_cc=no],[dnl])
CC="$TEMP_CC"
CFLAGS_FOR_BUILD="$CFLAGS"
# Restore the environment
cross_compiling=$save_cross_compiling
CC="$SAVE_CC"
CFLAGS="$SAVE_CFLAGS"
CPPFLAGS="$SAVE_CPPFLAGS"
LDFLAGS="$SAVE_LDFLAGS"
if test x"$working_native_cc" = x"no"; then
AC_MSG_RESULT([no])
set_precomp=no
m4_define([please_set_for_build], [Please set CC_FOR_BUILD, CFLAGS_FOR_BUILD, CPPFLAGS_FOR_BUILD, and/or LDFLAGS_FOR_BUILD.])
if test x"$use_ecmult_static_precomputation" = x"yes"; then
AC_MSG_ERROR([${CC_FOR_BUILD} does not produce working binaries. Please set CC_FOR_BUILD])
AC_MSG_ERROR([native compiler ${CC_FOR_BUILD} does not produce working binaries. please_set_for_build])
else
AC_MSG_RESULT([${CC_FOR_BUILD} does not produce working binaries. Please set CC_FOR_BUILD])
AC_MSG_WARN([Disabling statically generated ecmult table because the native compiler ${CC_FOR_BUILD} does not produce working binaries. please_set_for_build])
fi
else
AC_MSG_RESULT([ok])
AC_MSG_RESULT([yes])
set_precomp=yes
fi
else
@ -366,6 +401,28 @@ case $set_scalar in
;;
esac
#set ecmult window size
if test x"$req_ecmult_window" = x"auto"; then
set_ecmult_window=15
else
set_ecmult_window=$req_ecmult_window
fi
error_window_size=['window size for ecmult precomputation not an integer in range [2..24] or "auto"']
case $set_ecmult_window in
''|*[[!0-9]]*)
# no valid integer
AC_MSG_ERROR($error_window_size)
;;
*)
if test "$set_ecmult_window" -lt 2 -o "$set_ecmult_window" -gt 24 ; then
# not in range
AC_MSG_ERROR($error_window_size)
fi
AC_DEFINE_UNQUOTED(ECMULT_WINDOW_SIZE, $set_ecmult_window, [Set window size for ecmult precomputation])
;;
esac
if test x"$use_tests" = x"yes"; then
SECP_OPENSSL_CHECK
if test x"$has_openssl_ec" = x"yes"; then
@ -441,17 +498,9 @@ if test x"$use_external_asm" = x"yes"; then
AC_DEFINE(USE_EXTERNAL_ASM, 1, [Define this symbol if an external (non-inline) assembly implementation is used])
fi
AC_MSG_NOTICE([Using static precomputation: $set_precomp])
AC_MSG_NOTICE([Using assembly optimizations: $set_asm])
AC_MSG_NOTICE([Using field implementation: $set_field])
AC_MSG_NOTICE([Using bignum implementation: $set_bignum])
AC_MSG_NOTICE([Using scalar implementation: $set_scalar])
AC_MSG_NOTICE([Using endomorphism optimizations: $use_endomorphism])
AC_MSG_NOTICE([Building benchmarks: $use_benchmark])
AC_MSG_NOTICE([Building for coverage analysis: $enable_coverage])
AC_MSG_NOTICE([Building ECDH module: $enable_module_ecdh])
AC_MSG_NOTICE([Building ECDSA pubkey recovery module: $enable_module_recovery])
AC_MSG_NOTICE([Using jni: $use_jni])
if test x"$use_external_default_callbacks" = x"yes"; then
AC_DEFINE(USE_EXTERNAL_DEFAULT_CALLBACKS, 1, [Define this symbol if an external implementation of the default callbacks is used])
fi
if test x"$enable_experimental" = x"yes"; then
AC_MSG_NOTICE([******])
@ -482,7 +531,7 @@ AM_CONDITIONAL([USE_BENCHMARK], [test x"$use_benchmark" = x"yes"])
AM_CONDITIONAL([USE_ECMULT_STATIC_PRECOMPUTATION], [test x"$set_precomp" = x"yes"])
AM_CONDITIONAL([ENABLE_MODULE_ECDH], [test x"$enable_module_ecdh" = x"yes"])
AM_CONDITIONAL([ENABLE_MODULE_RECOVERY], [test x"$enable_module_recovery" = x"yes"])
AM_CONDITIONAL([USE_JNI], [test x"$use_jni" == x"yes"])
AM_CONDITIONAL([USE_JNI], [test x"$use_jni" = x"yes"])
AM_CONDITIONAL([USE_EXTERNAL_ASM], [test x"$use_external_asm" = x"yes"])
AM_CONDITIONAL([USE_ASM_ARM], [test x"$set_asm" = x"arm"])
@ -492,3 +541,26 @@ unset PKG_CONFIG_PATH
PKG_CONFIG_PATH="$PKGCONFIG_PATH_TEMP"
AC_OUTPUT
echo
echo "Build Options:"
echo " with endomorphism = $use_endomorphism"
echo " with ecmult precomp = $set_precomp"
echo " with external callbacks = $use_external_default_callbacks"
echo " with jni = $use_jni"
echo " with benchmarks = $use_benchmark"
echo " with coverage = $enable_coverage"
echo " module ecdh = $enable_module_ecdh"
echo " module recovery = $enable_module_recovery"
echo
echo " asm = $set_asm"
echo " bignum = $set_bignum"
echo " field = $set_field"
echo " scalar = $set_scalar"
echo " ecmult window size = $set_ecmult_window"
echo
echo " CC = $CC"
echo " CFLAGS = $CFLAGS"
echo " CPPFLAGS = $CPPFLAGS"
echo " LDFLAGS = $LDFLAGS"
echo

View File

@ -33,9 +33,10 @@ extern "C" {
* verification).
*
* A constructed context can safely be used from multiple threads
* simultaneously, but API call that take a non-const pointer to a context
* simultaneously, but API calls that take a non-const pointer to a context
* need exclusive access to it. In particular this is the case for
* secp256k1_context_destroy and secp256k1_context_randomize.
* secp256k1_context_destroy, secp256k1_context_preallocated_destroy,
* and secp256k1_context_randomize.
*
* Regarding randomization, either do it once at creation time (in which case
* you do not need any locking for the other calls), or use a read-write lock.
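[Editor's note: a minimal usage sketch of the threading rule described above, not part of the diff. API calls that take a const context pointer can run concurrently, while secp256k1_context_randomize takes a non-const pointer and therefore needs exclusive access. POSIX threads and the helper names here are assumptions for illustration.]

```c
#include <pthread.h>
#include <secp256k1.h>

static secp256k1_context *g_ctx;                      /* shared after setup */
static pthread_rwlock_t g_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Option 1: randomize once at creation time; afterwards the const-pointer
 * API calls (verification, signing, ...) need no locking at all. */
static int setup_ctx(const unsigned char seed32[32]) {
    g_ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
    return g_ctx != NULL && secp256k1_context_randomize(g_ctx, seed32);
}

/* Option 2: re-randomize later under a write lock, since this call takes a
 * non-const context pointer and needs exclusive access. */
static int rerandomize_ctx(const unsigned char seed32[32]) {
    int ok;
    pthread_rwlock_wrlock(&g_lock);
    ok = secp256k1_context_randomize(g_ctx, seed32);
    pthread_rwlock_unlock(&g_lock);
    return ok;
}
```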
@ -163,7 +164,8 @@ typedef int (*secp256k1_nonce_function)(
#define SECP256K1_FLAGS_BIT_CONTEXT_SIGN (1 << 9)
#define SECP256K1_FLAGS_BIT_COMPRESSION (1 << 8)
/** Flags to pass to secp256k1_context_create. */
/** Flags to pass to secp256k1_context_create, secp256k1_context_preallocated_size, and
* secp256k1_context_preallocated_create. */
#define SECP256K1_CONTEXT_VERIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_VERIFY)
#define SECP256K1_CONTEXT_SIGN (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_SIGN)
#define SECP256K1_CONTEXT_NONE (SECP256K1_FLAGS_TYPE_CONTEXT)
@ -186,7 +188,11 @@ typedef int (*secp256k1_nonce_function)(
*/
SECP256K1_API extern const secp256k1_context *secp256k1_context_no_precomp;
/** Create a secp256k1 context object.
/** Create a secp256k1 context object (in dynamically allocated memory).
*
* This function uses malloc to allocate memory. It is guaranteed that malloc is
* called at most once for every call of this function. If you need to avoid dynamic
* memory allocation entirely, see the functions in secp256k1_preallocated.h.
*
* Returns: a newly created context object.
* In: flags: which parts of the context to initialize.
@ -197,7 +203,11 @@ SECP256K1_API secp256k1_context* secp256k1_context_create(
unsigned int flags
) SECP256K1_WARN_UNUSED_RESULT;
/** Copies a secp256k1 context object.
/** Copy a secp256k1 context object (into dynamically allocated memory).
*
* This function uses malloc to allocate memory. It is guaranteed that malloc is
* called at most once for every call of this function. If you need to avoid dynamic
* memory allocation entirely, see the functions in secp256k1_preallocated.h.
*
* Returns: a newly created context object.
* Args: ctx: an existing context to copy (cannot be NULL)
@ -206,10 +216,18 @@ SECP256K1_API secp256k1_context* secp256k1_context_clone(
const secp256k1_context* ctx
) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT;
/** Destroy a secp256k1 context object.
/** Destroy a secp256k1 context object (created in dynamically allocated memory).
*
* The context pointer may not be used afterwards.
* Args: ctx: an existing context to destroy (cannot be NULL)
*
* The context to destroy must have been created using secp256k1_context_create
* or secp256k1_context_clone. If the context has instead been created using
* secp256k1_context_preallocated_create or secp256k1_context_preallocated_clone, the
* behaviour is undefined. In that case, secp256k1_context_preallocated_destroy must
* be used instead.
*
* Args: ctx: an existing context to destroy, constructed using
* secp256k1_context_create or secp256k1_context_clone
*/
SECP256K1_API void secp256k1_context_destroy(
secp256k1_context* ctx
@ -229,11 +247,28 @@ SECP256K1_API void secp256k1_context_destroy(
* to cause a crash, though its return value and output arguments are
* undefined.
*
* When this function has not been called (or called with fn==NULL), then the
* default handler will be used. The library provides a default handler which
* writes the message to stderr and calls abort. This default handler can be
* replaced at link time if the preprocessor macro
* USE_EXTERNAL_DEFAULT_CALLBACKS is defined, which is the case if the build
* has been configured with --enable-external-default-callbacks. Then the
* following two symbols must be provided to link against:
* - void secp256k1_default_illegal_callback_fn(const char* message, void* data);
* - void secp256k1_default_error_callback_fn(const char* message, void* data);
* The library can call these default handlers even before a proper callback data
* pointer could have been set using secp256k1_context_set_illegal_callback or
* secp256k1_context_set_error_callback, e.g., when the creation of a context
* fails. In this case, the corresponding default handler will be called with
* the data pointer argument set to NULL.
*
* Args: ctx: an existing context object (cannot be NULL)
* In: fun: a pointer to a function to call when an illegal argument is
* passed to the API, taking a message and an opaque pointer
* (NULL restores a default handler that calls abort).
* passed to the API, taking a message and an opaque pointer.
* (NULL restores the default handler.)
* data: the opaque pointer to pass to fun above.
*
* See also secp256k1_context_set_error_callback.
*/
SECP256K1_API void secp256k1_context_set_illegal_callback(
secp256k1_context* ctx,
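[Editor's note: a hedged sketch of what the new --enable-external-default-callbacks mechanism expects from the linking application. The two symbol names come from the comment above; the bodies are only illustrative.]

```c
#include <stdio.h>
#include <stdlib.h>

/* Provided by the application when the library is built with
 * USE_EXTERNAL_DEFAULT_CALLBACKS. Note that `data` may be NULL, e.g. when a
 * handler fires before any callback data could be set. */
void secp256k1_default_illegal_callback_fn(const char* message, void* data) {
    (void)data;
    fprintf(stderr, "secp256k1 illegal argument: %s\n", message);
    abort();
}

void secp256k1_default_error_callback_fn(const char* message, void* data) {
    (void)data;
    fprintf(stderr, "secp256k1 internal error: %s\n", message);
    abort();
}
```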
@ -253,9 +288,12 @@ SECP256K1_API void secp256k1_context_set_illegal_callback(
*
* Args: ctx: an existing context object (cannot be NULL)
* In: fun: a pointer to a function to call when an internal error occurs,
* taking a message and an opaque pointer (NULL restores a default
* handler that calls abort).
* taking a message and an opaque pointer (NULL restores the
* default handler, see secp256k1_context_set_illegal_callback
* for details).
* data: the opaque pointer to pass to fun above.
*
* See also secp256k1_context_set_illegal_callback.
*/
SECP256K1_API void secp256k1_context_set_error_callback(
secp256k1_context* ctx,
@ -267,21 +305,24 @@ SECP256K1_API void secp256k1_context_set_error_callback(
*
* Returns: a newly created scratch space.
* Args: ctx: an existing context object (cannot be NULL)
* In: max_size: maximum amount of memory to allocate
* In: size: amount of memory to be available as scratch space. Some extra
* (<100 bytes) will be allocated for extra accounting.
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT secp256k1_scratch_space* secp256k1_scratch_space_create(
const secp256k1_context* ctx,
size_t max_size
size_t size
) SECP256K1_ARG_NONNULL(1);
/** Destroy a secp256k1 scratch space.
*
* The pointer may not be used afterwards.
* Args: scratch: space to destroy
* Args: ctx: a secp256k1 context object.
* scratch: space to destroy
*/
SECP256K1_API void secp256k1_scratch_space_destroy(
const secp256k1_context* ctx,
secp256k1_scratch_space* scratch
);
) SECP256K1_ARG_NONNULL(1);
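[Editor's note: a short usage sketch for the scratch-space API as changed above (the size is now the usable amount, and destroy takes the context too); error handling is minimal and purely illustrative.]

```c
#include <secp256k1.h>

static void scratch_example(void) {
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
    /* Request ~1 MiB of usable scratch; the library adds <100 bytes for accounting. */
    secp256k1_scratch_space *scratch = secp256k1_scratch_space_create(ctx, 1 << 20);
    if (scratch != NULL) {
        /* ... pass `scratch` to API functions that accept a scratch space ... */
        secp256k1_scratch_space_destroy(ctx, scratch);   /* note: ctx is now required */
    }
    secp256k1_context_destroy(ctx);
}
```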
/** Parse a variable-length public key into the pubkey object.
*
@ -615,7 +656,7 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_tweak_mul(
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Updates the context randomization to protect against side-channel leakage.
* Returns: 1: randomization successfully updated
* Returns: 1: randomization successfully updated or nothing to randomize
* 0: error
* Args: ctx: pointer to a context object (cannot be NULL)
* In: seed32: pointer to a 32-byte random seed (NULL resets to initial state)
@ -630,8 +671,14 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_tweak_mul(
* that it does not affect function results, but shields against attacks which
* rely on any input-dependent behaviour.
*
* This function has currently an effect only on contexts initialized for signing
* because randomization is currently used only for signing. However, this is not
* guaranteed and may change in the future. It is safe to call this function on
* contexts not initialized for signing; then it will have no effect and return 1.
*
* You should call this after secp256k1_context_create or
* secp256k1_context_clone, and may call this repeatedly afterwards.
* secp256k1_context_clone (and secp256k1_context_preallocated_create or
* secp256k1_context_preallocated_clone, resp.), and you may call this repeatedly afterwards.
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_context_randomize(
secp256k1_context* ctx,

View File

@ -0,0 +1,128 @@
#ifndef SECP256K1_PREALLOCATED_H
#define SECP256K1_PREALLOCATED_H
#include "secp256k1.h"
#ifdef __cplusplus
extern "C" {
#endif
/* The module provided by this header file is intended for settings in which it
* is not possible or desirable to rely on dynamic memory allocation. It provides
* functions for creating, cloning, and destroying secp256k1 context objects in a
* contiguous fixed-size block of memory provided by the caller.
*
* Context objects created by functions in this module can be used like context
* objects created by functions in secp256k1.h, i.e., they can be passed to any
* API function that expects a context object (see secp256k1.h for details). The
* only exception is that context objects created by functions in this module
* must be destroyed using secp256k1_context_preallocated_destroy (in this
* module) instead of secp256k1_context_destroy (in secp256k1.h).
*
* It is guaranteed that functions in this module will not call malloc or its
* friends realloc, calloc, and free.
*/
/** Determine the memory size of a secp256k1 context object to be created in
* caller-provided memory.
*
* The purpose of this function is to determine how much memory must be provided
* to secp256k1_context_preallocated_create.
*
* Returns: the required size of the caller-provided memory block
* In: flags: which parts of the context to initialize.
*/
SECP256K1_API size_t secp256k1_context_preallocated_size(
unsigned int flags
) SECP256K1_WARN_UNUSED_RESULT;
/** Create a secp256k1 context object in caller-provided memory.
*
* The caller must provide a pointer to a rewritable contiguous block of memory
* of size at least secp256k1_context_preallocated_size(flags) bytes, suitably
* aligned to hold an object of any type.
*
* The block of memory is exclusively owned by the created context object during
* the lifetime of this context object, which begins with the call to this
* function and ends when a call to secp256k1_context_preallocated_destroy
* (which destroys the context object again) returns. During the lifetime of the
* context object, the caller is obligated not to access this block of memory,
* i.e., the caller may not read or write the memory, e.g., by copying the memory
* contents to a different location or trying to create a second context object
* in the memory. In simpler words, the prealloc pointer (or any pointer derived
* from it) should not be used during the lifetime of the context object.
*
* Returns: a newly created context object.
* In: prealloc: a pointer to a rewritable contiguous block of memory of
* size at least secp256k1_context_preallocated_size(flags)
* bytes, as detailed above (cannot be NULL)
* flags: which parts of the context to initialize.
*
* See also secp256k1_context_randomize (in secp256k1.h)
* and secp256k1_context_preallocated_destroy.
*/
SECP256K1_API secp256k1_context* secp256k1_context_preallocated_create(
void* prealloc,
unsigned int flags
) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT;
/** Determine the memory size of a secp256k1 context object to be copied into
* caller-provided memory.
*
* Returns: the required size of the caller-provided memory block.
* In: ctx: an existing context to copy (cannot be NULL)
*/
SECP256K1_API size_t secp256k1_context_preallocated_clone_size(
const secp256k1_context* ctx
) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT;
/** Copy a secp256k1 context object into caller-provided memory.
*
* The caller must provide a pointer to a rewritable contiguous block of memory
* of size at least secp256k1_context_preallocated_size(flags) bytes, suitably
* aligned to hold an object of any type.
*
* The block of memory is exclusively owned by the created context object during
* the lifetime of this context object, see the description of
* secp256k1_context_preallocated_create for details.
*
* Returns: a newly created context object.
* Args: ctx: an existing context to copy (cannot be NULL)
* In: prealloc: a pointer to a rewritable contiguous block of memory of
* size at least secp256k1_context_preallocated_clone_size(ctx)
* bytes, as detailed above (cannot be NULL)
*/
SECP256K1_API secp256k1_context* secp256k1_context_preallocated_clone(
const secp256k1_context* ctx,
void* prealloc
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_WARN_UNUSED_RESULT;
/** Destroy a secp256k1 context object that has been created in
* caller-provided memory.
*
* The context pointer may not be used afterwards.
*
* The context to destroy must have been created using
* secp256k1_context_preallocated_create or secp256k1_context_preallocated_clone.
* If the context has instead been created using secp256k1_context_create or
* secp256k1_context_clone, the behaviour is undefined. In that case,
* secp256k1_context_destroy must be used instead.
*
* If required, it is the responsibility of the caller to deallocate the block
* of memory properly after this function returns, e.g., by calling free on the
* preallocated pointer given to secp256k1_context_preallocated_create or
* secp256k1_context_preallocated_clone.
*
* Args: ctx: an existing context to destroy, constructed using
* secp256k1_context_preallocated_create or
* secp256k1_context_preallocated_clone (cannot be NULL)
*/
SECP256K1_API void secp256k1_context_preallocated_destroy(
secp256k1_context* ctx
);
#ifdef __cplusplus
}
#endif
#endif /* SECP256K1_PREALLOCATED_H */
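[Editor's note: a hedged end-to-end sketch of the calling sequence this new header describes. The buffer happens to come from malloc purely for illustration (it could equally be static storage); the point is that the library itself performs no allocation.]

```c
#include <stdlib.h>
#include <secp256k1.h>
#include <secp256k1_preallocated.h>

static int preallocated_example(void) {
    size_t size = secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN);
    void *prealloc = malloc(size);     /* caller-provided, suitably aligned */
    secp256k1_context *ctx;
    if (prealloc == NULL) return 0;
    ctx = secp256k1_context_preallocated_create(prealloc, SECP256K1_CONTEXT_SIGN);
    /* ... use ctx like any other context; do not touch `prealloc` meanwhile ... */
    secp256k1_context_preallocated_destroy(ctx);   /* not secp256k1_context_destroy */
    free(prealloc);                    /* the memory remains owned by the caller */
    return 1;
}
```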

View File

@ -8,6 +8,6 @@ Description: Optimized C library for EC operations on curve secp256k1
URL: https://github.com/bitcoin-core/secp256k1
Version: @PACKAGE_VERSION@
Cflags: -I${includedir}
Libs.private: @SECP_LIBS@
Libs: -L${libdir} -lsecp256k1
Libs.private: @SECP_LIBS@

View File

@ -16,15 +16,9 @@ Note:
*/
.syntax unified
.arch armv7-a
@ eabi attributes - see readelf -A
.eabi_attribute 8, 1 @ Tag_ARM_ISA_use = yes
.eabi_attribute 9, 0 @ Tag_Thumb_ISA_use = no
.eabi_attribute 10, 0 @ Tag_FP_arch = none
.eabi_attribute 24, 1 @ Tag_ABI_align_needed = 8-byte
.eabi_attribute 25, 1 @ Tag_ABI_align_preserved = 8-byte, except leaf SP
.eabi_attribute 30, 2 @ Tag_ABI_optimization_goals = Aggressive Speed
.eabi_attribute 34, 1 @ Tag_CPU_unaligned_access = v6
.text
@ Field constants

View File

@ -10,7 +10,10 @@
#ifdef USE_BASIC_CONFIG
#undef USE_ASM_X86_64
#undef USE_ECMULT_STATIC_PRECOMPUTATION
#undef USE_ENDOMORPHISM
#undef USE_EXTERNAL_ASM
#undef USE_EXTERNAL_DEFAULT_CALLBACKS
#undef USE_FIELD_10X26
#undef USE_FIELD_5X52
#undef USE_FIELD_INV_BUILTIN
@ -27,6 +30,7 @@
#define USE_SCALAR_INV_BUILTIN 1
#define USE_FIELD_10X26 1
#define USE_SCALAR_8X32 1
#define ECMULT_WINDOW_SIZE 15
#endif /* USE_BASIC_CONFIG */

View File

@ -64,7 +64,7 @@ static void bench_ecmult(void* arg) {
size_t iter;
for (iter = 0; iter < iters; ++iter) {
data->ecmult_multi(&data->ctx->ecmult_ctx, data->scratch, &data->output[iter], data->includes_g ? &data->scalars[data->offset1] : NULL, bench_callback, arg, count - includes_g);
data->ecmult_multi(&data->ctx->error_callback, &data->ctx->ecmult_ctx, data->scratch, &data->output[iter], data->includes_g ? &data->scalars[data->offset1] : NULL, bench_callback, arg, count - includes_g);
data->offset1 = (data->offset1 + count) % POINTS;
data->offset2 = (data->offset2 + count - 1) % POINTS;
}
@ -139,6 +139,11 @@ int main(int argc, char **argv) {
secp256k1_gej* pubkeys_gej;
size_t scratch_size;
data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
scratch_size = secp256k1_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16;
data.scratch = secp256k1_scratch_space_create(data.ctx, scratch_size);
data.ecmult_multi = secp256k1_ecmult_multi_var;
if (argc > 1) {
if(have_flag(argc, argv, "pippenger_wnaf")) {
printf("Using pippenger_wnaf:\n");
@ -146,15 +151,19 @@ int main(int argc, char **argv) {
} else if(have_flag(argc, argv, "strauss_wnaf")) {
printf("Using strauss_wnaf:\n");
data.ecmult_multi = secp256k1_ecmult_strauss_batch_single;
}
} else {
} else if(have_flag(argc, argv, "simple")) {
printf("Using simple algorithm:\n");
data.ecmult_multi = secp256k1_ecmult_multi_var;
secp256k1_scratch_space_destroy(data.ctx, data.scratch);
data.scratch = NULL;
} else {
fprintf(stderr, "%s: unrecognized argument '%s'.\n", argv[0], argv[1]);
fprintf(stderr, "Use 'pippenger_wnaf', 'strauss_wnaf', 'simple' or no argument to benchmark a combined algorithm.\n");
return 1;
}
}
/* Allocate stuff */
data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
scratch_size = secp256k1_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16;
data.scratch = secp256k1_scratch_space_create(data.ctx, scratch_size);
data.scalars = malloc(sizeof(secp256k1_scalar) * POINTS);
data.seckeys = malloc(sizeof(secp256k1_scalar) * POINTS);
data.pubkeys = malloc(sizeof(secp256k1_ge) * POINTS);
@ -172,7 +181,7 @@ int main(int argc, char **argv) {
secp256k1_scalar_add(&data.seckeys[i], &data.seckeys[i - 1], &data.seckeys[i - 1]);
}
}
secp256k1_ge_set_all_gej_var(data.pubkeys, pubkeys_gej, POINTS, &data.ctx->error_callback);
secp256k1_ge_set_all_gej_var(data.pubkeys, pubkeys_gej, POINTS);
free(pubkeys_gej);
for (i = 1; i <= 8; ++i) {
@ -184,8 +193,10 @@ int main(int argc, char **argv) {
run_test(&data, i << p, 1);
}
}
if (data.scratch != NULL) {
secp256k1_scratch_space_destroy(data.ctx, data.scratch);
}
secp256k1_context_destroy(data.ctx);
secp256k1_scratch_space_destroy(data.scratch);
free(data.scalars);
free(data.pubkeys);
free(data.seckeys);

View File

@ -184,9 +184,11 @@ void bench_field_inverse_var(void* arg) {
void bench_field_sqrt(void* arg) {
int i;
bench_inv *data = (bench_inv*)arg;
secp256k1_fe t;
for (i = 0; i < 20000; i++) {
secp256k1_fe_sqrt(&data->fe_x, &data->fe_x);
t = data->fe_x;
secp256k1_fe_sqrt(&data->fe_x, &t);
secp256k1_fe_add(&data->fe_x, &data->fe_y);
}
}
@ -251,7 +253,7 @@ void bench_wnaf_const(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 20000; i++) {
secp256k1_wnaf_const(data->wnaf, data->scalar_x, WINDOW_A, 256);
secp256k1_wnaf_const(data->wnaf, &data->scalar_x, WINDOW_A, 256);
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
}
}

View File

@ -18,7 +18,7 @@ static int secp256k1_eckey_pubkey_parse(secp256k1_ge *elem, const unsigned char
if (size == 33 && (pub[0] == SECP256K1_TAG_PUBKEY_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_ODD)) {
secp256k1_fe x;
return secp256k1_fe_set_b32(&x, pub+1) && secp256k1_ge_set_xo_var(elem, &x, pub[0] == SECP256K1_TAG_PUBKEY_ODD);
} else if (size == 65 && (pub[0] == 0x04 || pub[0] == 0x06 || pub[0] == 0x07)) {
} else if (size == 65 && (pub[0] == SECP256K1_TAG_PUBKEY_UNCOMPRESSED || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) {
secp256k1_fe x, y;
if (!secp256k1_fe_set_b32(&x, pub+1) || !secp256k1_fe_set_b32(&y, pub+33)) {
return 0;
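[Editor's note: the tag byte constants used in the new version of this check are public names from secp256k1.h. A small illustrative helper, not part of the library, mapping a tag byte to the serialized key length it implies:]

```c
#include <stddef.h>
#include <secp256k1.h>

static size_t expected_pubkey_size(unsigned char tag) {
    switch (tag) {
    case SECP256K1_TAG_PUBKEY_EVEN:          /* 0x02 */
    case SECP256K1_TAG_PUBKEY_ODD:           /* 0x03 */
        return 33;                           /* compressed */
    case SECP256K1_TAG_PUBKEY_UNCOMPRESSED:  /* 0x04 */
    case SECP256K1_TAG_PUBKEY_HYBRID_EVEN:   /* 0x06 */
    case SECP256K1_TAG_PUBKEY_HYBRID_ODD:    /* 0x07 */
        return 65;                           /* uncompressed or hybrid */
    default:
        return 0;                            /* not a valid prefix byte */
    }
}
```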

View File

@ -20,10 +20,10 @@ typedef struct {
#endif
} secp256k1_ecmult_context;
static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx);
static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, const secp256k1_callback *cb);
static void secp256k1_ecmult_context_clone(secp256k1_ecmult_context *dst,
const secp256k1_ecmult_context *src, const secp256k1_callback *cb);
static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc);
static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src);
static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx);
static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx);
@ -37,11 +37,12 @@ typedef int (secp256k1_ecmult_multi_callback)(secp256k1_scalar *sc, secp256k1_ge
* Chooses the right algorithm for a given number of points and scratch space
* size. Resets and overwrites the given scratch space. If the points do not
* fit in the scratch space the algorithm is repeatedly run with batches of
* points.
* points. If no scratch space is given then a simple algorithm is used that
* simply multiplies the points with the corresponding scalars and adds them up.
* Returns: 1 on success (including when inp_g_sc is NULL and n is 0)
* 0 if there is not enough scratch space for a single point or
* callback returns 0
*/
static int secp256k1_ecmult_multi_var(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n);
static int secp256k1_ecmult_multi_var(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n);
#endif /* SECP256K1_ECMULT_H */

View File

@ -48,7 +48,7 @@
*
* Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on pp. 335
*/
static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w, int size) {
static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w, int size) {
int global_sign;
int skew = 0;
int word = 0;
@ -59,8 +59,12 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w, int size)
int flip;
int bit;
secp256k1_scalar neg_s;
secp256k1_scalar s;
int not_neg_one;
VERIFY_CHECK(w > 0);
VERIFY_CHECK(size > 0);
/* Note that we cannot handle even numbers by negating them to be odd, as is
* done in other implementations, since if our scalars were specified to have
* width < 256 for performance reasons, their negations would have width 256
@ -75,12 +79,13 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w, int size)
* {1, 2} we want to add to the scalar when ensuring that it's odd. Further
* complicating things, -1 interacts badly with `secp256k1_scalar_cadd_bit` and
* we need to special-case it in this logic. */
flip = secp256k1_scalar_is_high(&s);
flip = secp256k1_scalar_is_high(scalar);
/* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */
bit = flip ^ !secp256k1_scalar_is_even(&s);
bit = flip ^ !secp256k1_scalar_is_even(scalar);
/* We check for negative one, since adding 2 to it will cause an overflow */
secp256k1_scalar_negate(&neg_s, &s);
not_neg_one = !secp256k1_scalar_is_one(&neg_s);
secp256k1_scalar_negate(&s, scalar);
not_neg_one = !secp256k1_scalar_is_one(&s);
s = *scalar;
secp256k1_scalar_cadd_bit(&s, bit, not_neg_one);
/* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects
* that we added two to it and flipped it. In fact for -1 these operations are
@ -93,7 +98,7 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w, int size)
/* 4 */
u_last = secp256k1_scalar_shr_int(&s, w);
while (word * w < size) {
do {
int sign;
int even;
@ -109,7 +114,7 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w, int size)
wnaf[word++] = u_last * global_sign;
u_last = u;
}
} while (word * w < size);
wnaf[word] = u * global_sign;
VERIFY_CHECK(secp256k1_scalar_is_zero(&s));
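[Editor's note: as an aside, a plain variable-time width-w NAF decomposition (not the constant-time routine above, which handles skew and fixed length differently) may help when reading the wnaf[] digits: every nonzero digit is odd with absolute value below 2^(w-1), and the digits reconstruct the scalar as sum(wnaf[i] * 2^i). This toy works on a small integer rather than a secp256k1_scalar.]

```c
#include <stdint.h>

/* Toy variable-time wNAF for a small non-negative integer (illustrative only). */
static int wnaf_var_toy(int *wnaf, int64_t n, int w) {
    int len = 0;
    while (n != 0) {
        int d = 0;
        if (n & 1) {
            d = (int)(n & ((1 << w) - 1));          /* n mod 2^w */
            if (d > (1 << (w - 1))) d -= (1 << w);  /* pick the odd signed residue */
            n -= d;                                  /* n becomes even */
        }
        wnaf[len++] = d;
        n >>= 1;
    }
    return len;  /* e.g. n=9, w=4 gives digits {-7, 0, 0, 0, 1}: 9 = 16 - 7 */
}
```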
@ -132,7 +137,6 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];
int i;
secp256k1_scalar sc = *scalar;
/* build wnaf representation for q. */
int rsize = size;
@ -140,13 +144,13 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
if (size > 128) {
rsize = 128;
/* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
secp256k1_scalar_split_lambda(&q_1, &q_lam, &sc);
skew_1 = secp256k1_wnaf_const(wnaf_1, q_1, WINDOW_A - 1, 128);
skew_lam = secp256k1_wnaf_const(wnaf_lam, q_lam, WINDOW_A - 1, 128);
secp256k1_scalar_split_lambda(&q_1, &q_lam, scalar);
skew_1 = secp256k1_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128);
skew_lam = secp256k1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128);
} else
#endif
{
skew_1 = secp256k1_wnaf_const(wnaf_1, sc, WINDOW_A - 1, size);
skew_1 = secp256k1_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size);
#ifdef USE_ENDOMORPHISM
skew_lam = 0;
#endif

View File

@ -28,10 +28,10 @@ typedef struct {
secp256k1_gej initial;
} secp256k1_ecmult_gen_context;
static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context* ctx);
static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context* ctx, const secp256k1_callback* cb);
static void secp256k1_ecmult_gen_context_clone(secp256k1_ecmult_gen_context *dst,
const secp256k1_ecmult_gen_context* src, const secp256k1_callback* cb);
static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context* ctx, void **prealloc);
static void secp256k1_ecmult_gen_context_finalize_memcpy(secp256k1_ecmult_gen_context *dst, const secp256k1_ecmult_gen_context* src);
static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context* ctx);
static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_context* ctx);

View File

@ -7,6 +7,7 @@
#ifndef SECP256K1_ECMULT_GEN_IMPL_H
#define SECP256K1_ECMULT_GEN_IMPL_H
#include "util.h"
#include "scalar.h"
#include "group.h"
#include "ecmult_gen.h"
@ -14,23 +15,32 @@
#ifdef USE_ECMULT_STATIC_PRECOMPUTATION
#include "ecmult_static_context.h"
#endif
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = ROUND_TO_ALIGN(sizeof(*((secp256k1_ecmult_gen_context*) NULL)->prec));
#else
static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = 0;
#endif
static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context *ctx) {
ctx->prec = NULL;
}
static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx, const secp256k1_callback* cb) {
static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx, void **prealloc) {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
secp256k1_ge prec[1024];
secp256k1_gej gj;
secp256k1_gej nums_gej;
int i, j;
size_t const prealloc_size = SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
void* const base = *prealloc;
#endif
if (ctx->prec != NULL) {
return;
}
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
ctx->prec = (secp256k1_ge_storage (*)[64][16])checked_malloc(cb, sizeof(*ctx->prec));
ctx->prec = (secp256k1_ge_storage (*)[64][16])manual_alloc(prealloc, prealloc_size, base, prealloc_size);
/* get the generator */
secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g);
@ -77,7 +87,7 @@ static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx
secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL);
}
}
secp256k1_ge_set_all_gej_var(prec, precj, 1024, cb);
secp256k1_ge_set_all_gej_var(prec, precj, 1024);
}
for (j = 0; j < 64; j++) {
for (i = 0; i < 16; i++) {
@ -85,7 +95,7 @@ static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx
}
}
#else
(void)cb;
(void)prealloc;
ctx->prec = (secp256k1_ge_storage (*)[64][16])secp256k1_ecmult_static_context;
#endif
secp256k1_ecmult_gen_blind(ctx, NULL);
@ -95,27 +105,18 @@ static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_cont
return ctx->prec != NULL;
}
static void secp256k1_ecmult_gen_context_clone(secp256k1_ecmult_gen_context *dst,
const secp256k1_ecmult_gen_context *src, const secp256k1_callback* cb) {
if (src->prec == NULL) {
dst->prec = NULL;
} else {
static void secp256k1_ecmult_gen_context_finalize_memcpy(secp256k1_ecmult_gen_context *dst, const secp256k1_ecmult_gen_context *src) {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
dst->prec = (secp256k1_ge_storage (*)[64][16])checked_malloc(cb, sizeof(*dst->prec));
memcpy(dst->prec, src->prec, sizeof(*dst->prec));
#else
(void)cb;
dst->prec = src->prec;
#endif
dst->initial = src->initial;
dst->blind = src->blind;
if (src->prec != NULL) {
/* We cast to void* first to suppress a -Wcast-align warning. */
dst->prec = (secp256k1_ge_storage (*)[64][16])(void*)((unsigned char*)dst + ((unsigned char*)src->prec - (unsigned char*)src));
}
#else
(void)dst, (void)src;
#endif
}
static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context *ctx) {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
free(ctx->prec);
#endif
secp256k1_scalar_clear(&ctx->blind);
secp256k1_gej_clear(&ctx->initial);
ctx->prec = NULL;

View File

@ -10,6 +10,7 @@
#include <string.h>
#include <stdint.h>
#include "util.h"
#include "group.h"
#include "scalar.h"
#include "ecmult.h"
@ -31,15 +32,31 @@
#else
/* optimal for 128-bit and 256-bit exponents. */
# define WINDOW_A 5
/** larger numbers may result in slightly better performance, at the cost of
exponentially larger precomputed tables. */
#ifdef USE_ENDOMORPHISM
/** Two tables for window size 15: 1.375 MiB. */
#define WINDOW_G 15
#else
/** One table for window size 16: 1.375 MiB. */
#define WINDOW_G 16
/** Larger values for ECMULT_WINDOW_SIZE result in possibly better
* performance at the cost of an exponentially larger precomputed
* table. The exact table size is
* (1 << (WINDOW_G - 2)) * sizeof(secp256k1_ge_storage) bytes,
* where sizeof(secp256k1_ge_storage) is typically 64 bytes but can
* be larger due to platform-specific padding and alignment.
* If the endomorphism optimization is enabled (USE_ENDOMORPHISM)
* two tables of this size are used instead of only one.
*/
# define WINDOW_G ECMULT_WINDOW_SIZE
#endif
/* No one will ever need more than a window size of 24. The code might
* be correct for larger values of ECMULT_WINDOW_SIZE but this is not
* tested.
*
* The following limitations are known, and there are probably more:
* If WINDOW_G > 27 and size_t has 32 bits, then the code is incorrect
* because the size of the memory object that we allocate (in bytes)
* will not fit in a size_t.
* If WINDOW_G > 31 and int has 32 bits, then the code is incorrect
* because certain expressions will overflow.
*/
#if ECMULT_WINDOW_SIZE < 2 || ECMULT_WINDOW_SIZE > 24
# error Set ECMULT_WINDOW_SIZE to an integer in range [2..24].
#endif
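[Editor's note: a hedged arithmetic check of the table-size formula in the comment above, assuming the typical 64-byte secp256k1_ge_storage it mentions; this helper does not exist in the library.]

```c
#include <stddef.h>
#include <stdio.h>

static void print_table_size(int window_g) {
    size_t entries = (size_t)1 << (window_g - 2);   /* ECMULT_TABLE_SIZE(WINDOW_G) */
    size_t bytes = entries * 64;                    /* sizeof(secp256k1_ge_storage), typically 64 */
    printf("WINDOW_G=%d: %zu entries, %zu KiB per table\n",
           window_g, entries, bytes >> 10);
}
/* For the default ECMULT_WINDOW_SIZE of 15: 8192 entries, 512 KiB per table;
 * with USE_ENDOMORPHISM two tables of this size are kept. */
```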
#ifdef USE_ENDOMORPHISM
@ -137,24 +154,135 @@ static void secp256k1_ecmult_odd_multiples_table_globalz_windowa(secp256k1_ge *p
secp256k1_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A), pre, globalz, prej, zr);
}
static void secp256k1_ecmult_odd_multiples_table_storage_var(int n, secp256k1_ge_storage *pre, const secp256k1_gej *a, const secp256k1_callback *cb) {
secp256k1_gej *prej = (secp256k1_gej*)checked_malloc(cb, sizeof(secp256k1_gej) * n);
secp256k1_ge *prea = (secp256k1_ge*)checked_malloc(cb, sizeof(secp256k1_ge) * n);
secp256k1_fe *zr = (secp256k1_fe*)checked_malloc(cb, sizeof(secp256k1_fe) * n);
static void secp256k1_ecmult_odd_multiples_table_storage_var(const int n, secp256k1_ge_storage *pre, const secp256k1_gej *a) {
secp256k1_gej d;
secp256k1_ge d_ge, p_ge;
secp256k1_gej pj;
secp256k1_fe zi;
secp256k1_fe zr;
secp256k1_fe dx_over_dz_squared;
int i;
/* Compute the odd multiples in Jacobian form. */
secp256k1_ecmult_odd_multiples_table(n, prej, zr, a);
/* Convert them in batch to affine coordinates. */
secp256k1_ge_set_table_gej_var(prea, prej, zr, n);
/* Convert them to compact storage form. */
for (i = 0; i < n; i++) {
secp256k1_ge_to_storage(&pre[i], &prea[i]);
VERIFY_CHECK(!a->infinity);
secp256k1_gej_double_var(&d, a, NULL);
/* First, we perform all the additions in an isomorphic curve obtained by multiplying
* all `z` coordinates by 1/`d.z`. In these coordinates `d` is affine so we can use
* `secp256k1_gej_add_ge_var` to perform the additions. For each addition, we store
* the resulting y-coordinate and the z-ratio, since we only have enough memory to
* store two field elements. These are sufficient to efficiently undo the isomorphism
* and recompute all the `x`s.
*/
d_ge.x = d.x;
d_ge.y = d.y;
d_ge.infinity = 0;
secp256k1_ge_set_gej_zinv(&p_ge, a, &d.z);
pj.x = p_ge.x;
pj.y = p_ge.y;
pj.z = a->z;
pj.infinity = 0;
for (i = 0; i < (n - 1); i++) {
secp256k1_fe_normalize_var(&pj.y);
secp256k1_fe_to_storage(&pre[i].y, &pj.y);
secp256k1_gej_add_ge_var(&pj, &pj, &d_ge, &zr);
secp256k1_fe_normalize_var(&zr);
secp256k1_fe_to_storage(&pre[i].x, &zr);
}
free(prea);
free(prej);
free(zr);
/* Invert d.z in the same batch, preserving pj.z so we can extract 1/d.z */
secp256k1_fe_mul(&zi, &pj.z, &d.z);
secp256k1_fe_inv_var(&zi, &zi);
/* Directly set `pre[n - 1]` to `pj`, saving the inverted z-coordinate so
* that we can combine it with the saved z-ratios to compute the other zs
* without any more inversions. */
secp256k1_ge_set_gej_zinv(&p_ge, &pj, &zi);
secp256k1_ge_to_storage(&pre[n - 1], &p_ge);
/* Compute the actual x-coordinate of D, which will be needed below. */
secp256k1_fe_mul(&d.z, &zi, &pj.z); /* d.z = 1/d.z */
secp256k1_fe_sqr(&dx_over_dz_squared, &d.z);
secp256k1_fe_mul(&dx_over_dz_squared, &dx_over_dz_squared, &d.x);
/* Going into the second loop, we have set `pre[n-1]` to its final affine
* form, but still need to set `pre[i]` for `i` in 0 through `n-2`. We
* have `zi = (p.z * d.z)^-1`, where
*
* `p.z` is the z-coordinate of the point on the isomorphic curve
* which was ultimately assigned to `pre[n-1]`.
* `d.z` is the multiplier that must be applied to all z-coordinates
* to move from our isomorphic curve back to secp256k1; so the
* product `p.z * d.z` is the z-coordinate of the secp256k1
* point assigned to `pre[n-1]`.
*
* All subsequent inverse-z-coordinates can be obtained by multiplying this
* factor by successive z-ratios, which is much more efficient than directly
* computing each one.
*
* Importantly, these inverse-zs will be coordinates of points on secp256k1,
* while our other stored values come from computations on the isomorphic
* curve. So in the below loop, we will take care not to actually use `zi`
* or any derived values until we're back on secp256k1.
*/
i = n - 1;
while (i > 0) {
secp256k1_fe zi2, zi3;
const secp256k1_fe *rzr;
i--;
secp256k1_ge_from_storage(&p_ge, &pre[i]);
/* For each remaining point, we extract the z-ratio from the stored
* x-coordinate, compute its z^-1 from that, and compute the full
* point from that. */
rzr = &p_ge.x;
secp256k1_fe_mul(&zi, &zi, rzr);
secp256k1_fe_sqr(&zi2, &zi);
secp256k1_fe_mul(&zi3, &zi2, &zi);
/* To compute the actual x-coordinate, we use the stored z ratio and
* y-coordinate, which we obtained from `secp256k1_gej_add_ge_var`
* in the loop above, as well as the inverse of the square of its
* z-coordinate. We store the latter in the `zi2` variable, which is
* computed iteratively starting from the overall Z inverse then
* multiplying by each z-ratio in turn.
*
* Denoting the z-ratio as `rzr`, we observe that it is equal to `h`
* from the inside of the above `gej_add_ge_var` call. This satisfies
*
* rzr = d_x * z^2 - x * d_z^2
*
* where (`d_x`, `d_z`) are Jacobian coordinates of `D` and `(x, z)`
* are Jacobian coordinates of our desired point -- except both are on
* the isomorphic curve that we were using when we called `gej_add_ge_var`.
* To get back to secp256k1, we must multiply both `z`s by `d_z`, or
* equivalently divide both `x`s by `d_z^2`. Our equation then becomes
*
* rzr = d_x * z^2 / d_z^2 - x
*
* (The left-hand-side, being a ratio of z-coordinates, is unaffected
* by the isomorphism.)
*
* Rearranging to solve for `x`, we have
*
* x = d_x * z^2 / d_z^2 - rzr
*
* But what we actually want is the affine coordinate `X = x/z^2`,
* which will satisfy
*
* X = d_x / d_z^2 - rzr / z^2
* = dx_over_dz_squared - rzr * zi2
*/
secp256k1_fe_mul(&p_ge.x, rzr, &zi2);
secp256k1_fe_negate(&p_ge.x, &p_ge.x, 1);
secp256k1_fe_add(&p_ge.x, &dx_over_dz_squared);
/* y is stored_y/z^3, as we expect */
secp256k1_fe_mul(&p_ge.y, &p_ge.y, &zi3);
/* Store */
secp256k1_ge_to_storage(&pre[i], &p_ge);
}
}
/** The following two macros retrieve a particular odd multiple from a table
@ -166,7 +294,8 @@ static void secp256k1_ecmult_odd_multiples_table_storage_var(int n, secp256k1_ge
if ((n) > 0) { \
*(r) = (pre)[((n)-1)/2]; \
} else { \
secp256k1_ge_neg((r), &(pre)[(-(n)-1)/2]); \
*(r) = (pre)[(-(n)-1)/2]; \
secp256k1_fe_negate(&((r)->y), &((r)->y), 1); \
} \
} while(0)
@ -178,10 +307,17 @@ static void secp256k1_ecmult_odd_multiples_table_storage_var(int n, secp256k1_ge
secp256k1_ge_from_storage((r), &(pre)[((n)-1)/2]); \
} else { \
secp256k1_ge_from_storage((r), &(pre)[(-(n)-1)/2]); \
secp256k1_ge_neg((r), (r)); \
secp256k1_fe_negate(&((r)->y), &((r)->y), 1); \
} \
} while(0)
static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE =
ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G))
#ifdef USE_ENDOMORPHISM
+ ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G))
#endif
;
static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx) {
ctx->pre_g = NULL;
#ifdef USE_ENDOMORPHISM
@ -189,8 +325,10 @@ static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx) {
#endif
}
static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, const secp256k1_callback *cb) {
static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc) {
secp256k1_gej gj;
void* const base = *prealloc;
size_t const prealloc_size = SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
if (ctx->pre_g != NULL) {
return;
@ -199,44 +337,44 @@ static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, const
/* get the generator */
secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g);
ctx->pre_g = (secp256k1_ge_storage (*)[])checked_malloc(cb, sizeof((*ctx->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G));
{
size_t size = sizeof((*ctx->pre_g)[0]) * ((size_t)ECMULT_TABLE_SIZE(WINDOW_G));
/* check for overflow */
VERIFY_CHECK(size / sizeof((*ctx->pre_g)[0]) == ((size_t)ECMULT_TABLE_SIZE(WINDOW_G)));
ctx->pre_g = (secp256k1_ge_storage (*)[])manual_alloc(prealloc, sizeof((*ctx->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size);
}
/* precompute the tables with odd multiples */
secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj, cb);
secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj);
#ifdef USE_ENDOMORPHISM
{
secp256k1_gej g_128j;
int i;
ctx->pre_g_128 = (secp256k1_ge_storage (*)[])checked_malloc(cb, sizeof((*ctx->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G));
size_t size = sizeof((*ctx->pre_g_128)[0]) * ((size_t) ECMULT_TABLE_SIZE(WINDOW_G));
/* check for overflow */
VERIFY_CHECK(size / sizeof((*ctx->pre_g_128)[0]) == ((size_t)ECMULT_TABLE_SIZE(WINDOW_G)));
ctx->pre_g_128 = (secp256k1_ge_storage (*)[])manual_alloc(prealloc, sizeof((*ctx->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size);
/* calculate 2^128*generator */
g_128j = gj;
for (i = 0; i < 128; i++) {
secp256k1_gej_double_var(&g_128j, &g_128j, NULL);
}
secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j, cb);
secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j);
}
#endif
}
static void secp256k1_ecmult_context_clone(secp256k1_ecmult_context *dst,
const secp256k1_ecmult_context *src, const secp256k1_callback *cb) {
if (src->pre_g == NULL) {
dst->pre_g = NULL;
} else {
size_t size = sizeof((*dst->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G);
dst->pre_g = (secp256k1_ge_storage (*)[])checked_malloc(cb, size);
memcpy(dst->pre_g, src->pre_g, size);
static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src) {
if (src->pre_g != NULL) {
/* We cast to void* first to suppress a -Wcast-align warning. */
dst->pre_g = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g) - (unsigned char*)src));
}
#ifdef USE_ENDOMORPHISM
if (src->pre_g_128 == NULL) {
dst->pre_g_128 = NULL;
} else {
size_t size = sizeof((*dst->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G);
dst->pre_g_128 = (secp256k1_ge_storage (*)[])checked_malloc(cb, size);
memcpy(dst->pre_g_128, src->pre_g_128, size);
if (src->pre_g_128 != NULL) {
dst->pre_g_128 = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g_128) - (unsigned char*)src));
}
#endif
}
@ -246,10 +384,6 @@ static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx
}
static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx) {
free(ctx->pre_g);
#ifdef USE_ENDOMORPHISM
free(ctx->pre_g_128);
#endif
secp256k1_ecmult_context_init(ctx);
}
@ -514,52 +648,55 @@ static size_t secp256k1_strauss_scratch_size(size_t n_points) {
return n_points*point_size;
}
static int secp256k1_ecmult_strauss_batch(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
static int secp256k1_ecmult_strauss_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
secp256k1_gej* points;
secp256k1_scalar* scalars;
struct secp256k1_strauss_state state;
size_t i;
const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
secp256k1_gej_set_infinity(r);
if (inp_g_sc == NULL && n_points == 0) {
return 1;
}
if (!secp256k1_scratch_allocate_frame(scratch, secp256k1_strauss_scratch_size(n_points), STRAUSS_SCRATCH_OBJECTS)) {
return 0;
}
points = (secp256k1_gej*)secp256k1_scratch_alloc(scratch, n_points * sizeof(secp256k1_gej));
scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(scratch, n_points * sizeof(secp256k1_scalar));
state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej));
state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe));
points = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_gej));
scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_scalar));
state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej));
state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe));
#ifdef USE_ENDOMORPHISM
state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
state.pre_a_lam = state.pre_a + n_points * ECMULT_TABLE_SIZE(WINDOW_A);
#else
state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
#endif
state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(scratch, n_points * sizeof(struct secp256k1_strauss_point_state));
state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(struct secp256k1_strauss_point_state));
if (points == NULL || scalars == NULL || state.prej == NULL || state.zr == NULL || state.pre_a == NULL) {
secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 0;
}
for (i = 0; i < n_points; i++) {
secp256k1_ge point;
if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) {
secp256k1_scratch_deallocate_frame(scratch);
secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 0;
}
secp256k1_gej_set_ge(&points[i], &point);
}
secp256k1_ecmult_strauss_wnaf(ctx, &state, r, n_points, points, scalars, inp_g_sc);
secp256k1_scratch_deallocate_frame(scratch);
secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 1;
}
/* Wrapper for secp256k1_ecmult_multi_func interface */
static int secp256k1_ecmult_strauss_batch_single(const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
return secp256k1_ecmult_strauss_batch(actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
static int secp256k1_ecmult_strauss_batch_single(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
return secp256k1_ecmult_strauss_batch(error_callback, actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
}
static size_t secp256k1_strauss_max_points(secp256k1_scratch *scratch) {
return secp256k1_scratch_max_allocation(scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1);
static size_t secp256k1_strauss_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) {
return secp256k1_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1);
}
/** Convert a number to WNAF notation.
@ -848,10 +985,11 @@ static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_windo
size_t entries = n_points + 1;
#endif
size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
return ((1<<bucket_window) * sizeof(secp256k1_gej) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size);
return (sizeof(secp256k1_gej) << bucket_window) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size;
}
static int secp256k1_ecmult_pippenger_batch(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
/* Use 2(n+1) with the endomorphism, n+1 without, when calculating batch
* sizes. The reason for +1 is that we add the G scalar to the list of
* other scalars. */
@ -876,15 +1014,21 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_ecmult_context *ctx,
}
bucket_window = secp256k1_pippenger_bucket_window(n_points);
if (!secp256k1_scratch_allocate_frame(scratch, secp256k1_pippenger_scratch_size(n_points, bucket_window), PIPPENGER_SCRATCH_OBJECTS)) {
points = (secp256k1_ge *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*points));
scalars = (secp256k1_scalar *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars));
state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(error_callback, scratch, sizeof(*state_space));
if (points == NULL || scalars == NULL || state_space == NULL) {
secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 0;
}
state_space->ps = (struct secp256k1_pippenger_point_state *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps));
state_space->wnaf_na = (int *) secp256k1_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
buckets = (secp256k1_gej *) secp256k1_scratch_alloc(error_callback, scratch, (1<<bucket_window) * sizeof(*buckets));
if (state_space->ps == NULL || state_space->wnaf_na == NULL || buckets == NULL) {
secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 0;
}
points = (secp256k1_ge *) secp256k1_scratch_alloc(scratch, entries * sizeof(*points));
scalars = (secp256k1_scalar *) secp256k1_scratch_alloc(scratch, entries * sizeof(*scalars));
state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(scratch, sizeof(*state_space));
state_space->ps = (struct secp256k1_pippenger_point_state *) secp256k1_scratch_alloc(scratch, entries * sizeof(*state_space->ps));
state_space->wnaf_na = (int *) secp256k1_scratch_alloc(scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
buckets = (secp256k1_gej *) secp256k1_scratch_alloc(scratch, (1<<bucket_window) * sizeof(*buckets));
if (inp_g_sc != NULL) {
scalars[0] = *inp_g_sc;
@ -898,7 +1042,7 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_ecmult_context *ctx,
while (point_idx < n_points) {
if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) {
secp256k1_scratch_deallocate_frame(scratch);
secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 0;
}
idx++;
@ -922,13 +1066,13 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_ecmult_context *ctx,
for(i = 0; i < 1<<bucket_window; i++) {
secp256k1_gej_clear(&buckets[i]);
}
secp256k1_scratch_deallocate_frame(scratch);
secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 1;
}
/* Wrapper for secp256k1_ecmult_multi_func interface */
static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
return secp256k1_ecmult_pippenger_batch(actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
return secp256k1_ecmult_pippenger_batch(error_callback, actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
}
/**
@ -936,8 +1080,8 @@ static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_ecmult_contex
* a given scratch space. The function ensures that fewer points may also be
* used.
*/
static size_t secp256k1_pippenger_max_points(secp256k1_scratch *scratch) {
size_t max_alloc = secp256k1_scratch_max_allocation(scratch, PIPPENGER_SCRATCH_OBJECTS);
static size_t secp256k1_pippenger_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) {
size_t max_alloc = secp256k1_scratch_max_allocation(error_callback, scratch, PIPPENGER_SCRATCH_OBJECTS);
int bucket_window;
size_t res = 0;
@ -951,7 +1095,7 @@ static size_t secp256k1_pippenger_max_points(secp256k1_scratch *scratch) {
#ifdef USE_ENDOMORPHISM
entry_size = 2*entry_size;
#endif
space_overhead = ((1<<bucket_window) * sizeof(secp256k1_gej) + entry_size + sizeof(struct secp256k1_pippenger_state));
space_overhead = (sizeof(secp256k1_gej) << bucket_window) + entry_size + sizeof(struct secp256k1_pippenger_state);
if (space_overhead > max_alloc) {
break;
}
@ -972,12 +1116,58 @@ static size_t secp256k1_pippenger_max_points(secp256k1_scratch *scratch) {
return res;
}
typedef int (*secp256k1_ecmult_multi_func)(const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t);
static int secp256k1_ecmult_multi_var(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
/* Computes ecmult_multi by simply multiplying and adding each point. Does not
* require a scratch space */
static int secp256k1_ecmult_multi_simple_var(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points) {
size_t point_idx;
secp256k1_scalar szero;
secp256k1_gej tmpj;
secp256k1_scalar_set_int(&szero, 0);
secp256k1_gej_set_infinity(r);
secp256k1_gej_set_infinity(&tmpj);
/* r = inp_g_sc*G */
secp256k1_ecmult(ctx, r, &tmpj, &szero, inp_g_sc);
for (point_idx = 0; point_idx < n_points; point_idx++) {
secp256k1_ge point;
secp256k1_gej pointj;
secp256k1_scalar scalar;
if (!cb(&scalar, &point, point_idx, cbdata)) {
return 0;
}
/* r += scalar*point */
secp256k1_gej_set_ge(&pointj, &point);
secp256k1_ecmult(ctx, &tmpj, &pointj, &scalar, NULL);
secp256k1_gej_add_var(r, r, &tmpj, NULL);
}
return 1;
}
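The batch functions above pull each (scalar, point) pair through a secp256k1_ecmult_multi_callback. A minimal sketch of a callback with the shape consumed here, modelled on the ecmult_multi_data pattern used by tests.c further down; the names demo_multi_data and demo_multi_callback are illustrative only:
/* Illustrative callback: copy the idx-th scalar/point out of arrays carried
 * in cbdata. Returning 0 aborts the multiplication, as handled by the batch
 * functions above. */
typedef struct { const secp256k1_scalar *sc; const secp256k1_ge *pt; } demo_multi_data;
static int demo_multi_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) {
    const demo_multi_data *data = (const demo_multi_data *)cbdata;
    *sc = data->sc[idx];
    *pt = data->pt[idx];
    return 1;
}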
/* Compute the number of batches and the batch size given the maximum batch size and the
* total number of points */
static int secp256k1_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) {
if (max_n_batch_points == 0) {
return 0;
}
if (max_n_batch_points > ECMULT_MAX_POINTS_PER_BATCH) {
max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH;
}
if (n == 0) {
*n_batches = 0;
*n_batch_points = 0;
return 1;
}
/* Compute ceil(n/max_n_batch_points) and ceil(n/n_batches) */
*n_batches = 1 + (n - 1) / max_n_batch_points;
*n_batch_points = 1 + (n - 1) / *n_batches;
return 1;
}
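A worked check of the ceiling-division arithmetic above, using hypothetical inputs (10001 points, at most 5000 per batch):
#include <assert.h>
#include <stddef.h>
/* Sketch with hypothetical numbers only; mirrors the two divisions above. */
static void demo_batch_sizes(void) {
    size_t n = 10001, max_n_batch_points = 5000;
    size_t n_batches = 1 + (n - 1) / max_n_batch_points; /* ceil(10001/5000) = 3 */
    size_t n_batch_points = 1 + (n - 1) / n_batches;     /* ceil(10001/3)    = 3334 */
    assert(n_batches == 3 && n_batch_points == 3334);
    assert(n_batches * n_batch_points >= n);             /* every point lands in some batch */
}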
typedef int (*secp256k1_ecmult_multi_func)(const secp256k1_callback* error_callback, const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t);
static int secp256k1_ecmult_multi_var(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
size_t i;
int (*f)(const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t, size_t);
size_t max_points;
int (*f)(const secp256k1_callback* error_callback, const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t, size_t);
size_t n_batches;
size_t n_batch_points;
@ -990,32 +1180,30 @@ static int secp256k1_ecmult_multi_var(const secp256k1_ecmult_context *ctx, secp2
secp256k1_ecmult(ctx, r, r, &szero, inp_g_sc);
return 1;
}
max_points = secp256k1_pippenger_max_points(scratch);
if (max_points == 0) {
return 0;
} else if (max_points > ECMULT_MAX_POINTS_PER_BATCH) {
max_points = ECMULT_MAX_POINTS_PER_BATCH;
if (scratch == NULL) {
return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
}
n_batches = (n+max_points-1)/max_points;
n_batch_points = (n+n_batches-1)/n_batches;
/* Compute the batch size for Pippenger's algorithm given the scratch space. If the batch size
 * is above a threshold, use Pippenger's algorithm; otherwise use Strauss' algorithm.
 * As a first step, check whether there is enough space for Pippenger's algorithm (which needs
 * less space than Strauss'); if not, fall back to the simple algorithm. */
if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_pippenger_max_points(error_callback, scratch), n)) {
return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
}
if (n_batch_points >= ECMULT_PIPPENGER_THRESHOLD) {
f = secp256k1_ecmult_pippenger_batch;
} else {
max_points = secp256k1_strauss_max_points(scratch);
if (max_points == 0) {
return 0;
if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_strauss_max_points(error_callback, scratch), n)) {
return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
}
n_batches = (n+max_points-1)/max_points;
n_batch_points = (n+n_batches-1)/n_batches;
f = secp256k1_ecmult_strauss_batch;
}
for(i = 0; i < n_batches; i++) {
size_t nbp = n < n_batch_points ? n : n_batch_points;
size_t offset = n_batch_points*i;
secp256k1_gej tmp;
if (!f(ctx, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {
if (!f(error_callback, ctx, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {
return 0;
}
secp256k1_gej_add_var(r, r, &tmp, NULL);


@ -10,7 +10,9 @@
#include <stdint.h>
typedef struct {
/* X = sum(i=0..9, elem[i]*2^26) mod n */
/* X = sum(i=0..9, n[i]*2^(i*26)) mod p
* where p = 2^256 - 0x1000003D1
*/
uint32_t n[10];
#ifdef VERIFY
int magnitude;


@ -8,7 +8,6 @@
#define SECP256K1_FIELD_REPR_IMPL_H
#include "util.h"
#include "num.h"
#include "field.h"
#ifdef VERIFY
@ -486,7 +485,8 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t
VERIFY_BITS(b[9], 26);
/** [... a b c] is a shorthand for ... + a<<52 + b<<26 + c<<0 mod n.
* px is a shorthand for sum(a[i]*b[x-i], i=0..x).
* for 0 <= x <= 9, px is a shorthand for sum(a[i]*b[x-i], i=0..x).
* for 9 <= x <= 18, px is a shorthand for sum(a[i]*b[x-i], i=(x-9)..9)
* Note that [x 0 0 0 0 0 0 0 0 0 0] = [x*R1 x*R0].
*/
@ -1069,6 +1069,7 @@ static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp2
secp256k1_fe_verify(a);
secp256k1_fe_verify(b);
VERIFY_CHECK(r != b);
VERIFY_CHECK(a != b);
#endif
secp256k1_fe_mul_inner(r->n, a->n, b->n);
#ifdef VERIFY


@ -10,7 +10,9 @@
#include <stdint.h>
typedef struct {
/* X = sum(i=0..4, elem[i]*2^52) mod n */
/* X = sum(i=0..4, n[i]*2^(i*52)) mod p
* where p = 2^256 - 0x1000003D1
*/
uint64_t n[5];
#ifdef VERIFY
int magnitude;


@ -12,7 +12,6 @@
#endif
#include "util.h"
#include "num.h"
#include "field.h"
#if defined(USE_ASM_X86_64)
@ -422,6 +421,7 @@ static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp2
secp256k1_fe_verify(a);
secp256k1_fe_verify(b);
VERIFY_CHECK(r != b);
VERIFY_CHECK(a != b);
#endif
secp256k1_fe_mul_inner(r->n, a->n, b->n);
#ifdef VERIFY


@ -32,9 +32,11 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t
VERIFY_BITS(b[3], 56);
VERIFY_BITS(b[4], 52);
VERIFY_CHECK(r != b);
VERIFY_CHECK(a != b);
/* [... a b c] is a shorthand for ... + a<<104 + b<<52 + c<<0 mod n.
* px is a shorthand for sum(a[i]*b[x-i], i=0..x).
* for 0 <= x <= 4, px is a shorthand for sum(a[i]*b[x-i], i=0..x).
* for 4 <= x <= 8, px is a shorthand for sum(a[i]*b[x-i], i=(x-4)..4)
* Note that [x 0 0 0 0 0] = [x*R].
*/


@ -12,6 +12,7 @@
#endif
#include "util.h"
#include "num.h"
#if defined(USE_FIELD_10X26)
#include "field_10x26_impl.h"
@ -48,6 +49,8 @@ static int secp256k1_fe_sqrt(secp256k1_fe *r, const secp256k1_fe *a) {
secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
int j;
VERIFY_CHECK(r != a);
/** The binary representation of (p + 1)/4 has 3 blocks of 1s, with lengths in
* { 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
* 1, [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]


@ -8,6 +8,7 @@
#include "basic-config.h"
#include "include/secp256k1.h"
#include "util.h"
#include "field_impl.h"
#include "scalar_impl.h"
#include "group_impl.h"
@ -26,6 +27,7 @@ static const secp256k1_callback default_error_callback = {
int main(int argc, char **argv) {
secp256k1_ecmult_gen_context ctx;
void *prealloc, *base;
int inner;
int outer;
FILE* fp;
@ -45,8 +47,10 @@ int main(int argc, char **argv) {
fprintf(fp, "#define SC SECP256K1_GE_STORAGE_CONST\n");
fprintf(fp, "static const secp256k1_ge_storage secp256k1_ecmult_static_context[64][16] = {\n");
base = checked_malloc(&default_error_callback, SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE);
prealloc = base;
secp256k1_ecmult_gen_context_init(&ctx);
secp256k1_ecmult_gen_context_build(&ctx, &default_error_callback);
secp256k1_ecmult_gen_context_build(&ctx, &prealloc);
for(outer = 0; outer != 64; outer++) {
fprintf(fp,"{\n");
for(inner = 0; inner != 16; inner++) {
@ -65,6 +69,7 @@ int main(int argc, char **argv) {
}
fprintf(fp,"};\n");
secp256k1_ecmult_gen_context_clear(&ctx);
free(base);
fprintf(fp, "#undef SC\n");
fprintf(fp, "#endif\n");


@ -65,12 +65,7 @@ static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a);
static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a);
/** Set a batch of group elements equal to the inputs given in jacobian coordinates */
static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len, const secp256k1_callback *cb);
/** Set a batch of group elements equal to the inputs given in jacobian
* coordinates (with known z-ratios). zr must contain the known z-ratios such
* that mul(a[i].z, zr[i+1]) == a[i+1].z. zr[0] is ignored. */
static void secp256k1_ge_set_table_gej_var(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr, size_t len);
static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len);
/** Bring a batch of inputs given in jacobian coordinates (with known z-ratios) to
* the same global z "denominator". zr must contain the known z-ratios such


@ -38,22 +38,22 @@
*/
#if defined(EXHAUSTIVE_TEST_ORDER)
# if EXHAUSTIVE_TEST_ORDER == 199
const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
0xFA7CC9A7, 0x0737F2DB, 0xA749DD39, 0x2B4FB069,
0x3B017A7D, 0xA808C2F1, 0xFB12940C, 0x9EA66C18,
0x78AC123A, 0x5ED8AEF3, 0x8732BC91, 0x1F3A2868,
0x48DF246C, 0x808DAE72, 0xCFE52572, 0x7F0501ED
);
const int CURVE_B = 4;
static const int CURVE_B = 4;
# elif EXHAUSTIVE_TEST_ORDER == 13
const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
0xedc60018, 0xa51a786b, 0x2ea91f4d, 0x4c9416c0,
0x9de54c3b, 0xa1316554, 0x6cf4345c, 0x7277ef15,
0x54cb1b6b, 0xdc8c1273, 0x087844ea, 0x43f4603e,
0x0eaf9a43, 0xf6effe55, 0x939f806d, 0x37adf8ac
);
const int CURVE_B = 2;
static const int CURVE_B = 2;
# else
# error No known generator for the specified exhaustive test group order.
# endif
@ -68,7 +68,7 @@ static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL
);
const int CURVE_B = 7;
static const int CURVE_B = 7;
#endif
static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) {
@ -126,46 +126,43 @@ static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) {
r->y = a->y;
}
static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len, const secp256k1_callback *cb) {
secp256k1_fe *az;
secp256k1_fe *azi;
static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len) {
secp256k1_fe u;
size_t i;
size_t count = 0;
az = (secp256k1_fe *)checked_malloc(cb, sizeof(secp256k1_fe) * len);
size_t last_i = SIZE_MAX;
for (i = 0; i < len; i++) {
if (!a[i].infinity) {
az[count++] = a[i].z;
/* Use destination's x coordinates as scratch space */
if (last_i == SIZE_MAX) {
r[i].x = a[i].z;
} else {
secp256k1_fe_mul(&r[i].x, &r[last_i].x, &a[i].z);
}
last_i = i;
}
}
if (last_i == SIZE_MAX) {
return;
}
secp256k1_fe_inv_var(&u, &r[last_i].x);
azi = (secp256k1_fe *)checked_malloc(cb, sizeof(secp256k1_fe) * count);
secp256k1_fe_inv_all_var(azi, az, count);
free(az);
i = last_i;
while (i > 0) {
i--;
if (!a[i].infinity) {
secp256k1_fe_mul(&r[last_i].x, &r[i].x, &u);
secp256k1_fe_mul(&u, &u, &a[last_i].z);
last_i = i;
}
}
VERIFY_CHECK(!a[last_i].infinity);
r[last_i].x = u;
count = 0;
for (i = 0; i < len; i++) {
r[i].infinity = a[i].infinity;
if (!a[i].infinity) {
secp256k1_ge_set_gej_zinv(&r[i], &a[i], &azi[count++]);
}
}
free(azi);
}
static void secp256k1_ge_set_table_gej_var(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr, size_t len) {
size_t i = len - 1;
secp256k1_fe zi;
if (len > 0) {
/* Compute the inverse of the last z coordinate, and use it to compute the last affine output. */
secp256k1_fe_inv(&zi, &a[i].z);
secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zi);
/* Work our way backwards, using the z-ratios to scale the x/y values. */
while (i > 0) {
secp256k1_fe_mul(&zi, &zi, &zr[i]);
i--;
secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zi);
secp256k1_ge_set_gej_zinv(&r[i], &a[i], &r[i].x);
}
}
}
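The rewritten secp256k1_ge_set_all_gej_var above replaces one field inversion per point with a single inversion plus a few multiplications per point, reusing the destination x coordinates as scratch and skipping points at infinity. A minimal sketch of the same batch-inversion idea over a small prime field, with plain integers instead of the library's field type and no infinity handling; all names and values here are illustrative:
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#define P 1000003u /* small prime, for illustration only */
static uint32_t mulmod(uint32_t a, uint32_t b) { return (uint32_t)(((uint64_t)a * b) % P); }
/* Single inversion via Fermat's little theorem: a^(P-2) mod P. */
static uint32_t invmod(uint32_t a) {
    uint32_t r = 1, e = P - 2;
    while (e) { if (e & 1) r = mulmod(r, a); a = mulmod(a, a); e >>= 1; }
    return r;
}
/* Invert x[0..n-1] in place using one modular inversion: forward prefix
 * products, one invmod, then a backward sweep, mirroring the gej->ge
 * batch conversion above. */
static void batch_inv(uint32_t *x, size_t n) {
    uint32_t prefix[8], u;
    size_t i;
    assert(n > 0 && n <= 8);
    prefix[0] = x[0];
    for (i = 1; i < n; i++) prefix[i] = mulmod(prefix[i - 1], x[i]);
    u = invmod(prefix[n - 1]);               /* u = 1 / (x[0]*...*x[n-1]) */
    for (i = n - 1; i > 0; i--) {
        uint32_t xi_inv = mulmod(u, prefix[i - 1]);
        u = mulmod(u, x[i]);                 /* now u = 1 / (x[0]*...*x[i-1]) */
        x[i] = xi_inv;
    }
    x[0] = u;
}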
@ -178,6 +175,8 @@ static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp
/* The z of the final point gives us the "global Z" for the table. */
r[i].x = a[i].x;
r[i].y = a[i].y;
/* Ensure all y values are in weak normal form for fast negation of points */
secp256k1_fe_normalize_weak(&r[i].y);
*globalz = a[i].z;
r[i].infinity = 0;
zs = zr[i];


@ -376,7 +376,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l)
/* extract m6 */
"movq %%r8, %q6\n"
: "=g"(m0), "=g"(m1), "=g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6)
: "S"(l), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
: "S"(l), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
: "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc");
/* Reduce 385 bits into 258. */
@ -455,7 +455,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l)
/* extract p4 */
"movq %%r9, %q4\n"
: "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4)
: "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
: "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
: "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc");
/* Reduce 258 bits into 256. */
@ -501,7 +501,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l)
/* Extract c */
"movq %%r9, %q0\n"
: "=g"(c)
: "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
: "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
: "rax", "rdx", "r8", "r9", "r10", "cc", "memory");
#else
uint128_t c;


@ -7,33 +7,36 @@
#ifndef _SECP256K1_SCRATCH_
#define _SECP256K1_SCRATCH_
#define SECP256K1_SCRATCH_MAX_FRAMES 5
/* The typedef is used internally; the struct name is used in the public API
* (where it is exposed as a different typedef) */
typedef struct secp256k1_scratch_space_struct {
void *data[SECP256K1_SCRATCH_MAX_FRAMES];
size_t offset[SECP256K1_SCRATCH_MAX_FRAMES];
size_t frame_size[SECP256K1_SCRATCH_MAX_FRAMES];
size_t frame;
/** guard against interpreting this object as other types */
unsigned char magic[8];
/** actual allocated data */
void *data;
/** amount that has been allocated (i.e. `data + alloc_size` is the next
* available pointer) */
size_t alloc_size;
/** maximum size available to allocate */
size_t max_size;
const secp256k1_callback* error_callback;
} secp256k1_scratch;
static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* error_callback, size_t max_size);
static void secp256k1_scratch_destroy(secp256k1_scratch* scratch);
static void secp256k1_scratch_destroy(const secp256k1_callback* error_callback, secp256k1_scratch* scratch);
/** Attempts to allocate a new stack frame with `n` available bytes. Returns 1 on success, 0 on failure */
static int secp256k1_scratch_allocate_frame(secp256k1_scratch* scratch, size_t n, size_t objects);
/** Returns an opaque object used to "checkpoint" a scratch space. Used
* with `secp256k1_scratch_apply_checkpoint` to undo allocations. */
static size_t secp256k1_scratch_checkpoint(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch);
/** Deallocates a stack frame */
static void secp256k1_scratch_deallocate_frame(secp256k1_scratch* scratch);
/** Applies a checkpoint received from `secp256k1_scratch_checkpoint`,
* undoing all allocations since that point. */
static void secp256k1_scratch_apply_checkpoint(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t checkpoint);
/** Returns the maximum allocation the scratch space will allow */
static size_t secp256k1_scratch_max_allocation(const secp256k1_scratch* scratch, size_t n_objects);
static size_t secp256k1_scratch_max_allocation(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch, size_t n_objects);
/** Returns a pointer into the scratch space, or NULL if there is insufficient available space */
static void *secp256k1_scratch_alloc(secp256k1_scratch* scratch, size_t n);
static void *secp256k1_scratch_alloc(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t n);
#endif
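A short sketch of the checkpoint discipline these declarations imply, mirroring how the batch functions above use it; use_scratch is an illustrative name, and cb and scratch are assumed to be valid objects:
static int use_scratch(const secp256k1_callback *cb, secp256k1_scratch *scratch) {
    size_t checkpoint = secp256k1_scratch_checkpoint(cb, scratch);
    unsigned char *buf = (unsigned char *)secp256k1_scratch_alloc(cb, scratch, 1024);
    if (buf == NULL) {
        /* Not enough space: roll back and report failure. */
        secp256k1_scratch_apply_checkpoint(cb, scratch, checkpoint);
        return 0;
    }
    /* ... use buf ... */
    /* Undo all allocations made since the checkpoint. */
    secp256k1_scratch_apply_checkpoint(cb, scratch, checkpoint);
    return 1;
}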


@ -7,78 +7,80 @@
#ifndef _SECP256K1_SCRATCH_IMPL_H_
#define _SECP256K1_SCRATCH_IMPL_H_
#include "util.h"
#include "scratch.h"
/* Using 16-byte alignment because common architectures never have alignment
 * requirements above 8 for any of the types we care about. In addition we
 * leave some room because currently we don't care about a few bytes.
 * TODO: Determine this at configure time. */
#define ALIGNMENT 16
static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* error_callback, size_t max_size) {
secp256k1_scratch* ret = (secp256k1_scratch*)checked_malloc(error_callback, sizeof(*ret));
static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* error_callback, size_t size) {
const size_t base_alloc = ((sizeof(secp256k1_scratch) + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT;
void *alloc = checked_malloc(error_callback, base_alloc + size);
secp256k1_scratch* ret = (secp256k1_scratch *)alloc;
if (ret != NULL) {
memset(ret, 0, sizeof(*ret));
ret->max_size = max_size;
ret->error_callback = error_callback;
memcpy(ret->magic, "scratch", 8);
ret->data = (void *) ((char *) alloc + base_alloc);
ret->max_size = size;
}
return ret;
}
static void secp256k1_scratch_destroy(secp256k1_scratch* scratch) {
static void secp256k1_scratch_destroy(const secp256k1_callback* error_callback, secp256k1_scratch* scratch) {
if (scratch != NULL) {
VERIFY_CHECK(scratch->frame == 0);
VERIFY_CHECK(scratch->alloc_size == 0); /* all checkpoints should be applied */
if (memcmp(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return;
}
memset(scratch->magic, 0, sizeof(scratch->magic));
free(scratch);
}
}
static size_t secp256k1_scratch_max_allocation(const secp256k1_scratch* scratch, size_t objects) {
size_t i = 0;
size_t allocated = 0;
for (i = 0; i < scratch->frame; i++) {
allocated += scratch->frame_size[i];
}
if (scratch->max_size - allocated <= objects * ALIGNMENT) {
static size_t secp256k1_scratch_checkpoint(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch) {
if (memcmp(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return 0;
}
return scratch->max_size - allocated - objects * ALIGNMENT;
return scratch->alloc_size;
}
static int secp256k1_scratch_allocate_frame(secp256k1_scratch* scratch, size_t n, size_t objects) {
VERIFY_CHECK(scratch->frame < SECP256K1_SCRATCH_MAX_FRAMES);
static void secp256k1_scratch_apply_checkpoint(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t checkpoint) {
if (memcmp(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return;
}
if (checkpoint > scratch->alloc_size) {
secp256k1_callback_call(error_callback, "invalid checkpoint");
return;
}
scratch->alloc_size = checkpoint;
}
if (n <= secp256k1_scratch_max_allocation(scratch, objects)) {
n += objects * ALIGNMENT;
scratch->data[scratch->frame] = checked_malloc(scratch->error_callback, n);
if (scratch->data[scratch->frame] == NULL) {
static size_t secp256k1_scratch_max_allocation(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch, size_t objects) {
if (memcmp(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return 0;
}
scratch->frame_size[scratch->frame] = n;
scratch->offset[scratch->frame] = 0;
scratch->frame++;
return 1;
} else {
if (scratch->max_size - scratch->alloc_size <= objects * (ALIGNMENT - 1)) {
return 0;
}
return scratch->max_size - scratch->alloc_size - objects * (ALIGNMENT - 1);
}
static void secp256k1_scratch_deallocate_frame(secp256k1_scratch* scratch) {
VERIFY_CHECK(scratch->frame > 0);
scratch->frame -= 1;
free(scratch->data[scratch->frame]);
}
static void *secp256k1_scratch_alloc(secp256k1_scratch* scratch, size_t size) {
static void *secp256k1_scratch_alloc(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t size) {
void *ret;
size_t frame = scratch->frame - 1;
size = ((size + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT;
size = ROUND_TO_ALIGN(size);
if (scratch->frame == 0 || size + scratch->offset[frame] > scratch->frame_size[frame]) {
if (memcmp(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return NULL;
}
ret = (void *) ((unsigned char *) scratch->data[frame] + scratch->offset[frame]);
if (size > scratch->max_size - scratch->alloc_size) {
return NULL;
}
ret = (void *) ((char *) scratch->data + scratch->alloc_size);
memset(ret, 0, size);
scratch->offset[frame] += size;
scratch->alloc_size += size;
return ret;
}


@ -5,6 +5,7 @@
**********************************************************************/
#include "include/secp256k1.h"
#include "include/secp256k1_preallocated.h"
#include "util.h"
#include "num_impl.h"
@ -26,28 +27,39 @@
} \
} while(0)
static void default_illegal_callback_fn(const char* str, void* data) {
#define ARG_CHECK_NO_RETURN(cond) do { \
if (EXPECT(!(cond), 0)) { \
secp256k1_callback_call(&ctx->illegal_callback, #cond); \
} \
} while(0)
#ifndef USE_EXTERNAL_DEFAULT_CALLBACKS
#include <stdlib.h>
#include <stdio.h>
static void secp256k1_default_illegal_callback_fn(const char* str, void* data) {
(void)data;
fprintf(stderr, "[libsecp256k1] illegal argument: %s\n", str);
abort();
}
static const secp256k1_callback default_illegal_callback = {
default_illegal_callback_fn,
NULL
};
static void default_error_callback_fn(const char* str, void* data) {
static void secp256k1_default_error_callback_fn(const char* str, void* data) {
(void)data;
fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str);
abort();
}
#else
void secp256k1_default_illegal_callback_fn(const char* str, void* data);
void secp256k1_default_error_callback_fn(const char* str, void* data);
#endif
static const secp256k1_callback default_error_callback = {
default_error_callback_fn,
static const secp256k1_callback default_illegal_callback = {
secp256k1_default_illegal_callback_fn,
NULL
};
static const secp256k1_callback default_error_callback = {
secp256k1_default_error_callback_fn,
NULL
};
struct secp256k1_context_struct {
secp256k1_ecmult_context ecmult_ctx;
@ -59,20 +71,55 @@ struct secp256k1_context_struct {
static const secp256k1_context secp256k1_context_no_precomp_ = {
{ 0 },
{ 0 },
{ default_illegal_callback_fn, 0 },
{ default_error_callback_fn, 0 }
{ secp256k1_default_illegal_callback_fn, 0 },
{ secp256k1_default_error_callback_fn, 0 }
};
const secp256k1_context *secp256k1_context_no_precomp = &secp256k1_context_no_precomp_;
secp256k1_context* secp256k1_context_create(unsigned int flags) {
secp256k1_context* ret = (secp256k1_context*)checked_malloc(&default_error_callback, sizeof(secp256k1_context));
size_t secp256k1_context_preallocated_size(unsigned int flags) {
size_t ret = ROUND_TO_ALIGN(sizeof(secp256k1_context));
if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) {
secp256k1_callback_call(&default_illegal_callback,
"Invalid flags");
return 0;
}
if (flags & SECP256K1_FLAGS_BIT_CONTEXT_SIGN) {
ret += SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
}
if (flags & SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) {
ret += SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
}
return ret;
}
size_t secp256k1_context_preallocated_clone_size(const secp256k1_context* ctx) {
size_t ret = ROUND_TO_ALIGN(sizeof(secp256k1_context));
VERIFY_CHECK(ctx != NULL);
if (secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) {
ret += SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
}
if (secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)) {
ret += SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
}
return ret;
}
secp256k1_context* secp256k1_context_preallocated_create(void* prealloc, unsigned int flags) {
void* const base = prealloc;
size_t prealloc_size;
secp256k1_context* ret;
VERIFY_CHECK(prealloc != NULL);
prealloc_size = secp256k1_context_preallocated_size(flags);
ret = (secp256k1_context*)manual_alloc(&prealloc, sizeof(secp256k1_context), base, prealloc_size);
ret->illegal_callback = default_illegal_callback;
ret->error_callback = default_error_callback;
if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) {
secp256k1_callback_call(&ret->illegal_callback,
"Invalid flags");
free(ret);
return NULL;
}
@ -80,47 +127,79 @@ secp256k1_context* secp256k1_context_create(unsigned int flags) {
secp256k1_ecmult_gen_context_init(&ret->ecmult_gen_ctx);
if (flags & SECP256K1_FLAGS_BIT_CONTEXT_SIGN) {
secp256k1_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &ret->error_callback);
secp256k1_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &prealloc);
}
if (flags & SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) {
secp256k1_ecmult_context_build(&ret->ecmult_ctx, &ret->error_callback);
secp256k1_ecmult_context_build(&ret->ecmult_ctx, &prealloc);
}
return (secp256k1_context*) ret;
}
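A sketch of how a caller is expected to drive the preallocated-context API, mirroring the pattern exercised by run_context_tests() in tests.c below; make_sign_ctx is an illustrative name and the flag choice is arbitrary:
#include <stdlib.h>
#include "include/secp256k1.h"
#include "include/secp256k1_preallocated.h"
/* Create a signing context in caller-provided memory. The caller keeps
 * ownership of the memory and frees it after _preallocated_destroy. */
static secp256k1_context* make_sign_ctx(void **mem_out) {
    size_t sz = secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN);
    void *mem = malloc(sz);
    if (mem == NULL) return NULL;
    *mem_out = mem;
    return secp256k1_context_preallocated_create(mem, SECP256K1_CONTEXT_SIGN);
}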
secp256k1_context* secp256k1_context_create(unsigned int flags) {
size_t const prealloc_size = secp256k1_context_preallocated_size(flags);
secp256k1_context* ctx = (secp256k1_context*)checked_malloc(&default_error_callback, prealloc_size);
if (EXPECT(secp256k1_context_preallocated_create(ctx, flags) == NULL, 0)) {
free(ctx);
return NULL;
}
return ctx;
}
secp256k1_context* secp256k1_context_preallocated_clone(const secp256k1_context* ctx, void* prealloc) {
size_t prealloc_size;
secp256k1_context* ret;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(prealloc != NULL);
prealloc_size = secp256k1_context_preallocated_clone_size(ctx);
ret = (secp256k1_context*)prealloc;
memcpy(ret, ctx, prealloc_size);
secp256k1_ecmult_gen_context_finalize_memcpy(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx);
secp256k1_ecmult_context_finalize_memcpy(&ret->ecmult_ctx, &ctx->ecmult_ctx);
return ret;
}
secp256k1_context* secp256k1_context_clone(const secp256k1_context* ctx) {
secp256k1_context* ret = (secp256k1_context*)checked_malloc(&ctx->error_callback, sizeof(secp256k1_context));
ret->illegal_callback = ctx->illegal_callback;
ret->error_callback = ctx->error_callback;
secp256k1_ecmult_context_clone(&ret->ecmult_ctx, &ctx->ecmult_ctx, &ctx->error_callback);
secp256k1_ecmult_gen_context_clone(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx, &ctx->error_callback);
secp256k1_context* ret;
size_t prealloc_size;
VERIFY_CHECK(ctx != NULL);
prealloc_size = secp256k1_context_preallocated_clone_size(ctx);
ret = (secp256k1_context*)checked_malloc(&ctx->error_callback, prealloc_size);
ret = secp256k1_context_preallocated_clone(ctx, ret);
return ret;
}
void secp256k1_context_destroy(secp256k1_context* ctx) {
CHECK(ctx != secp256k1_context_no_precomp);
void secp256k1_context_preallocated_destroy(secp256k1_context* ctx) {
ARG_CHECK_NO_RETURN(ctx != secp256k1_context_no_precomp);
if (ctx != NULL) {
secp256k1_ecmult_context_clear(&ctx->ecmult_ctx);
secp256k1_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx);
}
}
void secp256k1_context_destroy(secp256k1_context* ctx) {
if (ctx != NULL) {
secp256k1_context_preallocated_destroy(ctx);
free(ctx);
}
}
void secp256k1_context_set_illegal_callback(secp256k1_context* ctx, void (*fun)(const char* message, void* data), const void* data) {
CHECK(ctx != secp256k1_context_no_precomp);
ARG_CHECK_NO_RETURN(ctx != secp256k1_context_no_precomp);
if (fun == NULL) {
fun = default_illegal_callback_fn;
fun = secp256k1_default_illegal_callback_fn;
}
ctx->illegal_callback.fn = fun;
ctx->illegal_callback.data = data;
}
void secp256k1_context_set_error_callback(secp256k1_context* ctx, void (*fun)(const char* message, void* data), const void* data) {
CHECK(ctx != secp256k1_context_no_precomp);
ARG_CHECK_NO_RETURN(ctx != secp256k1_context_no_precomp);
if (fun == NULL) {
fun = default_error_callback_fn;
fun = secp256k1_default_error_callback_fn;
}
ctx->error_callback.fn = fun;
ctx->error_callback.data = data;
@ -131,8 +210,9 @@ secp256k1_scratch_space* secp256k1_scratch_space_create(const secp256k1_context*
return secp256k1_scratch_create(&ctx->error_callback, max_size);
}
void secp256k1_scratch_space_destroy(secp256k1_scratch_space* scratch) {
secp256k1_scratch_destroy(scratch);
void secp256k1_scratch_space_destroy(const secp256k1_context *ctx, secp256k1_scratch_space* scratch) {
VERIFY_CHECK(ctx != NULL);
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
}
static int secp256k1_pubkey_load(const secp256k1_context* ctx, secp256k1_ge* ge, const secp256k1_pubkey* pubkey) {
@ -457,6 +537,7 @@ int secp256k1_ec_privkey_negate(const secp256k1_context* ctx, unsigned char *sec
secp256k1_scalar_negate(&sec, &sec);
secp256k1_scalar_get_b32(seckey, &sec);
secp256k1_scalar_clear(&sec);
return 1;
}
@ -570,9 +651,9 @@ int secp256k1_ec_pubkey_tweak_mul(const secp256k1_context* ctx, secp256k1_pubkey
int secp256k1_context_randomize(secp256k1_context* ctx, const unsigned char *seed32) {
VERIFY_CHECK(ctx != NULL);
CHECK(ctx != secp256k1_context_no_precomp);
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
if (secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) {
secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32);
}
return 1;
}


@ -16,6 +16,7 @@
#include "secp256k1.c"
#include "include/secp256k1.h"
#include "include/secp256k1_preallocated.h"
#include "testrand_impl.h"
#ifdef ENABLE_OPENSSL_TESTS
@ -137,23 +138,47 @@ void random_scalar_order(secp256k1_scalar *num) {
} while(1);
}
void run_context_tests(void) {
void run_context_tests(int use_prealloc) {
secp256k1_pubkey pubkey;
secp256k1_pubkey zero_pubkey;
secp256k1_ecdsa_signature sig;
unsigned char ctmp[32];
int32_t ecount;
int32_t ecount2;
secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
secp256k1_context *none;
secp256k1_context *sign;
secp256k1_context *vrfy;
secp256k1_context *both;
void *none_prealloc = NULL;
void *sign_prealloc = NULL;
void *vrfy_prealloc = NULL;
void *both_prealloc = NULL;
secp256k1_gej pubj;
secp256k1_ge pub;
secp256k1_scalar msg, key, nonce;
secp256k1_scalar sigr, sigs;
if (use_prealloc) {
none_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE));
sign_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN));
vrfy_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY));
both_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY));
CHECK(none_prealloc != NULL);
CHECK(sign_prealloc != NULL);
CHECK(vrfy_prealloc != NULL);
CHECK(both_prealloc != NULL);
none = secp256k1_context_preallocated_create(none_prealloc, SECP256K1_CONTEXT_NONE);
sign = secp256k1_context_preallocated_create(sign_prealloc, SECP256K1_CONTEXT_SIGN);
vrfy = secp256k1_context_preallocated_create(vrfy_prealloc, SECP256K1_CONTEXT_VERIFY);
both = secp256k1_context_preallocated_create(both_prealloc, SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
} else {
none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
}
memset(&zero_pubkey, 0, sizeof(zero_pubkey));
ecount = 0;
@ -163,14 +188,57 @@ void run_context_tests(void) {
secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, NULL);
CHECK(vrfy->error_callback.fn != sign->error_callback.fn);
/* check if sizes for cloning are consistent */
CHECK(secp256k1_context_preallocated_clone_size(none) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE));
CHECK(secp256k1_context_preallocated_clone_size(sign) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN));
CHECK(secp256k1_context_preallocated_clone_size(vrfy) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY));
CHECK(secp256k1_context_preallocated_clone_size(both) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY));
/*** clone and destroy all of them to make sure cloning was complete ***/
{
secp256k1_context *ctx_tmp;
ctx_tmp = none; none = secp256k1_context_clone(none); secp256k1_context_destroy(ctx_tmp);
ctx_tmp = sign; sign = secp256k1_context_clone(sign); secp256k1_context_destroy(ctx_tmp);
ctx_tmp = vrfy; vrfy = secp256k1_context_clone(vrfy); secp256k1_context_destroy(ctx_tmp);
ctx_tmp = both; both = secp256k1_context_clone(both); secp256k1_context_destroy(ctx_tmp);
if (use_prealloc) {
/* clone into a non-preallocated context and then again into a new preallocated one. */
ctx_tmp = none; none = secp256k1_context_clone(none); secp256k1_context_preallocated_destroy(ctx_tmp);
free(none_prealloc); none_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(none_prealloc != NULL);
ctx_tmp = none; none = secp256k1_context_preallocated_clone(none, none_prealloc); secp256k1_context_destroy(ctx_tmp);
ctx_tmp = sign; sign = secp256k1_context_clone(sign); secp256k1_context_preallocated_destroy(ctx_tmp);
free(sign_prealloc); sign_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); CHECK(sign_prealloc != NULL);
ctx_tmp = sign; sign = secp256k1_context_preallocated_clone(sign, sign_prealloc); secp256k1_context_destroy(ctx_tmp);
ctx_tmp = vrfy; vrfy = secp256k1_context_clone(vrfy); secp256k1_context_preallocated_destroy(ctx_tmp);
free(vrfy_prealloc); vrfy_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); CHECK(vrfy_prealloc != NULL);
ctx_tmp = vrfy; vrfy = secp256k1_context_preallocated_clone(vrfy, vrfy_prealloc); secp256k1_context_destroy(ctx_tmp);
ctx_tmp = both; both = secp256k1_context_clone(both); secp256k1_context_preallocated_destroy(ctx_tmp);
free(both_prealloc); both_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(both_prealloc != NULL);
ctx_tmp = both; both = secp256k1_context_preallocated_clone(both, both_prealloc); secp256k1_context_destroy(ctx_tmp);
} else {
/* clone into a preallocated context and then again into a new non-preallocated one. */
void *prealloc_tmp;
prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(prealloc_tmp != NULL);
ctx_tmp = none; none = secp256k1_context_preallocated_clone(none, prealloc_tmp); secp256k1_context_destroy(ctx_tmp);
ctx_tmp = none; none = secp256k1_context_clone(none); secp256k1_context_preallocated_destroy(ctx_tmp);
free(prealloc_tmp);
prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); CHECK(prealloc_tmp != NULL);
ctx_tmp = sign; sign = secp256k1_context_preallocated_clone(sign, prealloc_tmp); secp256k1_context_destroy(ctx_tmp);
ctx_tmp = sign; sign = secp256k1_context_clone(sign); secp256k1_context_preallocated_destroy(ctx_tmp);
free(prealloc_tmp);
prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); CHECK(prealloc_tmp != NULL);
ctx_tmp = vrfy; vrfy = secp256k1_context_preallocated_clone(vrfy, prealloc_tmp); secp256k1_context_destroy(ctx_tmp);
ctx_tmp = vrfy; vrfy = secp256k1_context_clone(vrfy); secp256k1_context_preallocated_destroy(ctx_tmp);
free(prealloc_tmp);
prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(prealloc_tmp != NULL);
ctx_tmp = both; both = secp256k1_context_preallocated_clone(both, prealloc_tmp); secp256k1_context_destroy(ctx_tmp);
ctx_tmp = both; both = secp256k1_context_clone(both); secp256k1_context_preallocated_destroy(ctx_tmp);
free(prealloc_tmp);
}
}
/* Verify that the error callback makes it across the clone. */
@ -218,17 +286,17 @@ void run_context_tests(void) {
CHECK(ecount == 3);
CHECK(secp256k1_ec_pubkey_tweak_mul(vrfy, &pubkey, ctmp) == 1);
CHECK(ecount == 3);
CHECK(secp256k1_context_randomize(vrfy, ctmp) == 0);
CHECK(ecount == 4);
CHECK(secp256k1_context_randomize(vrfy, ctmp) == 1);
CHECK(ecount == 3);
CHECK(secp256k1_context_randomize(vrfy, NULL) == 1);
CHECK(ecount == 3);
CHECK(secp256k1_context_randomize(sign, ctmp) == 1);
CHECK(ecount2 == 14);
CHECK(secp256k1_context_randomize(sign, NULL) == 1);
CHECK(ecount2 == 14);
secp256k1_context_set_illegal_callback(vrfy, NULL, NULL);
secp256k1_context_set_illegal_callback(sign, NULL, NULL);
/* This shouldn't leak memory, due to already-set tests. */
secp256k1_ecmult_gen_context_build(&sign->ecmult_gen_ctx, NULL);
secp256k1_ecmult_context_build(&vrfy->ecmult_ctx, NULL);
/* obtain a working nonce */
do {
random_scalar_order_test(&nonce);
@ -243,49 +311,95 @@ void run_context_tests(void) {
CHECK(secp256k1_ecdsa_sig_verify(&both->ecmult_ctx, &sigr, &sigs, &pub, &msg));
/* cleanup */
if (use_prealloc) {
secp256k1_context_preallocated_destroy(none);
secp256k1_context_preallocated_destroy(sign);
secp256k1_context_preallocated_destroy(vrfy);
secp256k1_context_preallocated_destroy(both);
free(none_prealloc);
free(sign_prealloc);
free(vrfy_prealloc);
free(both_prealloc);
} else {
secp256k1_context_destroy(none);
secp256k1_context_destroy(sign);
secp256k1_context_destroy(vrfy);
secp256k1_context_destroy(both);
}
/* Defined as no-op. */
secp256k1_context_destroy(NULL);
secp256k1_context_preallocated_destroy(NULL);
}
void run_scratch_tests(void) {
const size_t adj_alloc = ((500 + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT;
int32_t ecount = 0;
size_t checkpoint;
size_t checkpoint_2;
secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
secp256k1_scratch_space *scratch;
secp256k1_scratch_space local_scratch;
/* Test public API */
secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount);
scratch = secp256k1_scratch_space_create(none, 1000);
CHECK(scratch != NULL);
CHECK(ecount == 0);
/* Test internal API */
CHECK(secp256k1_scratch_max_allocation(scratch, 0) == 1000);
CHECK(secp256k1_scratch_max_allocation(scratch, 1) < 1000);
CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000);
CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - (ALIGNMENT - 1));
CHECK(scratch->alloc_size == 0);
CHECK(scratch->alloc_size % ALIGNMENT == 0);
/* Allocating 500 bytes with no frame fails */
CHECK(secp256k1_scratch_alloc(scratch, 500) == NULL);
CHECK(secp256k1_scratch_max_allocation(scratch, 0) == 1000);
/* Allocating 500 bytes succeeds */
checkpoint = secp256k1_scratch_checkpoint(&none->error_callback, scratch);
CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) != NULL);
CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc);
CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1));
CHECK(scratch->alloc_size != 0);
CHECK(scratch->alloc_size % ALIGNMENT == 0);
/* ...but pushing a new stack frame does affect the max allocation */
CHECK(secp256k1_scratch_allocate_frame(scratch, 500, 1 == 1));
CHECK(secp256k1_scratch_max_allocation(scratch, 1) < 500); /* 500 - ALIGNMENT */
CHECK(secp256k1_scratch_alloc(scratch, 500) != NULL);
CHECK(secp256k1_scratch_alloc(scratch, 500) == NULL);
/* Allocating another 500 bytes fails */
CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) == NULL);
CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc);
CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1));
CHECK(scratch->alloc_size != 0);
CHECK(scratch->alloc_size % ALIGNMENT == 0);
CHECK(secp256k1_scratch_allocate_frame(scratch, 500, 1) == 0);
/* ...but it succeeds once we apply the checkpoint to undo it */
secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint);
CHECK(scratch->alloc_size == 0);
CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000);
CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) != NULL);
CHECK(scratch->alloc_size != 0);
/* ...and this effect is undone by popping the frame */
secp256k1_scratch_deallocate_frame(scratch);
CHECK(secp256k1_scratch_max_allocation(scratch, 0) == 1000);
CHECK(secp256k1_scratch_alloc(scratch, 500) == NULL);
/* try to apply a bad checkpoint */
checkpoint_2 = secp256k1_scratch_checkpoint(&none->error_callback, scratch);
secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint);
CHECK(ecount == 0);
secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint_2); /* checkpoint_2 is after checkpoint */
CHECK(ecount == 1);
secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, (size_t) -1); /* this is just wildly invalid */
CHECK(ecount == 2);
/* try to use badly initialized scratch space */
secp256k1_scratch_space_destroy(none, scratch);
memset(&local_scratch, 0, sizeof(local_scratch));
scratch = &local_scratch;
CHECK(!secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0));
CHECK(ecount == 3);
CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) == NULL);
CHECK(ecount == 4);
secp256k1_scratch_space_destroy(none, scratch);
CHECK(ecount == 5);
/* cleanup */
secp256k1_scratch_space_destroy(scratch);
secp256k1_scratch_space_destroy(none, NULL); /* no-op */
secp256k1_context_destroy(none);
}
@ -2095,7 +2209,6 @@ void test_ge(void) {
/* Test batch gej -> ge conversion with and without known z ratios. */
{
secp256k1_fe *zr = (secp256k1_fe *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_fe));
secp256k1_ge *ge_set_table = (secp256k1_ge *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_ge));
secp256k1_ge *ge_set_all = (secp256k1_ge *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_ge));
for (i = 0; i < 4 * runs + 1; i++) {
/* Compute gej[i + 1].z / gez[i].z (with gej[n].z taken to be 1). */
@ -2103,20 +2216,33 @@ void test_ge(void) {
secp256k1_fe_mul(&zr[i + 1], &zinv[i], &gej[i + 1].z);
}
}
secp256k1_ge_set_table_gej_var(ge_set_table, gej, zr, 4 * runs + 1);
secp256k1_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1, &ctx->error_callback);
secp256k1_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1);
for (i = 0; i < 4 * runs + 1; i++) {
secp256k1_fe s;
random_fe_non_zero(&s);
secp256k1_gej_rescale(&gej[i], &s);
ge_equals_gej(&ge_set_table[i], &gej[i]);
ge_equals_gej(&ge_set_all[i], &gej[i]);
}
free(ge_set_table);
free(ge_set_all);
free(zr);
}
/* Test batch gej -> ge conversion with many infinities. */
for (i = 0; i < 4 * runs + 1; i++) {
random_group_element_test(&ge[i]);
/* randomly set half the points to infinity */
if(secp256k1_fe_is_odd(&ge[i].x)) {
secp256k1_ge_set_infinity(&ge[i]);
}
secp256k1_gej_set_ge(&gej[i], &ge[i]);
}
/* batch invert */
secp256k1_ge_set_all_gej_var(ge, gej, 4 * runs + 1);
/* check result */
for (i = 0; i < 4 * runs + 1; i++) {
ge_equals_gej(&ge[i], &gej[i]);
}
free(ge);
free(gej);
free(zinv);
@ -2556,14 +2682,13 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
secp256k1_gej r;
secp256k1_gej r2;
ecmult_multi_data data;
secp256k1_scratch *scratch_empty;
data.sc = sc;
data.pt = pt;
secp256k1_scalar_set_int(&szero, 0);
/* No points to multiply */
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, NULL, ecmult_multi_callback, &data, 0));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, NULL, ecmult_multi_callback, &data, 0));
/* Check 1- and 2-point multiplies against ecmult */
for (ncount = 0; ncount < count; ncount++) {
@ -2579,36 +2704,31 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
/* only G scalar */
secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &szero, &sc[0]);
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &sc[0], ecmult_multi_callback, &data, 0));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &sc[0], ecmult_multi_callback, &data, 0));
secp256k1_gej_neg(&r2, &r2);
secp256k1_gej_add_var(&r, &r, &r2, NULL);
CHECK(secp256k1_gej_is_infinity(&r));
/* 1-point */
secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &szero);
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 1));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 1));
secp256k1_gej_neg(&r2, &r2);
secp256k1_gej_add_var(&r, &r, &r2, NULL);
CHECK(secp256k1_gej_is_infinity(&r));
/* Try to multiply 1 point, but scratch space is empty */
scratch_empty = secp256k1_scratch_create(&ctx->error_callback, 0);
CHECK(!ecmult_multi(&ctx->ecmult_ctx, scratch_empty, &r, &szero, ecmult_multi_callback, &data, 1));
secp256k1_scratch_destroy(scratch_empty);
/* Try to multiply 1 point, but callback returns false */
CHECK(!ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_false_callback, &data, 1));
CHECK(!ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_false_callback, &data, 1));
/* 2-point */
secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &sc[1]);
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 2));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 2));
secp256k1_gej_neg(&r2, &r2);
secp256k1_gej_add_var(&r, &r, &r2, NULL);
CHECK(secp256k1_gej_is_infinity(&r));
/* 2-point with G scalar */
secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &sc[1]);
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &sc[1], ecmult_multi_callback, &data, 1));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &sc[1], ecmult_multi_callback, &data, 1));
secp256k1_gej_neg(&r2, &r2);
secp256k1_gej_add_var(&r, &r, &r2, NULL);
CHECK(secp256k1_gej_is_infinity(&r));
@ -2625,7 +2745,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
random_scalar_order(&sc[i]);
secp256k1_ge_set_infinity(&pt[i]);
}
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
CHECK(secp256k1_gej_is_infinity(&r));
}
@ -2635,7 +2755,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
pt[i] = ptg;
secp256k1_scalar_set_int(&sc[i], 0);
}
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
CHECK(secp256k1_gej_is_infinity(&r));
}
@ -2648,7 +2768,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
pt[2 * i + 1] = ptg;
}
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
CHECK(secp256k1_gej_is_infinity(&r));
random_scalar_order(&sc[0]);
@ -2661,7 +2781,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
secp256k1_ge_neg(&pt[2*i+1], &pt[2*i]);
}
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
CHECK(secp256k1_gej_is_infinity(&r));
}
@ -2676,7 +2796,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
secp256k1_scalar_negate(&sc[i], &sc[i]);
}
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 32));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 32));
CHECK(secp256k1_gej_is_infinity(&r));
}
@ -2695,7 +2815,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
}
secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &r, &sc[0], &szero);
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20));
secp256k1_gej_neg(&r2, &r2);
secp256k1_gej_add_var(&r, &r, &r2, NULL);
CHECK(secp256k1_gej_is_infinity(&r));
@ -2718,7 +2838,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
secp256k1_gej_set_ge(&p0j, &pt[0]);
secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &p0j, &rs, &szero);
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20));
secp256k1_gej_neg(&r2, &r2);
secp256k1_gej_add_var(&r, &r, &r2, NULL);
CHECK(secp256k1_gej_is_infinity(&r));
@ -2731,13 +2851,13 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
}
secp256k1_scalar_clear(&sc[0]);
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20));
secp256k1_scalar_clear(&sc[1]);
secp256k1_scalar_clear(&sc[2]);
secp256k1_scalar_clear(&sc[3]);
secp256k1_scalar_clear(&sc[4]);
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 6));
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 5));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 6));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 5));
CHECK(secp256k1_gej_is_infinity(&r));
/* Run through s0*(t0*P) + s1*(t1*P) exhaustively for many small values of s0, s1, t0, t1 */
@ -2782,7 +2902,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
secp256k1_scalar_add(&tmp1, &tmp1, &tmp2);
secp256k1_ecmult(&ctx->ecmult_ctx, &expected, &ptgj, &tmp1, &szero);
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &actual, &szero, ecmult_multi_callback, &data, 2));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &actual, &szero, ecmult_multi_callback, &data, 2));
secp256k1_gej_neg(&expected, &expected);
secp256k1_gej_add_var(&actual, &actual, &expected, NULL);
CHECK(secp256k1_gej_is_infinity(&actual));
@ -2793,6 +2913,24 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
}
}
void test_ecmult_multi_batch_single(secp256k1_ecmult_multi_func ecmult_multi) {
secp256k1_scalar szero;
secp256k1_scalar sc[32];
secp256k1_ge pt[32];
secp256k1_gej r;
ecmult_multi_data data;
secp256k1_scratch *scratch_empty;
data.sc = sc;
data.pt = pt;
secp256k1_scalar_set_int(&szero, 0);
/* Try to multiply 1 point, but scratch space is empty. */
scratch_empty = secp256k1_scratch_create(&ctx->error_callback, 0);
CHECK(!ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch_empty, &r, &szero, ecmult_multi_callback, &data, 1));
secp256k1_scratch_destroy(&ctx->error_callback, scratch_empty);
}
void test_secp256k1_pippenger_bucket_window_inv(void) {
int i;
@ -2823,21 +2961,75 @@ void test_ecmult_multi_pippenger_max_points(void) {
int bucket_window = 0;
for(; scratch_size < max_size; scratch_size+=256) {
size_t i;
size_t total_alloc;
size_t checkpoint;
scratch = secp256k1_scratch_create(&ctx->error_callback, scratch_size);
CHECK(scratch != NULL);
n_points_supported = secp256k1_pippenger_max_points(scratch);
checkpoint = secp256k1_scratch_checkpoint(&ctx->error_callback, scratch);
n_points_supported = secp256k1_pippenger_max_points(&ctx->error_callback, scratch);
if (n_points_supported == 0) {
secp256k1_scratch_destroy(scratch);
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
continue;
}
bucket_window = secp256k1_pippenger_bucket_window(n_points_supported);
CHECK(secp256k1_scratch_allocate_frame(scratch, secp256k1_pippenger_scratch_size(n_points_supported, bucket_window), PIPPENGER_SCRATCH_OBJECTS));
secp256k1_scratch_deallocate_frame(scratch);
secp256k1_scratch_destroy(scratch);
/* allocate `total_alloc` bytes over `PIPPENGER_SCRATCH_OBJECTS` many allocations */
total_alloc = secp256k1_pippenger_scratch_size(n_points_supported, bucket_window);
for (i = 0; i < PIPPENGER_SCRATCH_OBJECTS - 1; i++) {
CHECK(secp256k1_scratch_alloc(&ctx->error_callback, scratch, 1));
total_alloc--;
}
CHECK(secp256k1_scratch_alloc(&ctx->error_callback, scratch, total_alloc));
secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, checkpoint);
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
}
CHECK(bucket_window == PIPPENGER_MAX_BUCKET_WINDOW);
}
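The loop above now exercises the scratch space's checkpoint interface: record the current allocation state, make temporary allocations, then roll back to the recorded state instead of freeing objects one by one. A minimal sketch of that pattern, assuming only the calls already used above (the 1024- and 128-byte sizes are arbitrary placeholders; illustration only):
/* Checkpoint/rollback usage sketch; illustration only. */
secp256k1_scratch *s = secp256k1_scratch_create(&ctx->error_callback, 1024);
size_t cp = secp256k1_scratch_checkpoint(&ctx->error_callback, s);
void *obj = secp256k1_scratch_alloc(&ctx->error_callback, s, 128);
CHECK(obj != NULL);                                              /* NULL would mean the scratch is too small */
secp256k1_scratch_apply_checkpoint(&ctx->error_callback, s, cp); /* releases everything allocated after cp */
secp256k1_scratch_destroy(&ctx->error_callback, s);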
void test_ecmult_multi_batch_size_helper(void) {
size_t n_batches, n_batch_points, max_n_batch_points, n;
max_n_batch_points = 0;
n = 1;
CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 0);
max_n_batch_points = 1;
n = 0;
CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1);
CHECK(n_batches == 0);
CHECK(n_batch_points == 0);
max_n_batch_points = 2;
n = 5;
CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1);
CHECK(n_batches == 3);
CHECK(n_batch_points == 2);
max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH;
n = ECMULT_MAX_POINTS_PER_BATCH;
CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1);
CHECK(n_batches == 1);
CHECK(n_batch_points == ECMULT_MAX_POINTS_PER_BATCH);
max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH + 1;
n = ECMULT_MAX_POINTS_PER_BATCH + 1;
CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1);
CHECK(n_batches == 2);
CHECK(n_batch_points == ECMULT_MAX_POINTS_PER_BATCH/2 + 1);
max_n_batch_points = 1;
n = SIZE_MAX;
CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1);
CHECK(n_batches == SIZE_MAX);
CHECK(n_batch_points == 1);
max_n_batch_points = 2;
n = SIZE_MAX;
CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1);
CHECK(n_batches == SIZE_MAX/2 + 1);
CHECK(n_batch_points == 2);
}
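The checks above are consistent with a plain ceiling-division split: given n points and at most max_n_batch_points per batch (capped at ECMULT_MAX_POINTS_PER_BATCH), the helper reports how many batches are needed and how many points go into each. A minimal sketch of that arithmetic, matching the behaviour the test pins down rather than the library's actual code:
/* Hypothetical helper reproducing the observable behaviour checked above;
 * illustration only. */
static int batch_size_helper_sketch(size_t *n_batches, size_t *n_batch_points,
                                    size_t max_n_batch_points, size_t n) {
    if (max_n_batch_points == 0) {
        return 0;                                   /* cannot place any point */
    }
    if (max_n_batch_points > ECMULT_MAX_POINTS_PER_BATCH) {
        max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH;
    }
    if (n == 0) {
        *n_batches = 0;
        *n_batch_points = 0;
        return 1;
    }
    *n_batches = 1 + (n - 1) / max_n_batch_points;  /* ceil(n / max_n_batch_points) */
    *n_batch_points = 1 + (n - 1) / *n_batches;     /* ceil(n / n_batches) */
    return 1;
}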
/**
* Run secp256k1_ecmult_multi_var with num points and a scratch space restricted to
* 1 <= i <= num points.
*/
@ -2872,19 +3064,25 @@ void test_ecmult_multi_batching(void) {
}
data.sc = sc;
data.pt = pt;
secp256k1_gej_neg(&r2, &r2);
/* Test with empty scratch space */
/* Test with empty scratch space. It should compute the correct result using
* the ecmult_multi_simple algorithm, which doesn't require a scratch space (see
* the sketch after this function). */
scratch = secp256k1_scratch_create(&ctx->error_callback, 0);
CHECK(!secp256k1_ecmult_multi_var(&ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, 1));
secp256k1_scratch_destroy(scratch);
CHECK(secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points));
secp256k1_gej_add_var(&r, &r, &r2, NULL);
CHECK(secp256k1_gej_is_infinity(&r));
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
/* Test with space for 1 point in pippenger. That's not enough because
* ecmult_multi selects strauss which requires more memory. */
* ecmult_multi selects strauss which requires more memory. It should
* therefore select the simple algorithm. */
scratch = secp256k1_scratch_create(&ctx->error_callback, secp256k1_pippenger_scratch_size(1, 1) + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT);
CHECK(!secp256k1_ecmult_multi_var(&ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, 1));
secp256k1_scratch_destroy(scratch);
CHECK(secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points));
secp256k1_gej_add_var(&r, &r, &r2, NULL);
CHECK(secp256k1_gej_is_infinity(&r));
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
secp256k1_gej_neg(&r2, &r2);
for(i = 1; i <= n_points; i++) {
if (i > ECMULT_PIPPENGER_THRESHOLD) {
int bucket_window = secp256k1_pippenger_bucket_window(i);
@ -2894,10 +3092,10 @@ void test_ecmult_multi_batching(void) {
size_t scratch_size = secp256k1_strauss_scratch_size(i);
scratch = secp256k1_scratch_create(&ctx->error_callback, scratch_size + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT);
}
CHECK(secp256k1_ecmult_multi_var(&ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points));
CHECK(secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points));
secp256k1_gej_add_var(&r, &r, &r2, NULL);
CHECK(secp256k1_gej_is_infinity(&r));
secp256k1_scratch_destroy(scratch);
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
}
free(sc);
free(pt);
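As noted in the comments above, when the scratch space is absent or too small, secp256k1_ecmult_multi_var is expected to fall back to a simple per-point computation rather than fail. A hedged sketch of what such a fallback plausibly does, using only primitives that appear elsewhere in these tests: compute the G term, then add each scalar*point product one at a time. This is an illustration of the expected behaviour, not the library's implementation:
/* Illustration only: per-point evaluation of sum(sc[k]*pt[k]) + scG*G. */
secp256k1_gej acc, tmp;
secp256k1_scalar zero;
size_t k;
secp256k1_scalar_set_int(&zero, 0);
secp256k1_gej_set_infinity(&acc);
secp256k1_gej_set_infinity(&tmp);
secp256k1_ecmult(&ctx->ecmult_ctx, &acc, &tmp, &zero, &scG);       /* acc = scG*G */
for (k = 0; k < n_points; k++) {
    secp256k1_gej ptj;
    secp256k1_gej_set_ge(&ptj, &pt[k]);
    secp256k1_ecmult(&ctx->ecmult_ctx, &tmp, &ptj, &sc[k], &zero);  /* tmp = sc[k]*pt[k] */
    secp256k1_gej_add_var(&acc, &acc, &tmp, NULL);
}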
@ -2910,15 +3108,19 @@ void run_ecmult_multi_tests(void) {
test_ecmult_multi_pippenger_max_points();
scratch = secp256k1_scratch_create(&ctx->error_callback, 819200);
test_ecmult_multi(scratch, secp256k1_ecmult_multi_var);
test_ecmult_multi(NULL, secp256k1_ecmult_multi_var);
test_ecmult_multi(scratch, secp256k1_ecmult_pippenger_batch_single);
test_ecmult_multi_batch_single(secp256k1_ecmult_pippenger_batch_single);
test_ecmult_multi(scratch, secp256k1_ecmult_strauss_batch_single);
secp256k1_scratch_destroy(scratch);
test_ecmult_multi_batch_single(secp256k1_ecmult_strauss_batch_single);
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
/* Run test_ecmult_multi with space for exactly one point */
scratch = secp256k1_scratch_create(&ctx->error_callback, secp256k1_strauss_scratch_size(1) + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT);
test_ecmult_multi(scratch, secp256k1_ecmult_multi_var);
secp256k1_scratch_destroy(scratch);
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
test_ecmult_multi_batch_size_helper();
test_ecmult_multi_batching();
}
@ -2988,7 +3190,7 @@ void test_constant_wnaf(const secp256k1_scalar *number, int w) {
}
bits = 128;
#endif
skew = secp256k1_wnaf_const(wnaf, num, w, bits);
skew = secp256k1_wnaf_const(wnaf, &num, w, bits);
for (i = WNAF_SIZE_BITS(bits, w); i >= 0; --i) {
secp256k1_scalar t;
@ -4978,8 +5180,9 @@ int main(int argc, char **argv) {
}
} else {
FILE *frand = fopen("/dev/urandom", "r");
if ((frand == NULL) || fread(&seed16, sizeof(seed16), 1, frand) != sizeof(seed16)) {
if ((frand == NULL) || fread(&seed16, 1, sizeof(seed16), frand) != sizeof(seed16)) {
uint64_t t = time(NULL) * (uint64_t)1337;
fprintf(stderr, "WARNING: could not read 16 bytes from /dev/urandom; falling back to insecure PRNG\n");
seed16[0] ^= t;
seed16[1] ^= t >> 8;
seed16[2] ^= t >> 16;
@ -4999,7 +5202,8 @@ int main(int argc, char **argv) {
printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], seed16[13], seed16[14], seed16[15]);
/* initialize */
run_context_tests();
run_context_tests(0);
run_context_tests(1);
run_scratch_tests();
ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
if (secp256k1_rand_bits(1)) {

View File

@ -212,14 +212,14 @@ void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_
data.pt[0] = group[x];
data.pt[1] = group[y];
secp256k1_ecmult_multi_var(&ctx->ecmult_ctx, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2);
secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2);
ge_equals_gej(&group[(i * x + j * y + k) % order], &tmp);
}
}
}
}
}
secp256k1_scratch_destroy(scratch);
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
}
void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k) {

View File

@ -36,7 +36,7 @@ static SECP256K1_INLINE void secp256k1_callback_call(const secp256k1_callback *
} while(0)
#endif
#ifdef HAVE_BUILTIN_EXPECT
#if SECP256K1_GNUC_PREREQ(3, 0)
#define EXPECT(x,c) __builtin_expect((x),(c))
#else
#define EXPECT(x,c) (x)
@ -84,6 +84,47 @@ static SECP256K1_INLINE void *checked_realloc(const secp256k1_callback* cb, void
return ret;
}
#if defined(__BIGGEST_ALIGNMENT__)
#define ALIGNMENT __BIGGEST_ALIGNMENT__
#else
/* Using 16-byte alignment because common architectures never have alignment
* requirements above 8 for any of the types we care about. In addition we
* leave some room because currently we don't care about a few bytes. */
#define ALIGNMENT 16
#endif
#define ROUND_TO_ALIGN(size) (((size + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT)
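A few worked values for the rounding macro may help; these assume a typical build where ALIGNMENT ends up as 16 (illustration only):
/* ((size + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT rounds a size up to the
 * next multiple of ALIGNMENT. With ALIGNMENT == 16:
 *   ROUND_TO_ALIGN(0)  ==  0
 *   ROUND_TO_ALIGN(1)  == 16
 *   ROUND_TO_ALIGN(16) == 16
 *   ROUND_TO_ALIGN(17) == 32
 */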
/* Assume there is a contiguous memory object with bounds [base, base + max_size)
* of which the memory range [base, *prealloc_ptr) is already allocated for usage,
* where *prealloc_ptr is an aligned pointer. In that setting, this function
* reserves the subobject [*prealloc_ptr, *prealloc_ptr + alloc_size) of
* alloc_size bytes by increasing *prealloc_ptr accordingly, taking into account
* alignment requirements.
*
* The function returns an aligned pointer to the newly allocated subobject.
*
* This is useful for manual memory management: if we're simply given a block
* [base, base + max_size), the caller can use this function to allocate memory
* in this block and keep track of the current allocation state with *prealloc_ptr.
*
* It is VERIFY_CHECKed that there is enough space left in the memory object and
* *prealloc_ptr is aligned relative to base.
*/
static SECP256K1_INLINE void *manual_alloc(void** prealloc_ptr, size_t alloc_size, void* base, size_t max_size) {
size_t aligned_alloc_size = ROUND_TO_ALIGN(alloc_size);
void* ret;
VERIFY_CHECK(prealloc_ptr != NULL);
VERIFY_CHECK(*prealloc_ptr != NULL);
VERIFY_CHECK(base != NULL);
VERIFY_CHECK((unsigned char*)*prealloc_ptr >= (unsigned char*)base);
VERIFY_CHECK(((unsigned char*)*prealloc_ptr - (unsigned char*)base) % ALIGNMENT == 0);
VERIFY_CHECK((unsigned char*)*prealloc_ptr - (unsigned char*)base + aligned_alloc_size <= max_size);
ret = *prealloc_ptr;
*((unsigned char**)prealloc_ptr) += aligned_alloc_size;
return ret;
}
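For orientation, a minimal sketch of how a caller could carve two sub-objects out of one pre-sized block with manual_alloc; the names and sizes are hypothetical and the snippet is an illustration, not one of the library's real call sites:
/* Usage sketch of manual_alloc; illustration only. */
static void manual_alloc_example(void) {
    unsigned char block[2 * ALIGNMENT];  /* room for two allocations, each rounded up to ALIGNMENT */
    void *prealloc = block;
    unsigned char *a = (unsigned char *)manual_alloc(&prealloc, 8, block, sizeof(block));
    unsigned char *b = (unsigned char *)manual_alloc(&prealloc, 8, block, sizeof(block));
    a[0] = 1;
    b[0] = 2;
}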
/* Macro for restrict, when available and not in a VERIFY build. */
#if defined(SECP256K1_BUILD) && defined(VERIFY)
# define SECP256K1_RESTRICT