Merge pull request #169 from stevenroose/sys

Add secp256k1-sys with prefixed sources
Andrew Poelstra, 2019-12-05 15:03:31 +00:00 (committed by GitHub)
commit e3aaf00710
126 changed files with 6754 additions and 6234 deletions


@@ -33,6 +33,7 @@
[PR 2](https://github.com/rust-bitcoin/rust-secp256k1/pull/125).
This library should be usable in bare-metal environments and with rust-wasm.
Thanks to Elichai Turkel for driving this forward!
* Update upstream libsecp256k1 version to 143dc6e9ee31852a60321b23eea407d2006171da
# 0.13.0 - 2019-05-21


@@ -1,7 +1,7 @@
[package]
name = "secp256k1"
version = "0.16.1"
version = "0.17.0"
authors = [ "Dawid Ciężarkiewicz <dpc@ucore.info>",
"Andrew Poelstra <apoelstra@wpsoftware.net>" ]
license = "CC0-1.0"
@@ -11,17 +11,12 @@ documentation = "https://docs.rs/secp256k1/"
description = "Rust bindings for Pieter Wuille's `libsecp256k1` library. Implements ECDSA for the SECG elliptic curve group secp256k1 and related utilities."
keywords = [ "crypto", "ECDSA", "secp256k1", "libsecp256k1", "bitcoin" ]
readme = "README.md"
build = "build.rs"
links = "secp256k1"
autoexamples = false # Remove when edition 2018 https://github.com/rust-lang/cargo/issues/5330
# Should make docs.rs show all functions, even those behind non-default features
[package.metadata.docs.rs]
features = [ "rand", "rand-std", "serde", "recovery", "endomorphism" ]
[build-dependencies]
cc = ">= 1.0.28, < 1.0.42"
[lib]
name = "secp256k1"
path = "src/lib.rs"
@@ -29,16 +24,21 @@ path = "src/lib.rs"
[features]
unstable = []
default = ["std"]
std = []
std = ["secp256k1-sys/std"]
rand-std = ["rand/std"]
recovery = []
endomorphism = []
lowmemory = []
recovery = ["secp256k1-sys/recovery"]
endomorphism = ["secp256k1-sys/endomorphism"]
lowmemory = ["secp256k1-sys/lowmemory"]
# Use this feature to not compile the bundled libsecp256k1 C symbols,
# but use external ones. Use this only if you know what you are doing!
external-symbols = ["secp256k1-sys/external-symbols"]
# Do not use this feature! HAZMAT. (meant for Bitcoin Core only)
dont_replace_c_symbols = []
# Do not use this feature! HAZMAT. (meant for Fuzzing only. this is *BROKEN CRYPTOGRAPHY*)
fuzztarget = []
fuzztarget = ["secp256k1-sys/fuzztarget"]
[dependencies]
secp256k1-sys = { version = "0.1.0", default-features = false, path = "./secp256k1-sys" }
[dev-dependencies]
rand = "0.6"


@@ -1,21 +0,0 @@
/**********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_ECDSA_H
#define SECP256K1_ECDSA_H
#include <stddef.h>
#include "scalar.h"
#include "group.h"
#include "ecmult.h"
static int secp256k1_ecdsa_sig_parse(secp256k1_scalar *r, secp256k1_scalar *s, const unsigned char *sig, size_t size);
static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const secp256k1_scalar *r, const secp256k1_scalar *s);
static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const secp256k1_scalar* r, const secp256k1_scalar* s, const secp256k1_ge *pubkey, const secp256k1_scalar *message);
static int secp256k1_ecdsa_sig_sign(const secp256k1_ecmult_gen_context *ctx, secp256k1_scalar* r, secp256k1_scalar* s, const secp256k1_scalar *seckey, const secp256k1_scalar *message, const secp256k1_scalar *nonce, int *recid);
#endif /* SECP256K1_ECDSA_H */
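For reference, the four routines declared above implement textbook ECDSA over the curve's group order n. A compact summary of what sig_sign and sig_verify compute (standard ECDSA notation, not taken verbatim from this header), with secret key d, public key Q = dG, message hash m and nonce k:

\[
\begin{aligned}
\text{sign:}   \quad & r = (k \cdot G)_x \bmod n, \qquad s = k^{-1}\,(m + r\,d) \bmod n \\
\text{verify:} \quad & u_1 = m\,s^{-1} \bmod n, \quad u_2 = r\,s^{-1} \bmod n, \qquad \text{accept iff } (u_1 G + u_2 Q)_x \equiv r \pmod{n}
\end{aligned}
\]

The recid output parameter of secp256k1_ecdsa_sig_sign records which candidate curve point produced r, which is what makes public-key recovery possible in the recovery module further down.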


@@ -1,25 +0,0 @@
/**********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_ECKEY_H
#define SECP256K1_ECKEY_H
#include <stddef.h>
#include "group.h"
#include "scalar.h"
#include "ecmult.h"
#include "ecmult_gen.h"
static int secp256k1_eckey_pubkey_parse(secp256k1_ge *elem, const unsigned char *pub, size_t size);
static int secp256k1_eckey_pubkey_serialize(secp256k1_ge *elem, unsigned char *pub, size_t *size, int compressed);
static int secp256k1_eckey_privkey_tweak_add(secp256k1_scalar *key, const secp256k1_scalar *tweak);
static int secp256k1_eckey_pubkey_tweak_add(const secp256k1_ecmult_context *ctx, secp256k1_ge *key, const secp256k1_scalar *tweak);
static int secp256k1_eckey_privkey_tweak_mul(secp256k1_scalar *key, const secp256k1_scalar *tweak);
static int secp256k1_eckey_pubkey_tweak_mul(const secp256k1_ecmult_context *ctx, secp256k1_ge *key, const secp256k1_scalar *tweak);
#endif /* SECP256K1_ECKEY_H */


@@ -1,100 +0,0 @@
/**********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_ECKEY_IMPL_H
#define SECP256K1_ECKEY_IMPL_H
#include "eckey.h"
#include "scalar.h"
#include "field.h"
#include "group.h"
#include "ecmult_gen.h"
static int secp256k1_eckey_pubkey_parse(secp256k1_ge *elem, const unsigned char *pub, size_t size) {
if (size == 33 && (pub[0] == SECP256K1_TAG_PUBKEY_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_ODD)) {
secp256k1_fe x;
return secp256k1_fe_set_b32(&x, pub+1) && secp256k1_ge_set_xo_var(elem, &x, pub[0] == SECP256K1_TAG_PUBKEY_ODD);
} else if (size == 65 && (pub[0] == SECP256K1_TAG_PUBKEY_UNCOMPRESSED || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) {
secp256k1_fe x, y;
if (!secp256k1_fe_set_b32(&x, pub+1) || !secp256k1_fe_set_b32(&y, pub+33)) {
return 0;
}
secp256k1_ge_set_xy(elem, &x, &y);
if ((pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD) &&
secp256k1_fe_is_odd(&y) != (pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) {
return 0;
}
return secp256k1_ge_is_valid_var(elem);
} else {
return 0;
}
}
static int secp256k1_eckey_pubkey_serialize(secp256k1_ge *elem, unsigned char *pub, size_t *size, int compressed) {
if (secp256k1_ge_is_infinity(elem)) {
return 0;
}
secp256k1_fe_normalize_var(&elem->x);
secp256k1_fe_normalize_var(&elem->y);
secp256k1_fe_get_b32(&pub[1], &elem->x);
if (compressed) {
*size = 33;
pub[0] = secp256k1_fe_is_odd(&elem->y) ? SECP256K1_TAG_PUBKEY_ODD : SECP256K1_TAG_PUBKEY_EVEN;
} else {
*size = 65;
pub[0] = SECP256K1_TAG_PUBKEY_UNCOMPRESSED;
secp256k1_fe_get_b32(&pub[33], &elem->y);
}
return 1;
}
static int secp256k1_eckey_privkey_tweak_add(secp256k1_scalar *key, const secp256k1_scalar *tweak) {
secp256k1_scalar_add(key, key, tweak);
if (secp256k1_scalar_is_zero(key)) {
return 0;
}
return 1;
}
static int secp256k1_eckey_pubkey_tweak_add(const secp256k1_ecmult_context *ctx, secp256k1_ge *key, const secp256k1_scalar *tweak) {
secp256k1_gej pt;
secp256k1_scalar one;
secp256k1_gej_set_ge(&pt, key);
secp256k1_scalar_set_int(&one, 1);
secp256k1_ecmult(ctx, &pt, &pt, &one, tweak);
if (secp256k1_gej_is_infinity(&pt)) {
return 0;
}
secp256k1_ge_set_gej(key, &pt);
return 1;
}
static int secp256k1_eckey_privkey_tweak_mul(secp256k1_scalar *key, const secp256k1_scalar *tweak) {
if (secp256k1_scalar_is_zero(tweak)) {
return 0;
}
secp256k1_scalar_mul(key, key, tweak);
return 1;
}
static int secp256k1_eckey_pubkey_tweak_mul(const secp256k1_ecmult_context *ctx, secp256k1_ge *key, const secp256k1_scalar *tweak) {
secp256k1_scalar zero;
secp256k1_gej pt;
if (secp256k1_scalar_is_zero(tweak)) {
return 0;
}
secp256k1_scalar_set_int(&zero, 0);
secp256k1_gej_set_ge(&pt, key);
secp256k1_ecmult(ctx, &pt, &pt, tweak, &zero);
secp256k1_ge_set_gej(key, &pt);
return 1;
}
#endif /* SECP256K1_ECKEY_IMPL_H */
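The private- and public-key tweak routines above stay consistent with each other because scalar multiplication by G distributes over scalar addition and commutes with scalar multiplication; a sketch of the correspondence (not text from the file):

\[
\begin{aligned}
d' = d + t \bmod n \quad &\Longrightarrow \quad d'G = dG + tG \\
d' = d \cdot t \bmod n \quad &\Longrightarrow \quad d'G = t\,(dG)
\end{aligned}
\]

This matches the code: secp256k1_eckey_pubkey_tweak_add calls secp256k1_ecmult with scalars (1, tweak), i.e. 1·P + t·G, while secp256k1_eckey_pubkey_tweak_mul uses (tweak, 0), i.e. t·P + 0·G.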


@@ -1,48 +0,0 @@
/**********************************************************************
* Copyright (c) 2013, 2014, 2017 Pieter Wuille, Andrew Poelstra *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_ECMULT_H
#define SECP256K1_ECMULT_H
#include "num.h"
#include "group.h"
#include "scalar.h"
#include "scratch.h"
typedef struct {
/* For accelerating the computation of a*P + b*G: */
secp256k1_ge_storage (*pre_g)[]; /* odd multiples of the generator */
#ifdef USE_ENDOMORPHISM
secp256k1_ge_storage (*pre_g_128)[]; /* odd multiples of 2^128*generator */
#endif
} secp256k1_ecmult_context;
static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx);
static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc);
static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src);
static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx);
static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx);
/** Double multiply: R = na*A + ng*G */
static void secp256k1_ecmult(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng);
typedef int (secp256k1_ecmult_multi_callback)(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data);
/**
* Multi-multiply: R = inp_g_sc * G + sum_i ni * Ai.
* Chooses the right algorithm for a given number of points and scratch space
* size. Resets and overwrites the given scratch space. If the points do not
* fit in the scratch space the algorithm is repeatedly run with batches of
* points. If no scratch space is given then a simple algorithm is used that
* simply multiplies the points with the corresponding scalars and adds them up.
* Returns: 1 on success (including when inp_g_sc is NULL and n is 0)
* 0 if there is not enough scratch space for a single point or
* callback returns 0
*/
static int secp256k1_ecmult_multi_var(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n);
#endif /* SECP256K1_ECMULT_H */
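The multi-multiplication interface above is callback-driven: rather than passing arrays, the caller hands secp256k1_ecmult_multi_var a function that fills in the idx-th scalar/point pair on demand, which lets the batching logic pull points as scratch space allows. A minimal sketch of such a callback, assuming the library's internal headers are in scope (multi_cb_data and its fields are hypothetical names, not part of the library):

typedef struct {
    const secp256k1_scalar *scalars;  /* caller-owned array of scalars n_i */
    const secp256k1_ge *points;       /* caller-owned array of points A_i */
} multi_cb_data;

/* Matches the secp256k1_ecmult_multi_callback typedef declared above. */
static int multi_cb(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
    const multi_cb_data *d = (const multi_cb_data *)data;
    *sc = d->scalars[idx];
    *pt = d->points[idx];
    return 1;  /* returning 0 makes secp256k1_ecmult_multi_var fail */
}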


@@ -1,318 +0,0 @@
/**********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_FIELD_IMPL_H
#define SECP256K1_FIELD_IMPL_H
#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
#endif
#include "util.h"
#include "num.h"
#if defined(USE_FIELD_10X26)
#include "field_10x26_impl.h"
#elif defined(USE_FIELD_5X52)
#include "field_5x52_impl.h"
#else
#error "Please select field implementation"
#endif
SECP256K1_INLINE static int secp256k1_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b) {
secp256k1_fe na;
secp256k1_fe_negate(&na, a, 1);
secp256k1_fe_add(&na, b);
return secp256k1_fe_normalizes_to_zero(&na);
}
SECP256K1_INLINE static int secp256k1_fe_equal_var(const secp256k1_fe *a, const secp256k1_fe *b) {
secp256k1_fe na;
secp256k1_fe_negate(&na, a, 1);
secp256k1_fe_add(&na, b);
return secp256k1_fe_normalizes_to_zero_var(&na);
}
static int secp256k1_fe_sqrt(secp256k1_fe *r, const secp256k1_fe *a) {
/** Given that p is congruent to 3 mod 4, we can compute the square root of
* a mod p as the (p+1)/4'th power of a.
*
* As (p+1)/4 is an even number, it will have the same result for a and for
* (-a). Only one of these two numbers actually has a square root however,
* so we test at the end by squaring and comparing to the input.
* Also because (p+1)/4 is an even number, the computed square root is
* itself always a square (a ** ((p+1)/4) is the square of a ** ((p+1)/8)).
*/
secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
int j;
VERIFY_CHECK(r != a);
/** The binary representation of (p + 1)/4 has 3 blocks of 1s, with lengths in
* { 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
* 1, [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
*/
secp256k1_fe_sqr(&x2, a);
secp256k1_fe_mul(&x2, &x2, a);
secp256k1_fe_sqr(&x3, &x2);
secp256k1_fe_mul(&x3, &x3, a);
x6 = x3;
for (j=0; j<3; j++) {
secp256k1_fe_sqr(&x6, &x6);
}
secp256k1_fe_mul(&x6, &x6, &x3);
x9 = x6;
for (j=0; j<3; j++) {
secp256k1_fe_sqr(&x9, &x9);
}
secp256k1_fe_mul(&x9, &x9, &x3);
x11 = x9;
for (j=0; j<2; j++) {
secp256k1_fe_sqr(&x11, &x11);
}
secp256k1_fe_mul(&x11, &x11, &x2);
x22 = x11;
for (j=0; j<11; j++) {
secp256k1_fe_sqr(&x22, &x22);
}
secp256k1_fe_mul(&x22, &x22, &x11);
x44 = x22;
for (j=0; j<22; j++) {
secp256k1_fe_sqr(&x44, &x44);
}
secp256k1_fe_mul(&x44, &x44, &x22);
x88 = x44;
for (j=0; j<44; j++) {
secp256k1_fe_sqr(&x88, &x88);
}
secp256k1_fe_mul(&x88, &x88, &x44);
x176 = x88;
for (j=0; j<88; j++) {
secp256k1_fe_sqr(&x176, &x176);
}
secp256k1_fe_mul(&x176, &x176, &x88);
x220 = x176;
for (j=0; j<44; j++) {
secp256k1_fe_sqr(&x220, &x220);
}
secp256k1_fe_mul(&x220, &x220, &x44);
x223 = x220;
for (j=0; j<3; j++) {
secp256k1_fe_sqr(&x223, &x223);
}
secp256k1_fe_mul(&x223, &x223, &x3);
/* The final result is then assembled using a sliding window over the blocks. */
t1 = x223;
for (j=0; j<23; j++) {
secp256k1_fe_sqr(&t1, &t1);
}
secp256k1_fe_mul(&t1, &t1, &x22);
for (j=0; j<6; j++) {
secp256k1_fe_sqr(&t1, &t1);
}
secp256k1_fe_mul(&t1, &t1, &x2);
secp256k1_fe_sqr(&t1, &t1);
secp256k1_fe_sqr(r, &t1);
/* Check that a square root was actually calculated */
secp256k1_fe_sqr(&t1, r);
return secp256k1_fe_equal(&t1, a);
}
static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a) {
secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
int j;
/** The binary representation of (p - 2) has 5 blocks of 1s, with lengths in
* { 1, 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
* [1], [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
*/
secp256k1_fe_sqr(&x2, a);
secp256k1_fe_mul(&x2, &x2, a);
secp256k1_fe_sqr(&x3, &x2);
secp256k1_fe_mul(&x3, &x3, a);
x6 = x3;
for (j=0; j<3; j++) {
secp256k1_fe_sqr(&x6, &x6);
}
secp256k1_fe_mul(&x6, &x6, &x3);
x9 = x6;
for (j=0; j<3; j++) {
secp256k1_fe_sqr(&x9, &x9);
}
secp256k1_fe_mul(&x9, &x9, &x3);
x11 = x9;
for (j=0; j<2; j++) {
secp256k1_fe_sqr(&x11, &x11);
}
secp256k1_fe_mul(&x11, &x11, &x2);
x22 = x11;
for (j=0; j<11; j++) {
secp256k1_fe_sqr(&x22, &x22);
}
secp256k1_fe_mul(&x22, &x22, &x11);
x44 = x22;
for (j=0; j<22; j++) {
secp256k1_fe_sqr(&x44, &x44);
}
secp256k1_fe_mul(&x44, &x44, &x22);
x88 = x44;
for (j=0; j<44; j++) {
secp256k1_fe_sqr(&x88, &x88);
}
secp256k1_fe_mul(&x88, &x88, &x44);
x176 = x88;
for (j=0; j<88; j++) {
secp256k1_fe_sqr(&x176, &x176);
}
secp256k1_fe_mul(&x176, &x176, &x88);
x220 = x176;
for (j=0; j<44; j++) {
secp256k1_fe_sqr(&x220, &x220);
}
secp256k1_fe_mul(&x220, &x220, &x44);
x223 = x220;
for (j=0; j<3; j++) {
secp256k1_fe_sqr(&x223, &x223);
}
secp256k1_fe_mul(&x223, &x223, &x3);
/* The final result is then assembled using a sliding window over the blocks. */
t1 = x223;
for (j=0; j<23; j++) {
secp256k1_fe_sqr(&t1, &t1);
}
secp256k1_fe_mul(&t1, &t1, &x22);
for (j=0; j<5; j++) {
secp256k1_fe_sqr(&t1, &t1);
}
secp256k1_fe_mul(&t1, &t1, a);
for (j=0; j<3; j++) {
secp256k1_fe_sqr(&t1, &t1);
}
secp256k1_fe_mul(&t1, &t1, &x2);
for (j=0; j<2; j++) {
secp256k1_fe_sqr(&t1, &t1);
}
secp256k1_fe_mul(r, a, &t1);
}
static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a) {
#if defined(USE_FIELD_INV_BUILTIN)
secp256k1_fe_inv(r, a);
#elif defined(USE_FIELD_INV_NUM)
secp256k1_num n, m;
static const secp256k1_fe negone = SECP256K1_FE_CONST(
0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL,
0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xFFFFFC2EUL
);
/* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
static const unsigned char prime[32] = {
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
};
unsigned char b[32];
int res;
secp256k1_fe c = *a;
secp256k1_fe_normalize_var(&c);
secp256k1_fe_get_b32(b, &c);
secp256k1_num_set_bin(&n, b, 32);
secp256k1_num_set_bin(&m, prime, 32);
secp256k1_num_mod_inverse(&n, &n, &m);
secp256k1_num_get_bin(b, 32, &n);
res = secp256k1_fe_set_b32(r, b);
(void)res;
VERIFY_CHECK(res);
/* Verify the result is the (unique) valid inverse using non-GMP code. */
secp256k1_fe_mul(&c, &c, r);
secp256k1_fe_add(&c, &negone);
CHECK(secp256k1_fe_normalizes_to_zero_var(&c));
#else
#error "Please select field inverse implementation"
#endif
}
static void secp256k1_fe_inv_all_var(secp256k1_fe *r, const secp256k1_fe *a, size_t len) {
secp256k1_fe u;
size_t i;
if (len < 1) {
return;
}
VERIFY_CHECK((r + len <= a) || (a + len <= r));
r[0] = a[0];
i = 0;
while (++i < len) {
secp256k1_fe_mul(&r[i], &r[i - 1], &a[i]);
}
secp256k1_fe_inv_var(&u, &r[--i]);
while (i > 0) {
size_t j = i--;
secp256k1_fe_mul(&r[j], &r[i], &u);
secp256k1_fe_mul(&u, &u, &a[j]);
}
r[0] = u;
}
static int secp256k1_fe_is_quad_var(const secp256k1_fe *a) {
#ifndef USE_NUM_NONE
unsigned char b[32];
secp256k1_num n;
secp256k1_num m;
/* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
static const unsigned char prime[32] = {
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
};
secp256k1_fe c = *a;
secp256k1_fe_normalize_var(&c);
secp256k1_fe_get_b32(b, &c);
secp256k1_num_set_bin(&n, b, 32);
secp256k1_num_set_bin(&m, prime, 32);
return secp256k1_num_jacobi(&n, &m) >= 0;
#else
secp256k1_fe r;
return secp256k1_fe_sqrt(&r, a);
#endif
}
#endif /* SECP256K1_FIELD_IMPL_H */
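Both exponentiation ladders above hinge on two standard facts about the field prime p = 2^256 - 2^32 - 977 (the byte string hard-coded in the functions above): since p ≡ 3 (mod 4), a square root is a single fixed exponentiation, and Fermat's little theorem gives the inverse. In symbols:

\[
\sqrt{a} \;=\; a^{(p+1)/4} \bmod p \quad (\text{when } a \text{ is a quadratic residue}), \qquad a^{-1} \;=\; a^{\,p-2} \bmod p .
\]

The addition chains spelled out in the comments (1-bit blocks of lengths 2, 22, 223 for (p+1)/4, and 1, 2, 22, 223 for p-2) simply build these fixed exponents using a long run of squarings plus only a handful of extra multiplications.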


@@ -1,705 +0,0 @@
/**********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_GROUP_IMPL_H
#define SECP256K1_GROUP_IMPL_H
#include "num.h"
#include "field.h"
#include "group.h"
/* These points can be generated in sage as follows:
*
* 0. Setup a worksheet with the following parameters.
* b = 4 # whatever CURVE_B will be set to
* F = FiniteField (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F)
* C = EllipticCurve ([F (0), F (b)])
*
* 1. Determine all the small orders available to you. (If there are
* no satisfactory ones, go back and change b.)
* print C.order().factor(limit=1000)
*
* 2. Choose an order as one of the prime factors listed in the above step.
* (You can also multiply some to get a composite order, though the
* tests will crash trying to invert scalars during signing.) We take a
* random point and scale it to drop its order to the desired value.
* There is some probability this won't work; just try again.
* order = 199
* P = C.random_point()
* P = (int(P.order()) / int(order)) * P
* assert(P.order() == order)
*
* 3. Print the values. You'll need to use a vim macro or something to
* split the hex output into 4-byte chunks.
* print "%x %x" % P.xy()
*/
#if defined(EXHAUSTIVE_TEST_ORDER)
# if EXHAUSTIVE_TEST_ORDER == 199
static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
0xFA7CC9A7, 0x0737F2DB, 0xA749DD39, 0x2B4FB069,
0x3B017A7D, 0xA808C2F1, 0xFB12940C, 0x9EA66C18,
0x78AC123A, 0x5ED8AEF3, 0x8732BC91, 0x1F3A2868,
0x48DF246C, 0x808DAE72, 0xCFE52572, 0x7F0501ED
);
static const int CURVE_B = 4;
# elif EXHAUSTIVE_TEST_ORDER == 13
static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
0xedc60018, 0xa51a786b, 0x2ea91f4d, 0x4c9416c0,
0x9de54c3b, 0xa1316554, 0x6cf4345c, 0x7277ef15,
0x54cb1b6b, 0xdc8c1273, 0x087844ea, 0x43f4603e,
0x0eaf9a43, 0xf6effe55, 0x939f806d, 0x37adf8ac
);
static const int CURVE_B = 2;
# else
# error No known generator for the specified exhaustive test group order.
# endif
#else
/** Generator for secp256k1, value 'g' defined in
* "Standards for Efficient Cryptography" (SEC2) 2.7.1.
*/
static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
0x79BE667EUL, 0xF9DCBBACUL, 0x55A06295UL, 0xCE870B07UL,
0x029BFCDBUL, 0x2DCE28D9UL, 0x59F2815BUL, 0x16F81798UL,
0x483ADA77UL, 0x26A3C465UL, 0x5DA4FBFCUL, 0x0E1108A8UL,
0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL
);
static const int CURVE_B = 7;
#endif
static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) {
secp256k1_fe zi2;
secp256k1_fe zi3;
secp256k1_fe_sqr(&zi2, zi);
secp256k1_fe_mul(&zi3, &zi2, zi);
secp256k1_fe_mul(&r->x, &a->x, &zi2);
secp256k1_fe_mul(&r->y, &a->y, &zi3);
r->infinity = a->infinity;
}
static void secp256k1_ge_set_xy(secp256k1_ge *r, const secp256k1_fe *x, const secp256k1_fe *y) {
r->infinity = 0;
r->x = *x;
r->y = *y;
}
static int secp256k1_ge_is_infinity(const secp256k1_ge *a) {
return a->infinity;
}
static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a) {
*r = *a;
secp256k1_fe_normalize_weak(&r->y);
secp256k1_fe_negate(&r->y, &r->y, 1);
}
static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a) {
secp256k1_fe z2, z3;
r->infinity = a->infinity;
secp256k1_fe_inv(&a->z, &a->z);
secp256k1_fe_sqr(&z2, &a->z);
secp256k1_fe_mul(&z3, &a->z, &z2);
secp256k1_fe_mul(&a->x, &a->x, &z2);
secp256k1_fe_mul(&a->y, &a->y, &z3);
secp256k1_fe_set_int(&a->z, 1);
r->x = a->x;
r->y = a->y;
}
static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) {
secp256k1_fe z2, z3;
r->infinity = a->infinity;
if (a->infinity) {
return;
}
secp256k1_fe_inv_var(&a->z, &a->z);
secp256k1_fe_sqr(&z2, &a->z);
secp256k1_fe_mul(&z3, &a->z, &z2);
secp256k1_fe_mul(&a->x, &a->x, &z2);
secp256k1_fe_mul(&a->y, &a->y, &z3);
secp256k1_fe_set_int(&a->z, 1);
r->x = a->x;
r->y = a->y;
}
static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len) {
secp256k1_fe u;
size_t i;
size_t last_i = SIZE_MAX;
for (i = 0; i < len; i++) {
if (!a[i].infinity) {
/* Use destination's x coordinates as scratch space */
if (last_i == SIZE_MAX) {
r[i].x = a[i].z;
} else {
secp256k1_fe_mul(&r[i].x, &r[last_i].x, &a[i].z);
}
last_i = i;
}
}
if (last_i == SIZE_MAX) {
return;
}
secp256k1_fe_inv_var(&u, &r[last_i].x);
i = last_i;
while (i > 0) {
i--;
if (!a[i].infinity) {
secp256k1_fe_mul(&r[last_i].x, &r[i].x, &u);
secp256k1_fe_mul(&u, &u, &a[last_i].z);
last_i = i;
}
}
VERIFY_CHECK(!a[last_i].infinity);
r[last_i].x = u;
for (i = 0; i < len; i++) {
r[i].infinity = a[i].infinity;
if (!a[i].infinity) {
secp256k1_ge_set_gej_zinv(&r[i], &a[i], &r[i].x);
}
}
}
static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp256k1_fe *globalz, const secp256k1_gej *a, const secp256k1_fe *zr) {
size_t i = len - 1;
secp256k1_fe zs;
if (len > 0) {
/* The z of the final point gives us the "global Z" for the table. */
r[i].x = a[i].x;
r[i].y = a[i].y;
/* Ensure all y values are in weak normal form for fast negation of points */
secp256k1_fe_normalize_weak(&r[i].y);
*globalz = a[i].z;
r[i].infinity = 0;
zs = zr[i];
/* Work our way backwards, using the z-ratios to scale the x/y values. */
while (i > 0) {
if (i != len - 1) {
secp256k1_fe_mul(&zs, &zs, &zr[i]);
}
i--;
secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zs);
}
}
}
static void secp256k1_gej_set_infinity(secp256k1_gej *r) {
r->infinity = 1;
secp256k1_fe_clear(&r->x);
secp256k1_fe_clear(&r->y);
secp256k1_fe_clear(&r->z);
}
static void secp256k1_ge_set_infinity(secp256k1_ge *r) {
r->infinity = 1;
secp256k1_fe_clear(&r->x);
secp256k1_fe_clear(&r->y);
}
static void secp256k1_gej_clear(secp256k1_gej *r) {
r->infinity = 0;
secp256k1_fe_clear(&r->x);
secp256k1_fe_clear(&r->y);
secp256k1_fe_clear(&r->z);
}
static void secp256k1_ge_clear(secp256k1_ge *r) {
r->infinity = 0;
secp256k1_fe_clear(&r->x);
secp256k1_fe_clear(&r->y);
}
static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x) {
secp256k1_fe x2, x3, c;
r->x = *x;
secp256k1_fe_sqr(&x2, x);
secp256k1_fe_mul(&x3, x, &x2);
r->infinity = 0;
secp256k1_fe_set_int(&c, CURVE_B);
secp256k1_fe_add(&c, &x3);
return secp256k1_fe_sqrt(&r->y, &c);
}
static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd) {
if (!secp256k1_ge_set_xquad(r, x)) {
return 0;
}
secp256k1_fe_normalize_var(&r->y);
if (secp256k1_fe_is_odd(&r->y) != odd) {
secp256k1_fe_negate(&r->y, &r->y, 1);
}
return 1;
}
static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a) {
r->infinity = a->infinity;
r->x = a->x;
r->y = a->y;
secp256k1_fe_set_int(&r->z, 1);
}
static int secp256k1_gej_eq_x_var(const secp256k1_fe *x, const secp256k1_gej *a) {
secp256k1_fe r, r2;
VERIFY_CHECK(!a->infinity);
secp256k1_fe_sqr(&r, &a->z); secp256k1_fe_mul(&r, &r, x);
r2 = a->x; secp256k1_fe_normalize_weak(&r2);
return secp256k1_fe_equal_var(&r, &r2);
}
static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a) {
r->infinity = a->infinity;
r->x = a->x;
r->y = a->y;
r->z = a->z;
secp256k1_fe_normalize_weak(&r->y);
secp256k1_fe_negate(&r->y, &r->y, 1);
}
static int secp256k1_gej_is_infinity(const secp256k1_gej *a) {
return a->infinity;
}
static int secp256k1_gej_is_valid_var(const secp256k1_gej *a) {
secp256k1_fe y2, x3, z2, z6;
if (a->infinity) {
return 0;
}
/** y^2 = x^3 + 7
* (Y/Z^3)^2 = (X/Z^2)^3 + 7
* Y^2 / Z^6 = X^3 / Z^6 + 7
* Y^2 = X^3 + 7*Z^6
*/
secp256k1_fe_sqr(&y2, &a->y);
secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
secp256k1_fe_sqr(&z2, &a->z);
secp256k1_fe_sqr(&z6, &z2); secp256k1_fe_mul(&z6, &z6, &z2);
secp256k1_fe_mul_int(&z6, CURVE_B);
secp256k1_fe_add(&x3, &z6);
secp256k1_fe_normalize_weak(&x3);
return secp256k1_fe_equal_var(&y2, &x3);
}
static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) {
secp256k1_fe y2, x3, c;
if (a->infinity) {
return 0;
}
/* y^2 = x^3 + 7 */
secp256k1_fe_sqr(&y2, &a->y);
secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
secp256k1_fe_set_int(&c, CURVE_B);
secp256k1_fe_add(&x3, &c);
secp256k1_fe_normalize_weak(&x3);
return secp256k1_fe_equal_var(&y2, &x3);
}
static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr) {
/* Operations: 3 mul, 4 sqr, 0 normalize, 12 mul_int/add/negate.
*
* Note that there is an implementation described at
* https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
* which trades a multiply for a square, but in practice this is actually slower,
* mainly because it requires more normalizations.
*/
secp256k1_fe t1,t2,t3,t4;
/** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity,
* Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have
* y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p.
*
* Having said this, if this function receives a point on a sextic twist, e.g. by
* a fault attack, it is possible for y to be 0. This happens for y^2 = x^3 + 6,
* since -6 does have a cube root mod p. For this point, this function will not set
* the infinity flag even though the point doubles to infinity, and the result
* point will be gibberish (z = 0 but infinity = 0).
*/
r->infinity = a->infinity;
if (r->infinity) {
if (rzr != NULL) {
secp256k1_fe_set_int(rzr, 1);
}
return;
}
if (rzr != NULL) {
*rzr = a->y;
secp256k1_fe_normalize_weak(rzr);
secp256k1_fe_mul_int(rzr, 2);
}
secp256k1_fe_mul(&r->z, &a->z, &a->y);
secp256k1_fe_mul_int(&r->z, 2); /* Z' = 2*Y*Z (2) */
secp256k1_fe_sqr(&t1, &a->x);
secp256k1_fe_mul_int(&t1, 3); /* T1 = 3*X^2 (3) */
secp256k1_fe_sqr(&t2, &t1); /* T2 = 9*X^4 (1) */
secp256k1_fe_sqr(&t3, &a->y);
secp256k1_fe_mul_int(&t3, 2); /* T3 = 2*Y^2 (2) */
secp256k1_fe_sqr(&t4, &t3);
secp256k1_fe_mul_int(&t4, 2); /* T4 = 8*Y^4 (2) */
secp256k1_fe_mul(&t3, &t3, &a->x); /* T3 = 2*X*Y^2 (1) */
r->x = t3;
secp256k1_fe_mul_int(&r->x, 4); /* X' = 8*X*Y^2 (4) */
secp256k1_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */
secp256k1_fe_add(&r->x, &t2); /* X' = 9*X^4 - 8*X*Y^2 (6) */
secp256k1_fe_negate(&t2, &t2, 1); /* T2 = -9*X^4 (2) */
secp256k1_fe_mul_int(&t3, 6); /* T3 = 12*X*Y^2 (6) */
secp256k1_fe_add(&t3, &t2); /* T3 = 12*X*Y^2 - 9*X^4 (8) */
secp256k1_fe_mul(&r->y, &t1, &t3); /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */
secp256k1_fe_negate(&t2, &t4, 2); /* T2 = -8*Y^4 (3) */
secp256k1_fe_add(&r->y, &t2); /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */
}
static SECP256K1_INLINE void secp256k1_gej_double_nonzero(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr) {
VERIFY_CHECK(!secp256k1_gej_is_infinity(a));
secp256k1_gej_double_var(r, a, rzr);
}
static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr) {
/* Operations: 12 mul, 4 sqr, 2 normalize, 12 mul_int/add/negate */
secp256k1_fe z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
if (a->infinity) {
VERIFY_CHECK(rzr == NULL);
*r = *b;
return;
}
if (b->infinity) {
if (rzr != NULL) {
secp256k1_fe_set_int(rzr, 1);
}
*r = *a;
return;
}
r->infinity = 0;
secp256k1_fe_sqr(&z22, &b->z);
secp256k1_fe_sqr(&z12, &a->z);
secp256k1_fe_mul(&u1, &a->x, &z22);
secp256k1_fe_mul(&u2, &b->x, &z12);
secp256k1_fe_mul(&s1, &a->y, &z22); secp256k1_fe_mul(&s1, &s1, &b->z);
secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z);
secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2);
if (secp256k1_fe_normalizes_to_zero_var(&h)) {
if (secp256k1_fe_normalizes_to_zero_var(&i)) {
secp256k1_gej_double_var(r, a, rzr);
} else {
if (rzr != NULL) {
secp256k1_fe_set_int(rzr, 0);
}
r->infinity = 1;
}
return;
}
secp256k1_fe_sqr(&i2, &i);
secp256k1_fe_sqr(&h2, &h);
secp256k1_fe_mul(&h3, &h, &h2);
secp256k1_fe_mul(&h, &h, &b->z);
if (rzr != NULL) {
*rzr = h;
}
secp256k1_fe_mul(&r->z, &a->z, &h);
secp256k1_fe_mul(&t, &u1, &h2);
r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2);
secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i);
secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1);
secp256k1_fe_add(&r->y, &h3);
}
static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr) {
/* 8 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */
secp256k1_fe z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
if (a->infinity) {
VERIFY_CHECK(rzr == NULL);
secp256k1_gej_set_ge(r, b);
return;
}
if (b->infinity) {
if (rzr != NULL) {
secp256k1_fe_set_int(rzr, 1);
}
*r = *a;
return;
}
r->infinity = 0;
secp256k1_fe_sqr(&z12, &a->z);
u1 = a->x; secp256k1_fe_normalize_weak(&u1);
secp256k1_fe_mul(&u2, &b->x, &z12);
s1 = a->y; secp256k1_fe_normalize_weak(&s1);
secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z);
secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2);
if (secp256k1_fe_normalizes_to_zero_var(&h)) {
if (secp256k1_fe_normalizes_to_zero_var(&i)) {
secp256k1_gej_double_var(r, a, rzr);
} else {
if (rzr != NULL) {
secp256k1_fe_set_int(rzr, 0);
}
r->infinity = 1;
}
return;
}
secp256k1_fe_sqr(&i2, &i);
secp256k1_fe_sqr(&h2, &h);
secp256k1_fe_mul(&h3, &h, &h2);
if (rzr != NULL) {
*rzr = h;
}
secp256k1_fe_mul(&r->z, &a->z, &h);
secp256k1_fe_mul(&t, &u1, &h2);
r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2);
secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i);
secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1);
secp256k1_fe_add(&r->y, &h3);
}
static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv) {
/* 9 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */
secp256k1_fe az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
if (b->infinity) {
*r = *a;
return;
}
if (a->infinity) {
secp256k1_fe bzinv2, bzinv3;
r->infinity = b->infinity;
secp256k1_fe_sqr(&bzinv2, bzinv);
secp256k1_fe_mul(&bzinv3, &bzinv2, bzinv);
secp256k1_fe_mul(&r->x, &b->x, &bzinv2);
secp256k1_fe_mul(&r->y, &b->y, &bzinv3);
secp256k1_fe_set_int(&r->z, 1);
return;
}
r->infinity = 0;
/** We need to calculate (rx,ry,rz) = (ax,ay,az) + (bx,by,1/bzinv). Due to
* secp256k1's isomorphism we can multiply the Z coordinates on both sides
* by bzinv, and get: (rx,ry,rz*bzinv) = (ax,ay,az*bzinv) + (bx,by,1).
* This means that (rx,ry,rz) can be calculated as
* (ax,ay,az*bzinv) + (bx,by,1), when not applying the bzinv factor to rz.
* The variable az below holds the modified Z coordinate for a, which is used
* for the computation of rx and ry, but not for rz.
*/
secp256k1_fe_mul(&az, &a->z, bzinv);
secp256k1_fe_sqr(&z12, &az);
u1 = a->x; secp256k1_fe_normalize_weak(&u1);
secp256k1_fe_mul(&u2, &b->x, &z12);
s1 = a->y; secp256k1_fe_normalize_weak(&s1);
secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &az);
secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2);
if (secp256k1_fe_normalizes_to_zero_var(&h)) {
if (secp256k1_fe_normalizes_to_zero_var(&i)) {
secp256k1_gej_double_var(r, a, NULL);
} else {
r->infinity = 1;
}
return;
}
secp256k1_fe_sqr(&i2, &i);
secp256k1_fe_sqr(&h2, &h);
secp256k1_fe_mul(&h3, &h, &h2);
r->z = a->z; secp256k1_fe_mul(&r->z, &r->z, &h);
secp256k1_fe_mul(&t, &u1, &h2);
r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2);
secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i);
secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1);
secp256k1_fe_add(&r->y, &h3);
}
static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b) {
/* Operations: 7 mul, 5 sqr, 4 normalize, 21 mul_int/add/negate/cmov */
static const secp256k1_fe fe_1 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1);
secp256k1_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr;
secp256k1_fe m_alt, rr_alt;
int infinity, degenerate;
VERIFY_CHECK(!b->infinity);
VERIFY_CHECK(a->infinity == 0 || a->infinity == 1);
/** In:
* Eric Brier and Marc Joye, Weierstrass Elliptic Curves and Side-Channel Attacks.
* In D. Naccache and P. Paillier, Eds., Public Key Cryptography, vol. 2274 of Lecture Notes in Computer Science, pages 335-345. Springer-Verlag, 2002.
* we find as solution for a unified addition/doubling formula:
* lambda = ((x1 + x2)^2 - x1 * x2 + a) / (y1 + y2), with a = 0 for secp256k1's curve equation.
* x3 = lambda^2 - (x1 + x2)
* 2*y3 = lambda * (x1 + x2 - 2 * x3) - (y1 + y2).
*
* Substituting x_i = Xi / Zi^2 and yi = Yi / Zi^3, for i=1,2,3, gives:
* U1 = X1*Z2^2, U2 = X2*Z1^2
* S1 = Y1*Z2^3, S2 = Y2*Z1^3
* Z = Z1*Z2
* T = U1+U2
* M = S1+S2
* Q = T*M^2
* R = T^2-U1*U2
* X3 = 4*(R^2-Q)
* Y3 = 4*(R*(3*Q-2*R^2)-M^4)
* Z3 = 2*M*Z
* (Note that the paper uses xi = Xi / Zi and yi = Yi / Zi instead.)
*
* This formula has the benefit of being the same for both addition
* of distinct points and doubling. However, it breaks down in the
* case that either point is infinity, or that y1 = -y2. We handle
* these cases in the following ways:
*
* - If b is infinity we simply bail by means of a VERIFY_CHECK.
*
* - If a is infinity, we detect this, and at the end of the
* computation replace the result (which will be meaningless,
* but we compute to be constant-time) with b.x : b.y : 1.
*
* - If a = -b, we have y1 = -y2, which is a degenerate case.
* But here the answer is infinity, so we simply set the
* infinity flag of the result, overriding the computed values
* without even needing to cmov.
*
* - If y1 = -y2 but x1 != x2, which does occur thanks to certain
* properties of our curve (specifically, 1 has nontrivial cube
* roots in our field, and the curve equation has no x coefficient)
* then the answer is not infinity but also not given by the above
* equation. In this case, we cmov in place an alternate expression
* for lambda. Specifically (y1 - y2)/(x1 - x2). Where both these
* expressions for lambda are defined, they are equal, and can be
* obtained from each other by multiplication by (y1 + y2)/(y1 + y2)
* then substitution of x^3 + 7 for y^2 (using the curve equation).
* For all pairs of nonzero points (a, b) at least one is defined,
* so this covers everything.
*/
secp256k1_fe_sqr(&zz, &a->z); /* z = Z1^2 */
u1 = a->x; secp256k1_fe_normalize_weak(&u1); /* u1 = U1 = X1*Z2^2 (1) */
secp256k1_fe_mul(&u2, &b->x, &zz); /* u2 = U2 = X2*Z1^2 (1) */
s1 = a->y; secp256k1_fe_normalize_weak(&s1); /* s1 = S1 = Y1*Z2^3 (1) */
secp256k1_fe_mul(&s2, &b->y, &zz); /* s2 = Y2*Z1^2 (1) */
secp256k1_fe_mul(&s2, &s2, &a->z); /* s2 = S2 = Y2*Z1^3 (1) */
t = u1; secp256k1_fe_add(&t, &u2); /* t = T = U1+U2 (2) */
m = s1; secp256k1_fe_add(&m, &s2); /* m = M = S1+S2 (2) */
secp256k1_fe_sqr(&rr, &t); /* rr = T^2 (1) */
secp256k1_fe_negate(&m_alt, &u2, 1); /* Malt = -X2*Z1^2 */
secp256k1_fe_mul(&tt, &u1, &m_alt); /* tt = -U1*U2 (2) */
secp256k1_fe_add(&rr, &tt); /* rr = R = T^2-U1*U2 (3) */
/** If lambda = R/M = 0/0 we have a problem (except in the "trivial"
* case that Z = z1z2 = 0, and this is special-cased later on). */
degenerate = secp256k1_fe_normalizes_to_zero(&m) &
secp256k1_fe_normalizes_to_zero(&rr);
/* This only occurs when y1 == -y2 and x1^3 == x2^3, but x1 != x2.
* This means either x1 == beta*x2 or beta*x1 == x2, where beta is
* a nontrivial cube root of one. In either case, an alternate
* non-indeterminate expression for lambda is (y1 - y2)/(x1 - x2),
* so we set R/M equal to this. */
rr_alt = s1;
secp256k1_fe_mul_int(&rr_alt, 2); /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */
secp256k1_fe_add(&m_alt, &u1); /* Malt = X1*Z2^2 - X2*Z1^2 */
secp256k1_fe_cmov(&rr_alt, &rr, !degenerate);
secp256k1_fe_cmov(&m_alt, &m, !degenerate);
/* Now Ralt / Malt = lambda and is guaranteed not to be 0/0.
* From here on out Ralt and Malt represent the numerator
* and denominator of lambda; R and M represent the explicit
* expressions x1^2 + x2^2 + x1x2 and y1 + y2. */
secp256k1_fe_sqr(&n, &m_alt); /* n = Malt^2 (1) */
secp256k1_fe_mul(&q, &n, &t); /* q = Q = T*Malt^2 (1) */
/* These two lines use the observation that either M == Malt or M == 0,
* so M^3 * Malt is either Malt^4 (which is computed by squaring), or
* zero (which is "computed" by cmov). So the cost is one squaring
* versus two multiplications. */
secp256k1_fe_sqr(&n, &n);
secp256k1_fe_cmov(&n, &m, degenerate); /* n = M^3 * Malt (2) */
secp256k1_fe_sqr(&t, &rr_alt); /* t = Ralt^2 (1) */
secp256k1_fe_mul(&r->z, &a->z, &m_alt); /* r->z = Malt*Z (1) */
infinity = secp256k1_fe_normalizes_to_zero(&r->z) * (1 - a->infinity);
secp256k1_fe_mul_int(&r->z, 2); /* r->z = Z3 = 2*Malt*Z (2) */
secp256k1_fe_negate(&q, &q, 1); /* q = -Q (2) */
secp256k1_fe_add(&t, &q); /* t = Ralt^2-Q (3) */
secp256k1_fe_normalize_weak(&t);
r->x = t; /* r->x = Ralt^2-Q (1) */
secp256k1_fe_mul_int(&t, 2); /* t = 2*x3 (2) */
secp256k1_fe_add(&t, &q); /* t = 2*x3 - Q: (4) */
secp256k1_fe_mul(&t, &t, &rr_alt); /* t = Ralt*(2*x3 - Q) (1) */
secp256k1_fe_add(&t, &n); /* t = Ralt*(2*x3 - Q) + M^3*Malt (3) */
secp256k1_fe_negate(&r->y, &t, 3); /* r->y = Ralt*(Q - 2x3) - M^3*Malt (4) */
secp256k1_fe_normalize_weak(&r->y);
secp256k1_fe_mul_int(&r->x, 4); /* r->x = X3 = 4*(Ralt^2-Q) */
secp256k1_fe_mul_int(&r->y, 4); /* r->y = Y3 = 4*Ralt*(Q - 2x3) - 4*M^3*Malt (4) */
/** In case a->infinity == 1, replace r with (b->x, b->y, 1). */
secp256k1_fe_cmov(&r->x, &b->x, a->infinity);
secp256k1_fe_cmov(&r->y, &b->y, a->infinity);
secp256k1_fe_cmov(&r->z, &fe_1, a->infinity);
r->infinity = infinity;
}
static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *s) {
/* Operations: 4 mul, 1 sqr */
secp256k1_fe zz;
VERIFY_CHECK(!secp256k1_fe_is_zero(s));
secp256k1_fe_sqr(&zz, s);
secp256k1_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */
secp256k1_fe_mul(&r->y, &r->y, &zz);
secp256k1_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */
secp256k1_fe_mul(&r->z, &r->z, s); /* r->z *= s */
}
static void secp256k1_ge_to_storage(secp256k1_ge_storage *r, const secp256k1_ge *a) {
secp256k1_fe x, y;
VERIFY_CHECK(!a->infinity);
x = a->x;
secp256k1_fe_normalize(&x);
y = a->y;
secp256k1_fe_normalize(&y);
secp256k1_fe_to_storage(&r->x, &x);
secp256k1_fe_to_storage(&r->y, &y);
}
static void secp256k1_ge_from_storage(secp256k1_ge *r, const secp256k1_ge_storage *a) {
secp256k1_fe_from_storage(&r->x, &a->x);
secp256k1_fe_from_storage(&r->y, &a->y);
r->infinity = 0;
}
static SECP256K1_INLINE void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_ge_storage *a, int flag) {
secp256k1_fe_storage_cmov(&r->x, &a->x, flag);
secp256k1_fe_storage_cmov(&r->y, &a->y, flag);
}
#ifdef USE_ENDOMORPHISM
static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) {
static const secp256k1_fe beta = SECP256K1_FE_CONST(
0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul,
0x9cf04975ul, 0x12f58995ul, 0xc1396c28ul, 0x719501eeul
);
*r = *a;
secp256k1_fe_mul(&r->x, &r->x, &beta);
}
#endif
static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a) {
secp256k1_fe yz;
if (a->infinity) {
return 0;
}
/* We rely on the fact that the Jacobi symbol of 1 / a->z^3 is the same as
* that of a->z. Thus a->y / a->z^3 is a quadratic residue iff a->y * a->z
is */
secp256k1_fe_mul(&yz, &a->y, &a->z);
return secp256k1_fe_is_quad_var(&yz);
}
#endif /* SECP256K1_GROUP_IMPL_H */
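Nearly every routine above works in Jacobian coordinates, where a triple (X, Y, Z) with Z ≠ 0 stands for the affine point (X/Z^2, Y/Z^3), and rescaling Z leaves the represented point unchanged. That equivalence is exactly what secp256k1_gej_rescale, the _zinv variants and the shared-Z table builder exploit, and it turns the affine curve equation into the projective form checked by secp256k1_gej_is_valid_var:

\[
(X, Y, Z) \;\sim\; (\lambda^{2} X,\; \lambda^{3} Y,\; \lambda Z) \quad \text{for any } \lambda \neq 0, \qquad y^{2} = x^{3} + 7 \;\Longleftrightarrow\; Y^{2} = X^{3} + 7\,Z^{6}.
\]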


@@ -1,41 +0,0 @@
/**********************************************************************
* Copyright (c) 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_HASH_H
#define SECP256K1_HASH_H
#include <stdlib.h>
#include <stdint.h>
typedef struct {
uint32_t s[8];
uint32_t buf[16]; /* In big endian */
size_t bytes;
} secp256k1_sha256;
static void secp256k1_sha256_initialize(secp256k1_sha256 *hash);
static void secp256k1_sha256_write(secp256k1_sha256 *hash, const unsigned char *data, size_t size);
static void secp256k1_sha256_finalize(secp256k1_sha256 *hash, unsigned char *out32);
typedef struct {
secp256k1_sha256 inner, outer;
} secp256k1_hmac_sha256;
static void secp256k1_hmac_sha256_initialize(secp256k1_hmac_sha256 *hash, const unsigned char *key, size_t size);
static void secp256k1_hmac_sha256_write(secp256k1_hmac_sha256 *hash, const unsigned char *data, size_t size);
static void secp256k1_hmac_sha256_finalize(secp256k1_hmac_sha256 *hash, unsigned char *out32);
typedef struct {
unsigned char v[32];
unsigned char k[32];
int retry;
} secp256k1_rfc6979_hmac_sha256;
static void secp256k1_rfc6979_hmac_sha256_initialize(secp256k1_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen);
static void secp256k1_rfc6979_hmac_sha256_generate(secp256k1_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen);
static void secp256k1_rfc6979_hmac_sha256_finalize(secp256k1_rfc6979_hmac_sha256 *rng);
#endif /* SECP256K1_HASH_H */
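All three hash objects declared above follow the same initialize/write/finalize shape. A minimal sketch of hashing an arbitrary message with the plain SHA-256 state, assuming this header's declarations are in scope (hash_message is a hypothetical helper, not part of the library):

static void hash_message(const unsigned char *msg, size_t msg_len, unsigned char *digest32) {
    secp256k1_sha256 sha;
    secp256k1_sha256_initialize(&sha);
    secp256k1_sha256_write(&sha, msg, msg_len);   /* write() may be called repeatedly to stream data */
    secp256k1_sha256_finalize(&sha, digest32);    /* produces the 32-byte digest */
}

The HMAC variant follows the same pattern but is keyed at initialization; the RFC 6979 object is also keyed at initialization but exposes generate() calls that stream out candidate nonce bytes instead of write/finalize.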


@@ -1,15 +0,0 @@
#include <stdlib.h>
#include <stdint.h>
#include "org_bitcoin_Secp256k1Context.h"
#include "include/secp256k1.h"
SECP256K1_API jlong JNICALL Java_org_bitcoin_Secp256k1Context_secp256k1_1init_1context
(JNIEnv* env, jclass classObject)
{
secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
(void)classObject;(void)env;
return (uintptr_t)ctx;
}


@@ -1,67 +0,0 @@
/**********************************************************************
* Copyright (c) 2015 Andrew Poelstra *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_MODULE_ECDH_MAIN_H
#define SECP256K1_MODULE_ECDH_MAIN_H
#include "include/secp256k1_ecdh.h"
#include "ecmult_const_impl.h"
static int ecdh_hash_function_sha256(unsigned char *output, const unsigned char *x, const unsigned char *y, void *data) {
unsigned char version = (y[31] & 0x01) | 0x02;
secp256k1_sha256 sha;
(void)data;
secp256k1_sha256_initialize(&sha);
secp256k1_sha256_write(&sha, &version, 1);
secp256k1_sha256_write(&sha, x, 32);
secp256k1_sha256_finalize(&sha, output);
return 1;
}
const secp256k1_ecdh_hash_function secp256k1_ecdh_hash_function_sha256 = ecdh_hash_function_sha256;
const secp256k1_ecdh_hash_function secp256k1_ecdh_hash_function_default = ecdh_hash_function_sha256;
int secp256k1_ecdh(const secp256k1_context* ctx, unsigned char *output, const secp256k1_pubkey *point, const unsigned char *scalar, secp256k1_ecdh_hash_function hashfp, void *data) {
int ret = 0;
int overflow = 0;
secp256k1_gej res;
secp256k1_ge pt;
secp256k1_scalar s;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(output != NULL);
ARG_CHECK(point != NULL);
ARG_CHECK(scalar != NULL);
if (hashfp == NULL) {
hashfp = secp256k1_ecdh_hash_function_default;
}
secp256k1_pubkey_load(ctx, &pt, point);
secp256k1_scalar_set_b32(&s, scalar, &overflow);
if (overflow || secp256k1_scalar_is_zero(&s)) {
ret = 0;
} else {
unsigned char x[32];
unsigned char y[32];
secp256k1_ecmult_const(&res, &pt, &s, 256);
secp256k1_ge_set_gej(&pt, &res);
/* Compute a hash of the point */
secp256k1_fe_normalize(&pt.x);
secp256k1_fe_normalize(&pt.y);
secp256k1_fe_get_b32(x, &pt.x);
secp256k1_fe_get_b32(y, &pt.y);
ret = hashfp(output, x, y, data);
}
secp256k1_scalar_clear(&s);
return ret;
}
#endif /* SECP256K1_MODULE_ECDH_MAIN_H */
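Because secp256k1_ecdh takes a hash-function pointer, a caller can post-process the shared point however it likes; passing NULL selects the SHA-256-of-compressed-point default defined above. A sketch of a caller-supplied function that instead returns the raw x coordinate (ecdh_xonly is a hypothetical name; the signature mirrors ecdh_hash_function_sha256 above, and the output buffer handed to secp256k1_ecdh must then hold 32 bytes):

#include <string.h>

static int ecdh_xonly(unsigned char *output, const unsigned char *x,
                      const unsigned char *y, void *data) {
    (void)y;               /* the parity of y is deliberately discarded */
    (void)data;
    memcpy(output, x, 32); /* x is the 32-byte big-endian x coordinate */
    return 1;              /* returning 0 would make secp256k1_ecdh fail */
}

/* used as: secp256k1_ecdh(ctx, out32, &pubkey, seckey32, ecdh_xonly, NULL) */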


@@ -1,193 +0,0 @@
/**********************************************************************
* Copyright (c) 2013-2015 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_MODULE_RECOVERY_MAIN_H
#define SECP256K1_MODULE_RECOVERY_MAIN_H
#include "include/secp256k1_recovery.h"
static void secp256k1_ecdsa_recoverable_signature_load(const secp256k1_context* ctx, secp256k1_scalar* r, secp256k1_scalar* s, int* recid, const secp256k1_ecdsa_recoverable_signature* sig) {
(void)ctx;
if (sizeof(secp256k1_scalar) == 32) {
/* When the secp256k1_scalar type is exactly 32 byte, use its
* representation inside secp256k1_ecdsa_signature, as conversion is very fast.
* Note that secp256k1_ecdsa_signature_save must use the same representation. */
memcpy(r, &sig->data[0], 32);
memcpy(s, &sig->data[32], 32);
} else {
secp256k1_scalar_set_b32(r, &sig->data[0], NULL);
secp256k1_scalar_set_b32(s, &sig->data[32], NULL);
}
*recid = sig->data[64];
}
static void secp256k1_ecdsa_recoverable_signature_save(secp256k1_ecdsa_recoverable_signature* sig, const secp256k1_scalar* r, const secp256k1_scalar* s, int recid) {
if (sizeof(secp256k1_scalar) == 32) {
memcpy(&sig->data[0], r, 32);
memcpy(&sig->data[32], s, 32);
} else {
secp256k1_scalar_get_b32(&sig->data[0], r);
secp256k1_scalar_get_b32(&sig->data[32], s);
}
sig->data[64] = recid;
}
int secp256k1_ecdsa_recoverable_signature_parse_compact(const secp256k1_context* ctx, secp256k1_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid) {
secp256k1_scalar r, s;
int ret = 1;
int overflow = 0;
(void)ctx;
ARG_CHECK(sig != NULL);
ARG_CHECK(input64 != NULL);
ARG_CHECK(recid >= 0 && recid <= 3);
secp256k1_scalar_set_b32(&r, &input64[0], &overflow);
ret &= !overflow;
secp256k1_scalar_set_b32(&s, &input64[32], &overflow);
ret &= !overflow;
if (ret) {
secp256k1_ecdsa_recoverable_signature_save(sig, &r, &s, recid);
} else {
memset(sig, 0, sizeof(*sig));
}
return ret;
}
int secp256k1_ecdsa_recoverable_signature_serialize_compact(const secp256k1_context* ctx, unsigned char *output64, int *recid, const secp256k1_ecdsa_recoverable_signature* sig) {
secp256k1_scalar r, s;
(void)ctx;
ARG_CHECK(output64 != NULL);
ARG_CHECK(sig != NULL);
ARG_CHECK(recid != NULL);
secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, recid, sig);
secp256k1_scalar_get_b32(&output64[0], &r);
secp256k1_scalar_get_b32(&output64[32], &s);
return 1;
}
int secp256k1_ecdsa_recoverable_signature_convert(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const secp256k1_ecdsa_recoverable_signature* sigin) {
secp256k1_scalar r, s;
int recid;
(void)ctx;
ARG_CHECK(sig != NULL);
ARG_CHECK(sigin != NULL);
secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, sigin);
secp256k1_ecdsa_signature_save(sig, &r, &s);
return 1;
}
static int secp256k1_ecdsa_sig_recover(const secp256k1_ecmult_context *ctx, const secp256k1_scalar *sigr, const secp256k1_scalar* sigs, secp256k1_ge *pubkey, const secp256k1_scalar *message, int recid) {
unsigned char brx[32];
secp256k1_fe fx;
secp256k1_ge x;
secp256k1_gej xj;
secp256k1_scalar rn, u1, u2;
secp256k1_gej qj;
int r;
if (secp256k1_scalar_is_zero(sigr) || secp256k1_scalar_is_zero(sigs)) {
return 0;
}
secp256k1_scalar_get_b32(brx, sigr);
r = secp256k1_fe_set_b32(&fx, brx);
(void)r;
VERIFY_CHECK(r); /* brx comes from a scalar, so is less than the order; certainly less than p */
if (recid & 2) {
if (secp256k1_fe_cmp_var(&fx, &secp256k1_ecdsa_const_p_minus_order) >= 0) {
return 0;
}
secp256k1_fe_add(&fx, &secp256k1_ecdsa_const_order_as_fe);
}
if (!secp256k1_ge_set_xo_var(&x, &fx, recid & 1)) {
return 0;
}
secp256k1_gej_set_ge(&xj, &x);
secp256k1_scalar_inverse_var(&rn, sigr);
secp256k1_scalar_mul(&u1, &rn, message);
secp256k1_scalar_negate(&u1, &u1);
secp256k1_scalar_mul(&u2, &rn, sigs);
secp256k1_ecmult(ctx, &qj, &xj, &u2, &u1);
secp256k1_ge_set_gej_var(pubkey, &qj);
return !secp256k1_gej_is_infinity(&qj);
}
int secp256k1_ecdsa_sign_recoverable(const secp256k1_context* ctx, secp256k1_ecdsa_recoverable_signature *signature, const unsigned char *msg32, const unsigned char *seckey, secp256k1_nonce_function noncefp, const void* noncedata) {
secp256k1_scalar r, s;
secp256k1_scalar sec, non, msg;
int recid;
int ret = 0;
int overflow = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
ARG_CHECK(msg32 != NULL);
ARG_CHECK(signature != NULL);
ARG_CHECK(seckey != NULL);
if (noncefp == NULL) {
noncefp = secp256k1_nonce_function_default;
}
secp256k1_scalar_set_b32(&sec, seckey, &overflow);
/* Fail if the secret key is invalid. */
if (!overflow && !secp256k1_scalar_is_zero(&sec)) {
unsigned char nonce32[32];
unsigned int count = 0;
secp256k1_scalar_set_b32(&msg, msg32, NULL);
while (1) {
ret = noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count);
if (!ret) {
break;
}
secp256k1_scalar_set_b32(&non, nonce32, &overflow);
if (!secp256k1_scalar_is_zero(&non) && !overflow) {
if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, &recid)) {
break;
}
}
count++;
}
memset(nonce32, 0, 32);
secp256k1_scalar_clear(&msg);
secp256k1_scalar_clear(&non);
secp256k1_scalar_clear(&sec);
}
if (ret) {
secp256k1_ecdsa_recoverable_signature_save(signature, &r, &s, recid);
} else {
memset(signature, 0, sizeof(*signature));
}
return ret;
}
int secp256k1_ecdsa_recover(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const secp256k1_ecdsa_recoverable_signature *signature, const unsigned char *msg32) {
secp256k1_ge q;
secp256k1_scalar r, s;
secp256k1_scalar m;
int recid;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(msg32 != NULL);
ARG_CHECK(signature != NULL);
ARG_CHECK(pubkey != NULL);
secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, signature);
VERIFY_CHECK(recid >= 0 && recid < 4); /* should have been caught in parse_compact */
secp256k1_scalar_set_b32(&m, msg32, NULL);
if (secp256k1_ecdsa_sig_recover(&ctx->ecmult_ctx, &r, &s, &q, &m, recid)) {
secp256k1_pubkey_save(pubkey, &q);
return 1;
} else {
memset(pubkey, 0, sizeof(*pubkey));
return 0;
}
}
#endif /* SECP256K1_MODULE_RECOVERY_MAIN_H */
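End to end, the recovery module lets a verifier reconstruct the signer's public key from the 64-byte compact signature plus the recovery id in [0, 3]. A minimal round-trip sketch using only the public functions defined above (recover_roundtrip is a hypothetical helper; ctx is assumed to be a context created with both SECP256K1_CONTEXT_SIGN and SECP256K1_CONTEXT_VERIFY):

static int recover_roundtrip(const secp256k1_context *ctx,
                             const unsigned char *msg32,
                             const unsigned char *seckey32,
                             secp256k1_pubkey *recovered) {
    secp256k1_ecdsa_recoverable_signature rsig;
    unsigned char sig64[64];
    int recid;

    /* sign, serialize to 64 bytes + recid, parse back, then recover the key */
    return secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, seckey32, NULL, NULL)
        && secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig64, &recid, &rsig)
        && secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, recid)
        && secp256k1_ecdsa_recover(ctx, recovered, &rsig, msg32);
    /* on success, *recovered equals the public key corresponding to seckey32 */
}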


@@ -1,393 +0,0 @@
/**********************************************************************
* Copyright (c) 2013-2015 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_MODULE_RECOVERY_TESTS_H
#define SECP256K1_MODULE_RECOVERY_TESTS_H
static int recovery_test_nonce_function(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) {
(void) msg32;
(void) key32;
(void) algo16;
(void) data;
/* On the first run, return 0 to force a second run */
if (counter == 0) {
memset(nonce32, 0, 32);
return 1;
}
/* On the second run, return an overflow to force a third run */
if (counter == 1) {
memset(nonce32, 0xff, 32);
return 1;
}
/* On the next run, return a valid nonce, but flip a coin as to whether or not to fail signing. */
memset(nonce32, 1, 32);
return secp256k1_rand_bits(1);
}
void test_ecdsa_recovery_api(void) {
/* Setup contexts that just count errors */
secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
secp256k1_pubkey pubkey;
secp256k1_pubkey recpubkey;
secp256k1_ecdsa_signature normal_sig;
secp256k1_ecdsa_recoverable_signature recsig;
unsigned char privkey[32] = { 1 };
unsigned char message[32] = { 2 };
int32_t ecount = 0;
int recid = 0;
unsigned char sig[74];
unsigned char zero_privkey[32] = { 0 };
unsigned char over_privkey[32] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_error_callback(both, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount);
/* Construct and verify corresponding public key. */
CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1);
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
/* Check bad contexts and NULLs for signing */
ecount = 0;
CHECK(secp256k1_ecdsa_sign_recoverable(none, &recsig, message, privkey, NULL, NULL) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_ecdsa_sign_recoverable(sign, &recsig, message, privkey, NULL, NULL) == 1);
CHECK(ecount == 1);
CHECK(secp256k1_ecdsa_sign_recoverable(vrfy, &recsig, message, privkey, NULL, NULL) == 0);
CHECK(ecount == 2);
CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
CHECK(ecount == 2);
CHECK(secp256k1_ecdsa_sign_recoverable(both, NULL, message, privkey, NULL, NULL) == 0);
CHECK(ecount == 3);
CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, NULL, privkey, NULL, NULL) == 0);
CHECK(ecount == 4);
CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, NULL, NULL, NULL) == 0);
CHECK(ecount == 5);
/* This will fail or succeed randomly, and in either case will not ARG_CHECK failure */
secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, recovery_test_nonce_function, NULL);
CHECK(ecount == 5);
/* These will all fail, but not in ARG_CHECK way */
CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, zero_privkey, NULL, NULL) == 0);
CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, over_privkey, NULL, NULL) == 0);
/* This one will succeed. */
CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
CHECK(ecount == 5);
/* Check signing with a goofy nonce function */
/* Check bad contexts and NULLs for recovery */
ecount = 0;
CHECK(secp256k1_ecdsa_recover(none, &recpubkey, &recsig, message) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_ecdsa_recover(sign, &recpubkey, &recsig, message) == 0);
CHECK(ecount == 2);
CHECK(secp256k1_ecdsa_recover(vrfy, &recpubkey, &recsig, message) == 1);
CHECK(ecount == 2);
CHECK(secp256k1_ecdsa_recover(both, &recpubkey, &recsig, message) == 1);
CHECK(ecount == 2);
CHECK(secp256k1_ecdsa_recover(both, NULL, &recsig, message) == 0);
CHECK(ecount == 3);
CHECK(secp256k1_ecdsa_recover(both, &recpubkey, NULL, message) == 0);
CHECK(ecount == 4);
CHECK(secp256k1_ecdsa_recover(both, &recpubkey, &recsig, NULL) == 0);
CHECK(ecount == 5);
/* Check NULLs for conversion */
CHECK(secp256k1_ecdsa_sign(both, &normal_sig, message, privkey, NULL, NULL) == 1);
ecount = 0;
CHECK(secp256k1_ecdsa_recoverable_signature_convert(both, NULL, &recsig) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_convert(both, &normal_sig, NULL) == 0);
CHECK(ecount == 2);
CHECK(secp256k1_ecdsa_recoverable_signature_convert(both, &normal_sig, &recsig) == 1);
/* Check NULLs for de/serialization */
CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
ecount = 0;
CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, NULL, &recid, &recsig) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, sig, NULL, &recsig) == 0);
CHECK(ecount == 2);
CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, NULL) == 0);
CHECK(ecount == 3);
CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, &recsig) == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, NULL, sig, recid) == 0);
CHECK(ecount == 4);
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, NULL, recid) == 0);
CHECK(ecount == 5);
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, -1) == 0);
CHECK(ecount == 6);
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, 5) == 0);
CHECK(ecount == 7);
/* overflow in signature will fail but not affect ecount */
memcpy(sig, over_privkey, 32);
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, recid) == 0);
CHECK(ecount == 7);
/* cleanup */
secp256k1_context_destroy(none);
secp256k1_context_destroy(sign);
secp256k1_context_destroy(vrfy);
secp256k1_context_destroy(both);
}
void test_ecdsa_recovery_end_to_end(void) {
unsigned char extra[32] = {0x00};
unsigned char privkey[32];
unsigned char message[32];
secp256k1_ecdsa_signature signature[5];
secp256k1_ecdsa_recoverable_signature rsignature[5];
unsigned char sig[74];
secp256k1_pubkey pubkey;
secp256k1_pubkey recpubkey;
int recid = 0;
/* Generate a random key and message. */
{
secp256k1_scalar msg, key;
random_scalar_order_test(&msg);
random_scalar_order_test(&key);
secp256k1_scalar_get_b32(privkey, &key);
secp256k1_scalar_get_b32(message, &msg);
}
/* Construct and verify corresponding public key. */
CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1);
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
/* Serialize/parse compact and verify/recover. */
extra[0] = 0;
CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[0], message, privkey, NULL, NULL) == 1);
CHECK(secp256k1_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1);
CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[4], message, privkey, NULL, NULL) == 1);
CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[1], message, privkey, NULL, extra) == 1);
extra[31] = 1;
CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[2], message, privkey, NULL, extra) == 1);
extra[31] = 0;
extra[0] = 1;
CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[3], message, privkey, NULL, extra) == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
CHECK(memcmp(&signature[4], &signature[0], 64) == 0);
CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
memset(&rsignature[4], 0, sizeof(rsignature[4]));
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
/* Parse compact (with recovery id) and recover. */
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 1);
CHECK(memcmp(&pubkey, &recpubkey, sizeof(pubkey)) == 0);
/* Serialize/destroy/parse signature and verify again. */
CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
sig[secp256k1_rand_bits(6)] += 1 + secp256k1_rand_int(255);
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 0);
/* Recover again */
CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 0 ||
memcmp(&pubkey, &recpubkey, sizeof(pubkey)) != 0);
}
/* Tests several edge cases. */
void test_ecdsa_recovery_edge_cases(void) {
const unsigned char msg32[32] = {
'T', 'h', 'i', 's', ' ', 'i', 's', ' ',
'a', ' ', 'v', 'e', 'r', 'y', ' ', 's',
'e', 'c', 'r', 'e', 't', ' ', 'm', 'e',
's', 's', 'a', 'g', 'e', '.', '.', '.'
};
const unsigned char sig64[64] = {
/* Generated by signing the above message with nonce 'This is the nonce we will use...'
* and secret key 0 (which is not valid), resulting in recid 0. */
0x67, 0xCB, 0x28, 0x5F, 0x9C, 0xD1, 0x94, 0xE8,
0x40, 0xD6, 0x29, 0x39, 0x7A, 0xF5, 0x56, 0x96,
0x62, 0xFD, 0xE4, 0x46, 0x49, 0x99, 0x59, 0x63,
0x17, 0x9A, 0x7D, 0xD1, 0x7B, 0xD2, 0x35, 0x32,
0x4B, 0x1B, 0x7D, 0xF3, 0x4C, 0xE1, 0xF6, 0x8E,
0x69, 0x4F, 0xF6, 0xF1, 0x1A, 0xC7, 0x51, 0xDD,
0x7D, 0xD7, 0x3E, 0x38, 0x7E, 0xE4, 0xFC, 0x86,
0x6E, 0x1B, 0xE8, 0xEC, 0xC7, 0xDD, 0x95, 0x57
};
secp256k1_pubkey pubkey;
/* signature (r,s) = (4,4), which can be recovered with all 4 recids. */
const unsigned char sigb64[64] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
};
secp256k1_pubkey pubkeyb;
secp256k1_ecdsa_recoverable_signature rsig;
secp256k1_ecdsa_signature sig;
int recid;
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 0));
CHECK(!secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 1));
CHECK(secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 2));
CHECK(!secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 3));
CHECK(!secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
for (recid = 0; recid < 4; recid++) {
int i;
int recid2;
/* (4,4) encoded in DER. */
unsigned char sigbder[8] = {0x30, 0x06, 0x02, 0x01, 0x04, 0x02, 0x01, 0x04};
unsigned char sigcder_zr[7] = {0x30, 0x05, 0x02, 0x00, 0x02, 0x01, 0x01};
unsigned char sigcder_zs[7] = {0x30, 0x05, 0x02, 0x01, 0x01, 0x02, 0x00};
unsigned char sigbderalt1[39] = {
0x30, 0x25, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x04, 0x02, 0x01, 0x04,
};
unsigned char sigbderalt2[39] = {
0x30, 0x25, 0x02, 0x01, 0x04, 0x02, 0x20, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
};
unsigned char sigbderalt3[40] = {
0x30, 0x26, 0x02, 0x21, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x04, 0x02, 0x01, 0x04,
};
unsigned char sigbderalt4[40] = {
0x30, 0x26, 0x02, 0x01, 0x04, 0x02, 0x21, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
};
/* (order + r,4) encoded in DER. */
unsigned char sigbderlong[40] = {
0x30, 0x26, 0x02, 0x21, 0x00, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xBA, 0xAE, 0xDC,
0xE6, 0xAF, 0x48, 0xA0, 0x3B, 0xBF, 0xD2, 0x5E,
0x8C, 0xD0, 0x36, 0x41, 0x45, 0x02, 0x01, 0x04
};
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid) == 1);
CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 1);
CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1);
CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 1);
for (recid2 = 0; recid2 < 4; recid2++) {
secp256k1_pubkey pubkey2b;
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid2) == 1);
CHECK(secp256k1_ecdsa_recover(ctx, &pubkey2b, &rsig, msg32) == 1);
/* Verifying with (order + r,4) should always fail. */
CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderlong, sizeof(sigbderlong)) == 1);
CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
}
/* DER parsing tests. */
/* Zero length r/s. */
CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder_zr, sizeof(sigcder_zr)) == 0);
CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder_zs, sizeof(sigcder_zs)) == 0);
/* Leading zeros. */
CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt1, sizeof(sigbderalt1)) == 0);
CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt2, sizeof(sigbderalt2)) == 0);
CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 0);
CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 0);
sigbderalt3[4] = 1;
CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 1);
CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
sigbderalt4[7] = 1;
CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 1);
CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
/* Damage signature. */
sigbder[7]++;
CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1);
CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
sigbder[7]--;
CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, 6) == 0);
CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder) - 1) == 0);
for(i = 0; i < 8; i++) {
int c;
unsigned char orig = sigbder[i];
/* Try every single-byte change. */
for (c = 0; c < 256; c++) {
if (c == orig) {
continue;
}
sigbder[i] = c;
CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 0 || secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
}
sigbder[i] = orig;
}
}
/* Test r/s equal to zero */
{
/* (1,1) encoded in DER. */
unsigned char sigcder[8] = {0x30, 0x06, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01};
unsigned char sigc64[64] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
};
secp256k1_pubkey pubkeyc;
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyc, &rsig, msg32) == 1);
CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 1);
sigcder[4] = 0;
sigc64[31] = 0;
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0);
CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0);
sigcder[4] = 1;
sigcder[7] = 0;
sigc64[31] = 1;
sigc64[63] = 0;
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0);
CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0);
}
}
void run_recovery_tests(void) {
int i;
for (i = 0; i < count; i++) {
test_ecdsa_recovery_api();
}
for (i = 0; i < 64*count; i++) {
test_ecdsa_recovery_end_to_end();
}
test_ecdsa_recovery_edge_cases();
}
#endif /* SECP256K1_MODULE_RECOVERY_TESTS_H */

View File

@ -1,74 +0,0 @@
/**********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_NUM_H
#define SECP256K1_NUM_H
#ifndef USE_NUM_NONE
#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
#endif
#if defined(USE_NUM_GMP)
#include "num_gmp.h"
#else
#error "Please select num implementation"
#endif
/** Copy a number. */
static void secp256k1_num_copy(secp256k1_num *r, const secp256k1_num *a);
/** Convert a number's absolute value to a binary big-endian string.
* There must be enough space. */
static void secp256k1_num_get_bin(unsigned char *r, unsigned int rlen, const secp256k1_num *a);
/** Set a number to the value of a binary big-endian string. */
static void secp256k1_num_set_bin(secp256k1_num *r, const unsigned char *a, unsigned int alen);
/** Compute a modular inverse. The input must be less than the modulus. */
static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *m);
/** Compute the jacobi symbol (a|b). b must be positive and odd. */
static int secp256k1_num_jacobi(const secp256k1_num *a, const secp256k1_num *b);
/** Compare the absolute value of two numbers. */
static int secp256k1_num_cmp(const secp256k1_num *a, const secp256k1_num *b);
/** Test whether two numbers are equal (including sign). */
static int secp256k1_num_eq(const secp256k1_num *a, const secp256k1_num *b);
/** Add two (signed) numbers. */
static void secp256k1_num_add(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b);
/** Subtract two (signed) numbers. */
static void secp256k1_num_sub(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b);
/** Multiply two (signed) numbers. */
static void secp256k1_num_mul(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b);
/** Replace a number by its remainder modulo m. M's sign is ignored. The result is a number between 0 and m-1,
even if r was negative. */
static void secp256k1_num_mod(secp256k1_num *r, const secp256k1_num *m);
/** Right-shift the passed number by bits bits. */
static void secp256k1_num_shift(secp256k1_num *r, int bits);
/** Check whether a number is zero. */
static int secp256k1_num_is_zero(const secp256k1_num *a);
/** Check whether a number is one. */
static int secp256k1_num_is_one(const secp256k1_num *a);
/** Check whether a number is strictly negative. */
static int secp256k1_num_is_neg(const secp256k1_num *a);
/** Change a number's sign. */
static void secp256k1_num_negate(secp256k1_num *r);
#endif
#endif /* SECP256K1_NUM_H */

View File

@ -1,106 +0,0 @@
/**********************************************************************
* Copyright (c) 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_SCALAR_H
#define SECP256K1_SCALAR_H
#include "num.h"
#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
#endif
#if defined(EXHAUSTIVE_TEST_ORDER)
#include "scalar_low.h"
#elif defined(USE_SCALAR_4X64)
#include "scalar_4x64.h"
#elif defined(USE_SCALAR_8X32)
#include "scalar_8x32.h"
#else
#error "Please select scalar implementation"
#endif
/** Clear a scalar to prevent the leak of sensitive data. */
static void secp256k1_scalar_clear(secp256k1_scalar *r);
/** Access bits from a scalar. All requested bits must belong to the same 32-bit limb. */
static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count);
/** Access bits from a scalar. Not constant time. */
static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count);
/** Set a scalar from a big endian byte array. */
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *bin, int *overflow);
/** Set a scalar to an unsigned integer. */
static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v);
/** Convert a scalar to a byte array. */
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a);
/** Add two scalars together (modulo the group order). Returns whether it overflowed. */
static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b);
/** Conditionally add a power of two to a scalar. The result is not allowed to overflow. */
static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag);
/** Multiply two scalars (modulo the group order). */
static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b);
/** Shift a scalar right by some amount strictly between 0 and 16, returning
* the low bits that were shifted off */
static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n);
/** Compute the square of a scalar (modulo the group order). */
static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a);
/** Compute the inverse of a scalar (modulo the group order). */
static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *a);
/** Compute the inverse of a scalar (modulo the group order), without constant-time guarantee. */
static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *a);
/** Compute the complement of a scalar (modulo the group order). */
static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a);
/** Check whether a scalar equals zero. */
static int secp256k1_scalar_is_zero(const secp256k1_scalar *a);
/** Check whether a scalar equals one. */
static int secp256k1_scalar_is_one(const secp256k1_scalar *a);
/** Check whether a scalar, considered as a nonnegative integer, is even. */
static int secp256k1_scalar_is_even(const secp256k1_scalar *a);
/** Check whether a scalar is higher than the group order divided by 2. */
static int secp256k1_scalar_is_high(const secp256k1_scalar *a);
/** Conditionally negate a number, in constant time.
* Returns -1 if the number was negated, 1 otherwise */
static int secp256k1_scalar_cond_negate(secp256k1_scalar *a, int flag);
#ifndef USE_NUM_NONE
/** Convert a scalar to a number. */
static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a);
/** Get the order of the group as a number. */
static void secp256k1_scalar_order_get_num(secp256k1_num *r);
#endif
/** Compare two scalars. */
static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b);
#ifdef USE_ENDOMORPHISM
/** Find r1 and r2 such that r1+r2*2^128 = a. */
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a);
/** Find r1 and r2 such that r1+r2*lambda = a, and r1 and r2 are maximum 128 bits long (see secp256k1_gej_mul_lambda). */
static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a);
#endif
/** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */
static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift);
#endif /* SECP256K1_SCALAR_H */

View File

@ -1,114 +0,0 @@
/**********************************************************************
* Copyright (c) 2015 Andrew Poelstra *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_SCALAR_REPR_IMPL_H
#define SECP256K1_SCALAR_REPR_IMPL_H
#include "scalar.h"
#include <string.h>
SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
return !(*a & 1);
}
SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { *r = 0; }
SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) { *r = v; }
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
if (offset < 32)
return ((*a >> offset) & ((((uint32_t)1) << count) - 1));
else
return 0;
}
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
return secp256k1_scalar_get_bits(a, offset, count);
}
SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; }
static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
*r = (*a + *b) % EXHAUSTIVE_TEST_ORDER;
return *r < *b;
}
static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
if (flag && bit < 32)
*r += (1 << bit);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
#endif
}
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
const int base = 0x100 % EXHAUSTIVE_TEST_ORDER;
int i;
*r = 0;
for (i = 0; i < 32; i++) {
*r = ((*r * base) + b32[i]) % EXHAUSTIVE_TEST_ORDER;
}
/* just deny overflow, it basically always happens */
if (overflow) *overflow = 0;
}
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
memset(bin, 0, 32);
bin[28] = *a >> 24; bin[29] = *a >> 16; bin[30] = *a >> 8; bin[31] = *a;
}
SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
return *a == 0;
}
static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
if (*a == 0) {
*r = 0;
} else {
*r = EXHAUSTIVE_TEST_ORDER - *a;
}
}
SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
return *a == 1;
}
static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
return *a > EXHAUSTIVE_TEST_ORDER / 2;
}
static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
if (flag) secp256k1_scalar_negate(r, r);
return flag ? -1 : 1;
}
static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
*r = (*a * *b) % EXHAUSTIVE_TEST_ORDER;
}
static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
int ret;
VERIFY_CHECK(n > 0);
VERIFY_CHECK(n < 16);
ret = *r & ((1 << n) - 1);
*r >>= n;
return ret;
}
static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) {
*r = (*a * *a) % EXHAUSTIVE_TEST_ORDER;
}
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
*r1 = *a;
*r2 = 0;
}
SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
return *a == *b;
}
#endif /* SECP256K1_SCALAR_REPR_IMPL_H */

View File

@ -1,651 +0,0 @@
/**********************************************************************
* Copyright (c) 2013-2015 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#include "include/secp256k1.h"
#include "include/secp256k1_preallocated.h"
#include "util.h"
#include "num_impl.h"
#include "field_impl.h"
#include "scalar_impl.h"
#include "group_impl.h"
#include "ecmult_impl.h"
#include "ecmult_const_impl.h"
#include "ecmult_gen_impl.h"
#include "ecdsa_impl.h"
#include "eckey_impl.h"
#include "hash_impl.h"
#include "scratch_impl.h"
#define ARG_CHECK(cond) do { \
if (EXPECT(!(cond), 0)) { \
secp256k1_callback_call(&ctx->illegal_callback, #cond); \
return 0; \
} \
} while(0)
#define ARG_CHECK_NO_RETURN(cond) do { \
if (EXPECT(!(cond), 0)) { \
secp256k1_callback_call(&ctx->illegal_callback, #cond); \
} \
} while(0)
#ifndef USE_EXTERNAL_DEFAULT_CALLBACKS
#include <stdlib.h>
#include <stdio.h>
static void secp256k1_default_illegal_callback_fn(const char* str, void* data) {
(void)data;
fprintf(stderr, "[libsecp256k1] illegal argument: %s\n", str);
abort();
}
static void secp256k1_default_error_callback_fn(const char* str, void* data) {
(void)data;
fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str);
abort();
}
#else
void secp256k1_default_illegal_callback_fn(const char* str, void* data);
void secp256k1_default_error_callback_fn(const char* str, void* data);
#endif
static const secp256k1_callback default_illegal_callback = {
secp256k1_default_illegal_callback_fn,
NULL
};
static const secp256k1_callback default_error_callback = {
secp256k1_default_error_callback_fn,
NULL
};
struct secp256k1_context_struct {
secp256k1_ecmult_context ecmult_ctx;
secp256k1_ecmult_gen_context ecmult_gen_ctx;
secp256k1_callback illegal_callback;
secp256k1_callback error_callback;
};
static const secp256k1_context secp256k1_context_no_precomp_ = {
{ 0 },
{ 0 },
{ secp256k1_default_illegal_callback_fn, 0 },
{ secp256k1_default_error_callback_fn, 0 }
};
const secp256k1_context *secp256k1_context_no_precomp = &secp256k1_context_no_precomp_;
size_t secp256k1_context_preallocated_size(unsigned int flags) {
size_t ret = ROUND_TO_ALIGN(sizeof(secp256k1_context));
if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) {
secp256k1_callback_call(&default_illegal_callback,
"Invalid flags");
return 0;
}
if (flags & SECP256K1_FLAGS_BIT_CONTEXT_SIGN) {
ret += SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
}
if (flags & SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) {
ret += SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
}
return ret;
}
size_t secp256k1_context_preallocated_clone_size(const secp256k1_context* ctx) {
size_t ret = ROUND_TO_ALIGN(sizeof(secp256k1_context));
VERIFY_CHECK(ctx != NULL);
if (secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) {
ret += SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
}
if (secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)) {
ret += SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
}
return ret;
}
secp256k1_context* secp256k1_context_preallocated_create(void* prealloc, unsigned int flags) {
void* const base = prealloc;
size_t prealloc_size;
secp256k1_context* ret;
VERIFY_CHECK(prealloc != NULL);
prealloc_size = secp256k1_context_preallocated_size(flags);
ret = (secp256k1_context*)manual_alloc(&prealloc, sizeof(secp256k1_context), base, prealloc_size);
ret->illegal_callback = default_illegal_callback;
ret->error_callback = default_error_callback;
if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) {
secp256k1_callback_call(&ret->illegal_callback,
"Invalid flags");
return NULL;
}
secp256k1_ecmult_context_init(&ret->ecmult_ctx);
secp256k1_ecmult_gen_context_init(&ret->ecmult_gen_ctx);
if (flags & SECP256K1_FLAGS_BIT_CONTEXT_SIGN) {
secp256k1_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &prealloc);
}
if (flags & SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) {
secp256k1_ecmult_context_build(&ret->ecmult_ctx, &prealloc);
}
return (secp256k1_context*) ret;
}
secp256k1_context* secp256k1_context_preallocated_clone(const secp256k1_context* ctx, void* prealloc) {
size_t prealloc_size;
secp256k1_context* ret;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(prealloc != NULL);
prealloc_size = secp256k1_context_preallocated_clone_size(ctx);
ret = (secp256k1_context*)prealloc;
memcpy(ret, ctx, prealloc_size);
secp256k1_ecmult_gen_context_finalize_memcpy(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx);
secp256k1_ecmult_context_finalize_memcpy(&ret->ecmult_ctx, &ctx->ecmult_ctx);
return ret;
}
void secp256k1_context_preallocated_destroy(secp256k1_context* ctx) {
ARG_CHECK_NO_RETURN(ctx != secp256k1_context_no_precomp);
if (ctx != NULL) {
secp256k1_ecmult_context_clear(&ctx->ecmult_ctx);
secp256k1_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx);
}
}
void secp256k1_context_set_illegal_callback(secp256k1_context* ctx, void (*fun)(const char* message, void* data), const void* data) {
ARG_CHECK_NO_RETURN(ctx != secp256k1_context_no_precomp);
if (fun == NULL) {
fun = secp256k1_default_illegal_callback_fn;
}
ctx->illegal_callback.fn = fun;
ctx->illegal_callback.data = data;
}
void secp256k1_context_set_error_callback(secp256k1_context* ctx, void (*fun)(const char* message, void* data), const void* data) {
ARG_CHECK_NO_RETURN(ctx != secp256k1_context_no_precomp);
if (fun == NULL) {
fun = secp256k1_default_error_callback_fn;
}
ctx->error_callback.fn = fun;
ctx->error_callback.data = data;
}
static int secp256k1_pubkey_load(const secp256k1_context* ctx, secp256k1_ge* ge, const secp256k1_pubkey* pubkey) {
if (sizeof(secp256k1_ge_storage) == 64) {
/* When the secp256k1_ge_storage type is exactly 64 bytes, use its
* representation inside secp256k1_pubkey, as conversion is very fast.
* Note that secp256k1_pubkey_save must use the same representation. */
secp256k1_ge_storage s;
memcpy(&s, &pubkey->data[0], sizeof(s));
secp256k1_ge_from_storage(ge, &s);
} else {
/* Otherwise, fall back to 32-byte big endian for X and Y. */
secp256k1_fe x, y;
secp256k1_fe_set_b32(&x, pubkey->data);
secp256k1_fe_set_b32(&y, pubkey->data + 32);
secp256k1_ge_set_xy(ge, &x, &y);
}
ARG_CHECK(!secp256k1_fe_is_zero(&ge->x));
return 1;
}
static void secp256k1_pubkey_save(secp256k1_pubkey* pubkey, secp256k1_ge* ge) {
if (sizeof(secp256k1_ge_storage) == 64) {
secp256k1_ge_storage s;
secp256k1_ge_to_storage(&s, ge);
memcpy(&pubkey->data[0], &s, sizeof(s));
} else {
VERIFY_CHECK(!secp256k1_ge_is_infinity(ge));
secp256k1_fe_normalize_var(&ge->x);
secp256k1_fe_normalize_var(&ge->y);
secp256k1_fe_get_b32(pubkey->data, &ge->x);
secp256k1_fe_get_b32(pubkey->data + 32, &ge->y);
}
}
int secp256k1_ec_pubkey_parse(const secp256k1_context* ctx, secp256k1_pubkey* pubkey, const unsigned char *input, size_t inputlen) {
secp256k1_ge Q;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(pubkey != NULL);
memset(pubkey, 0, sizeof(*pubkey));
ARG_CHECK(input != NULL);
if (!secp256k1_eckey_pubkey_parse(&Q, input, inputlen)) {
return 0;
}
secp256k1_pubkey_save(pubkey, &Q);
secp256k1_ge_clear(&Q);
return 1;
}
int secp256k1_ec_pubkey_serialize(const secp256k1_context* ctx, unsigned char *output, size_t *outputlen, const secp256k1_pubkey* pubkey, unsigned int flags) {
secp256k1_ge Q;
size_t len;
int ret = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(outputlen != NULL);
ARG_CHECK(*outputlen >= ((flags & SECP256K1_FLAGS_BIT_COMPRESSION) ? 33 : 65));
len = *outputlen;
*outputlen = 0;
ARG_CHECK(output != NULL);
memset(output, 0, len);
ARG_CHECK(pubkey != NULL);
ARG_CHECK((flags & SECP256K1_FLAGS_TYPE_MASK) == SECP256K1_FLAGS_TYPE_COMPRESSION);
if (secp256k1_pubkey_load(ctx, &Q, pubkey)) {
ret = secp256k1_eckey_pubkey_serialize(&Q, output, &len, flags & SECP256K1_FLAGS_BIT_COMPRESSION);
if (ret) {
*outputlen = len;
}
}
return ret;
}
static void secp256k1_ecdsa_signature_load(const secp256k1_context* ctx, secp256k1_scalar* r, secp256k1_scalar* s, const secp256k1_ecdsa_signature* sig) {
(void)ctx;
if (sizeof(secp256k1_scalar) == 32) {
/* When the secp256k1_scalar type is exactly 32 bytes, use its
* representation inside secp256k1_ecdsa_signature, as conversion is very fast.
* Note that secp256k1_ecdsa_signature_save must use the same representation. */
memcpy(r, &sig->data[0], 32);
memcpy(s, &sig->data[32], 32);
} else {
secp256k1_scalar_set_b32(r, &sig->data[0], NULL);
secp256k1_scalar_set_b32(s, &sig->data[32], NULL);
}
}
static void secp256k1_ecdsa_signature_save(secp256k1_ecdsa_signature* sig, const secp256k1_scalar* r, const secp256k1_scalar* s) {
if (sizeof(secp256k1_scalar) == 32) {
memcpy(&sig->data[0], r, 32);
memcpy(&sig->data[32], s, 32);
} else {
secp256k1_scalar_get_b32(&sig->data[0], r);
secp256k1_scalar_get_b32(&sig->data[32], s);
}
}
int secp256k1_ecdsa_signature_parse_der(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) {
secp256k1_scalar r, s;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(sig != NULL);
ARG_CHECK(input != NULL);
if (secp256k1_ecdsa_sig_parse(&r, &s, input, inputlen)) {
secp256k1_ecdsa_signature_save(sig, &r, &s);
return 1;
} else {
memset(sig, 0, sizeof(*sig));
return 0;
}
}
int secp256k1_ecdsa_signature_parse_compact(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const unsigned char *input64) {
secp256k1_scalar r, s;
int ret = 1;
int overflow = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(sig != NULL);
ARG_CHECK(input64 != NULL);
secp256k1_scalar_set_b32(&r, &input64[0], &overflow);
ret &= !overflow;
secp256k1_scalar_set_b32(&s, &input64[32], &overflow);
ret &= !overflow;
if (ret) {
secp256k1_ecdsa_signature_save(sig, &r, &s);
} else {
memset(sig, 0, sizeof(*sig));
}
return ret;
}
int secp256k1_ecdsa_signature_serialize_der(const secp256k1_context* ctx, unsigned char *output, size_t *outputlen, const secp256k1_ecdsa_signature* sig) {
secp256k1_scalar r, s;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(output != NULL);
ARG_CHECK(outputlen != NULL);
ARG_CHECK(sig != NULL);
secp256k1_ecdsa_signature_load(ctx, &r, &s, sig);
return secp256k1_ecdsa_sig_serialize(output, outputlen, &r, &s);
}
int secp256k1_ecdsa_signature_serialize_compact(const secp256k1_context* ctx, unsigned char *output64, const secp256k1_ecdsa_signature* sig) {
secp256k1_scalar r, s;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(output64 != NULL);
ARG_CHECK(sig != NULL);
secp256k1_ecdsa_signature_load(ctx, &r, &s, sig);
secp256k1_scalar_get_b32(&output64[0], &r);
secp256k1_scalar_get_b32(&output64[32], &s);
return 1;
}
int secp256k1_ecdsa_signature_normalize(const secp256k1_context* ctx, secp256k1_ecdsa_signature *sigout, const secp256k1_ecdsa_signature *sigin) {
secp256k1_scalar r, s;
int ret = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(sigin != NULL);
secp256k1_ecdsa_signature_load(ctx, &r, &s, sigin);
ret = secp256k1_scalar_is_high(&s);
if (sigout != NULL) {
if (ret) {
secp256k1_scalar_negate(&s, &s);
}
secp256k1_ecdsa_signature_save(sigout, &r, &s);
}
return ret;
}
int secp256k1_ecdsa_verify(const secp256k1_context* ctx, const secp256k1_ecdsa_signature *sig, const unsigned char *msg32, const secp256k1_pubkey *pubkey) {
secp256k1_ge q;
secp256k1_scalar r, s;
secp256k1_scalar m;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(msg32 != NULL);
ARG_CHECK(sig != NULL);
ARG_CHECK(pubkey != NULL);
secp256k1_scalar_set_b32(&m, msg32, NULL);
secp256k1_ecdsa_signature_load(ctx, &r, &s, sig);
return (!secp256k1_scalar_is_high(&s) &&
secp256k1_pubkey_load(ctx, &q, pubkey) &&
secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &r, &s, &q, &m));
}
static SECP256K1_INLINE void buffer_append(unsigned char *buf, unsigned int *offset, const void *data, unsigned int len) {
memcpy(buf + *offset, data, len);
*offset += len;
}
static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) {
unsigned char keydata[112];
unsigned int offset = 0;
secp256k1_rfc6979_hmac_sha256 rng;
unsigned int i;
/* We feed a byte array to the PRNG as input, consisting of:
* - the private key (32 bytes) and message (32 bytes), see RFC 6979 3.2d.
* - optionally 32 extra bytes of data, see RFC 6979 3.6 Additional Data.
* - optionally 16 extra bytes with the algorithm name.
* Because the arguments have distinct fixed lengths it is not possible for
* different argument mixtures to emulate each other and result in the same
* nonces.
*/
buffer_append(keydata, &offset, key32, 32);
buffer_append(keydata, &offset, msg32, 32);
if (data != NULL) {
buffer_append(keydata, &offset, data, 32);
}
if (algo16 != NULL) {
buffer_append(keydata, &offset, algo16, 16);
}
secp256k1_rfc6979_hmac_sha256_initialize(&rng, keydata, offset);
memset(keydata, 0, sizeof(keydata));
for (i = 0; i <= counter; i++) {
secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
}
secp256k1_rfc6979_hmac_sha256_finalize(&rng);
return 1;
}
const secp256k1_nonce_function secp256k1_nonce_function_rfc6979 = nonce_function_rfc6979;
const secp256k1_nonce_function secp256k1_nonce_function_default = nonce_function_rfc6979;
int secp256k1_ecdsa_sign(const secp256k1_context* ctx, secp256k1_ecdsa_signature *signature, const unsigned char *msg32, const unsigned char *seckey, secp256k1_nonce_function noncefp, const void* noncedata) {
secp256k1_scalar r, s;
secp256k1_scalar sec, non, msg;
int ret = 0;
int overflow = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
ARG_CHECK(msg32 != NULL);
ARG_CHECK(signature != NULL);
ARG_CHECK(seckey != NULL);
if (noncefp == NULL) {
noncefp = secp256k1_nonce_function_default;
}
secp256k1_scalar_set_b32(&sec, seckey, &overflow);
/* Fail if the secret key is invalid. */
if (!overflow && !secp256k1_scalar_is_zero(&sec)) {
unsigned char nonce32[32];
unsigned int count = 0;
secp256k1_scalar_set_b32(&msg, msg32, NULL);
while (1) {
ret = noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count);
if (!ret) {
break;
}
secp256k1_scalar_set_b32(&non, nonce32, &overflow);
if (!overflow && !secp256k1_scalar_is_zero(&non)) {
if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, NULL)) {
break;
}
}
count++;
}
memset(nonce32, 0, 32);
secp256k1_scalar_clear(&msg);
secp256k1_scalar_clear(&non);
secp256k1_scalar_clear(&sec);
}
if (ret) {
secp256k1_ecdsa_signature_save(signature, &r, &s);
} else {
memset(signature, 0, sizeof(*signature));
}
return ret;
}
int secp256k1_ec_seckey_verify(const secp256k1_context* ctx, const unsigned char *seckey) {
secp256k1_scalar sec;
int ret;
int overflow;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(seckey != NULL);
secp256k1_scalar_set_b32(&sec, seckey, &overflow);
ret = !overflow && !secp256k1_scalar_is_zero(&sec);
secp256k1_scalar_clear(&sec);
return ret;
}
int secp256k1_ec_pubkey_create(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *seckey) {
secp256k1_gej pj;
secp256k1_ge p;
secp256k1_scalar sec;
int overflow;
int ret = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(pubkey != NULL);
memset(pubkey, 0, sizeof(*pubkey));
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
ARG_CHECK(seckey != NULL);
secp256k1_scalar_set_b32(&sec, seckey, &overflow);
ret = (!overflow) & (!secp256k1_scalar_is_zero(&sec));
if (ret) {
secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pj, &sec);
secp256k1_ge_set_gej(&p, &pj);
secp256k1_pubkey_save(pubkey, &p);
}
secp256k1_scalar_clear(&sec);
return ret;
}
int secp256k1_ec_privkey_negate(const secp256k1_context* ctx, unsigned char *seckey) {
secp256k1_scalar sec;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(seckey != NULL);
secp256k1_scalar_set_b32(&sec, seckey, NULL);
secp256k1_scalar_negate(&sec, &sec);
secp256k1_scalar_get_b32(seckey, &sec);
secp256k1_scalar_clear(&sec);
return 1;
}
int secp256k1_ec_pubkey_negate(const secp256k1_context* ctx, secp256k1_pubkey *pubkey) {
int ret = 0;
secp256k1_ge p;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(pubkey != NULL);
ret = secp256k1_pubkey_load(ctx, &p, pubkey);
memset(pubkey, 0, sizeof(*pubkey));
if (ret) {
secp256k1_ge_neg(&p, &p);
secp256k1_pubkey_save(pubkey, &p);
}
return ret;
}
int secp256k1_ec_privkey_tweak_add(const secp256k1_context* ctx, unsigned char *seckey, const unsigned char *tweak) {
secp256k1_scalar term;
secp256k1_scalar sec;
int ret = 0;
int overflow = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(seckey != NULL);
ARG_CHECK(tweak != NULL);
secp256k1_scalar_set_b32(&term, tweak, &overflow);
secp256k1_scalar_set_b32(&sec, seckey, NULL);
ret = !overflow && secp256k1_eckey_privkey_tweak_add(&sec, &term);
memset(seckey, 0, 32);
if (ret) {
secp256k1_scalar_get_b32(seckey, &sec);
}
secp256k1_scalar_clear(&sec);
secp256k1_scalar_clear(&term);
return ret;
}
int secp256k1_ec_pubkey_tweak_add(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *tweak) {
secp256k1_ge p;
secp256k1_scalar term;
int ret = 0;
int overflow = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(pubkey != NULL);
ARG_CHECK(tweak != NULL);
secp256k1_scalar_set_b32(&term, tweak, &overflow);
ret = !overflow && secp256k1_pubkey_load(ctx, &p, pubkey);
memset(pubkey, 0, sizeof(*pubkey));
if (ret) {
if (secp256k1_eckey_pubkey_tweak_add(&ctx->ecmult_ctx, &p, &term)) {
secp256k1_pubkey_save(pubkey, &p);
} else {
ret = 0;
}
}
return ret;
}
int secp256k1_ec_privkey_tweak_mul(const secp256k1_context* ctx, unsigned char *seckey, const unsigned char *tweak) {
secp256k1_scalar factor;
secp256k1_scalar sec;
int ret = 0;
int overflow = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(seckey != NULL);
ARG_CHECK(tweak != NULL);
secp256k1_scalar_set_b32(&factor, tweak, &overflow);
secp256k1_scalar_set_b32(&sec, seckey, NULL);
ret = !overflow && secp256k1_eckey_privkey_tweak_mul(&sec, &factor);
memset(seckey, 0, 32);
if (ret) {
secp256k1_scalar_get_b32(seckey, &sec);
}
secp256k1_scalar_clear(&sec);
secp256k1_scalar_clear(&factor);
return ret;
}
int secp256k1_ec_pubkey_tweak_mul(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *tweak) {
secp256k1_ge p;
secp256k1_scalar factor;
int ret = 0;
int overflow = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(pubkey != NULL);
ARG_CHECK(tweak != NULL);
secp256k1_scalar_set_b32(&factor, tweak, &overflow);
ret = !overflow && secp256k1_pubkey_load(ctx, &p, pubkey);
memset(pubkey, 0, sizeof(*pubkey));
if (ret) {
if (secp256k1_eckey_pubkey_tweak_mul(&ctx->ecmult_ctx, &p, &factor)) {
secp256k1_pubkey_save(pubkey, &p);
} else {
ret = 0;
}
}
return ret;
}
int secp256k1_context_randomize(secp256k1_context* ctx, const unsigned char *seed32) {
VERIFY_CHECK(ctx != NULL);
if (secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) {
secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32);
}
return 1;
}
int secp256k1_ec_pubkey_combine(const secp256k1_context* ctx, secp256k1_pubkey *pubnonce, const secp256k1_pubkey * const *pubnonces, size_t n) {
size_t i;
secp256k1_gej Qj;
secp256k1_ge Q;
ARG_CHECK(pubnonce != NULL);
memset(pubnonce, 0, sizeof(*pubnonce));
ARG_CHECK(n >= 1);
ARG_CHECK(pubnonces != NULL);
secp256k1_gej_set_infinity(&Qj);
for (i = 0; i < n; i++) {
secp256k1_pubkey_load(ctx, &Q, pubnonces[i]);
secp256k1_gej_add_ge(&Qj, &Qj, &Q);
}
if (secp256k1_gej_is_infinity(&Qj)) {
return 0;
}
secp256k1_ge_set_gej(&Q, &Qj);
secp256k1_pubkey_save(pubnonce, &Q);
return 1;
}
#ifdef ENABLE_MODULE_ECDH
# include "modules/ecdh/main_impl.h"
#endif
#ifdef ENABLE_MODULE_RECOVERY
# include "modules/recovery/main_impl.h"
#endif

40
secp256k1-sys/Cargo.toml Normal file
View File

@ -0,0 +1,40 @@
[package]
name = "secp256k1-sys"
version = "0.1.0"
authors = [ "Dawid Ciężarkiewicz <dpc@ucore.info>",
"Andrew Poelstra <apoelstra@wpsoftware.net>",
"Steven Roose <steven@stevenroose.org>" ]
license = "CC0-1.0"
homepage = "https://github.com/rust-bitcoin/rust-secp256k1/"
repository = "https://github.com/rust-bitcoin/rust-secp256k1/"
documentation = "https://docs.rs/secp256k1-sys/"
description = "FFI for Pieter Wuille's `libsecp256k1` library."
keywords = [ "crypto", "ECDSA", "secp256k1", "libsecp256k1", "bitcoin", "ffi" ]
readme = "README.md"
build = "build.rs"
links = "secp256k1"
# Should make docs.rs show all functions, even those behind non-default features
[package.metadata.docs.rs]
features = [ "recovery", "endomorphism", "lowmemory" ]
[build-dependencies]
cc = ">= 1.0.28, <= 1.0.41"
[lib]
name = "secp256k1_sys"
path = "src/lib.rs"
[features]
default = ["std"]
recovery = []
endomorphism = []
lowmemory = []
std = []
# Use this feature to not compile the bundled libsecp256k1 C symbols,
# but use external ones. Use this only if you know what you are doing!
external-symbols = []
# Do not use this feature! HAZMAT. (meant for Fuzzing only. this is *BROKEN CRYPTOGRAPHY*)
fuzztarget = []

122
secp256k1-sys/LICENSE Normal file
View File

@ -0,0 +1,122 @@
Creative Commons Legal Code
CC0 1.0 Universal
CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
HEREUNDER.
Statement of Purpose
The laws of most jurisdictions throughout the world automatically confer
exclusive Copyright and Related Rights (defined below) upon the creator
and subsequent owner(s) (each and all, an "owner") of an original work of
authorship and/or a database (each, a "Work").
Certain owners wish to permanently relinquish those rights to a Work for
the purpose of contributing to a commons of creative, cultural and
scientific works ("Commons") that the public can reliably and without fear
of later claims of infringement build upon, modify, incorporate in other
works, reuse and redistribute as freely as possible in any form whatsoever
and for any purposes, including without limitation commercial purposes.
These owners may contribute to the Commons to promote the ideal of a free
culture and the further production of creative, cultural and scientific
works, or to gain reputation or greater distribution for their Work in
part through the use and efforts of others.
For these and/or other purposes and motivations, and without any
expectation of additional consideration or compensation, the person
associating CC0 with a Work (the "Affirmer"), to the extent that he or she
is an owner of Copyright and Related Rights in the Work, voluntarily
elects to apply CC0 to the Work and publicly distribute the Work under its
terms, with knowledge of his or her Copyright and Related Rights in the
Work and the meaning and intended legal effect of CC0 on those rights.
1. Copyright and Related Rights. A Work made available under CC0 may be
protected by copyright and related or neighboring rights ("Copyright and
Related Rights"). Copyright and Related Rights include, but are not
limited to, the following:
i. the right to reproduce, adapt, distribute, perform, display,
communicate, and translate a Work;
ii. moral rights retained by the original author(s) and/or performer(s);
iii. publicity and privacy rights pertaining to a person's image or
likeness depicted in a Work;
iv. rights protecting against unfair competition in regards to a Work,
subject to the limitations in paragraph 4(a), below;
v. rights protecting the extraction, dissemination, use and reuse of data
in a Work;
vi. database rights (such as those arising under Directive 96/9/EC of the
European Parliament and of the Council of 11 March 1996 on the legal
protection of databases, and under any national implementation
thereof, including any amended or successor version of such
directive); and
vii. other similar, equivalent or corresponding rights throughout the
world based on applicable law or treaty, and any national
implementations thereof.
2. Waiver. To the greatest extent permitted by, but not in contravention
of, applicable law, Affirmer hereby overtly, fully, permanently,
irrevocably and unconditionally waives, abandons, and surrenders all of
Affirmer's Copyright and Related Rights and associated claims and causes
of action, whether now known or unknown (including existing as well as
future claims and causes of action), in the Work (i) in all territories
worldwide, (ii) for the maximum duration provided by applicable law or
treaty (including future time extensions), (iii) in any current or future
medium and for any number of copies, and (iv) for any purpose whatsoever,
including without limitation commercial, advertising or promotional
purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
member of the public at large and to the detriment of Affirmer's heirs and
successors, fully intending that such Waiver shall not be subject to
revocation, rescission, cancellation, termination, or any other legal or
equitable action to disrupt the quiet enjoyment of the Work by the public
as contemplated by Affirmer's express Statement of Purpose.
3. Public License Fallback. Should any part of the Waiver for any reason
be judged legally invalid or ineffective under applicable law, then the
Waiver shall be preserved to the maximum extent permitted taking into
account Affirmer's express Statement of Purpose. In addition, to the
extent the Waiver is so judged Affirmer hereby grants to each affected
person a royalty-free, non transferable, non sublicensable, non exclusive,
irrevocable and unconditional license to exercise Affirmer's Copyright and
Related Rights in the Work (i) in all territories worldwide, (ii) for the
maximum duration provided by applicable law or treaty (including future
time extensions), (iii) in any current or future medium and for any number
of copies, and (iv) for any purpose whatsoever, including without
limitation commercial, advertising or promotional purposes (the
"License"). The License shall be deemed effective as of the date CC0 was
applied by Affirmer to the Work. Should any part of the License for any
reason be judged legally invalid or ineffective under applicable law, such
partial invalidity or ineffectiveness shall not invalidate the remainder
of the License, and in such case Affirmer hereby affirms that he or she
will not (i) exercise any of his or her remaining Copyright and Related
Rights in the Work or (ii) assert any associated claims and causes of
action with respect to the Work, in either case contrary to Affirmer's
express Statement of Purpose.
4. Limitations and Disclaimers.
a. No trademark or patent rights held by Affirmer are waived, abandoned,
surrendered, licensed or otherwise affected by this document.
b. Affirmer offers the Work as-is and makes no representations or
warranties of any kind concerning the Work, express, implied,
statutory or otherwise, including without limitation warranties of
title, merchantability, fitness for a particular purpose, non
infringement, or the absence of latent or other defects, accuracy, or
the present or absence of errors, whether or not discoverable, all to
the greatest extent permissible under applicable law.
c. Affirmer disclaims responsibility for clearing rights of other persons
that may apply to the Work or any use thereof, including without
limitation any person's Copyright and Related Rights in the Work.
Further, Affirmer disclaims responsibility for obtaining any necessary
consents, permissions or other rights required for any use of the
Work.
d. Affirmer understands and acknowledges that Creative Commons is not a
party to this document and has no duty or obligation with respect to
this CC0 or use of the Work.

34
secp256k1-sys/README.md Normal file
View File

@ -0,0 +1,34 @@
secp256k1-sys
=============
This crate provides Rust definitions for the FFI structures and methods.
## Vendoring
The default build process is to build using the vendored libsecp256k1 sources in
the depend folder. These sources are prefixed with a special
rust-secp256k1-sys-specific prefix of the form `rustsecp256k1_v1_2_3_`, where the
digits track the crate version (so `rustsecp256k1_v0_1_0_` for this release).
This prefix ensures that no symbol collision can happen (see the sketch below):
- when a Rust project has two different versions of rust-secp256k1 in its
dependency tree, or
- when rust-secp256k1 is used for building a static library in a context where
existing libsecp256k1 symbols are already linked.
To update the vendored sources, use the `vendor-libsecp.sh` script:
```
$ ./vendor-libsecp.sh depend <version-code> <rev>
```
- Where `<version-code>` is the secp256k1-sys version number underscored: `0_1_2`.
- Where `<rev>` is the git revision of libsecp256k1 to check out.
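For example, assuming the `0_1_0` version code used in this change and the upstream revision recorded in the generated hash file, the invocation would be `./vendor-libsecp.sh depend 0_1_0 143dc6e9ee31852a60321b23eea407d2006171da`.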
## Linking to external symbols
For the more exotic use cases, this crate can be used with existing libsecp256k1
symbols by using the `external-symbols` feature. How to set up rustc to link
against those existing symbols is largely left as an exercise to the reader; a
minimal starting point is sketched below.
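As one hypothetical starting point (a sketch with a placeholder library location, not something prescribed by this crate), a downstream build script could add a search path for its prebuilt library; note that the vendored build script shown below already emits the `cargo:rustc-link-lib=static=secp256k1` directive when `external-symbols` is enabled:
```rust
// build.rs of a hypothetical downstream crate that enables the
// `external-symbols` feature of secp256k1-sys and ships its own
// prebuilt libsecp256k1.
fn main() {
    // Placeholder path: wherever libsecp256k1.a actually lives on the build host.
    println!("cargo:rustc-link-search=native=/usr/local/lib");
    // No link-lib directive is needed here: when `external-symbols` is enabled,
    // the secp256k1-sys build script itself requests the static `secp256k1`
    // library instead of compiling the vendored, prefixed C sources.
}
```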

View File

@ -26,6 +26,11 @@ extern crate cc;
use std::env;
fn main() {
if cfg!(feature = "external-symbols") {
println!("cargo:rustc-link-lib=static=secp256k1");
return;
}
// Check whether we can use 64-bit compilation
let use_64bit_compilation = if env::var("CARGO_CFG_TARGET_POINTER_WIDTH").unwrap() == "64" {
let check = cc::Build::new().file("depend/check_uint128_t.c")

View File

@ -0,0 +1,26 @@
13,37d12
< static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* error_callback, size_t size) {
< const size_t base_alloc = ((sizeof(secp256k1_scratch) + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT;
< void *alloc = checked_malloc(error_callback, base_alloc + size);
< secp256k1_scratch* ret = (secp256k1_scratch *)alloc;
< if (ret != NULL) {
< memset(ret, 0, sizeof(*ret));
< memcpy(ret->magic, "scratch", 8);
< ret->data = (void *) ((char *) alloc + base_alloc);
< ret->max_size = size;
< }
< return ret;
< }
<
< static void secp256k1_scratch_destroy(const secp256k1_callback* error_callback, secp256k1_scratch* scratch) {
< if (scratch != NULL) {
< VERIFY_CHECK(scratch->alloc_size == 0); /* all checkpoints should be applied */
< if (memcmp(scratch->magic, "scratch", 8) != 0) {
< secp256k1_callback_call(error_callback, "invalid scratch space");
< return;
< }
< memset(scratch->magic, 0, sizeof(scratch->magic));
< free(scratch);
< }
< }
<

View File

@ -0,0 +1,2 @@
# This file was automatically created by ./vendor-libsecp.sh
143dc6e9ee31852a60321b23eea407d2006171da

View File

@ -0,0 +1,43 @@
139,149d138
< secp256k1_context* secp256k1_context_create(unsigned int flags) {
< size_t const prealloc_size = secp256k1_context_preallocated_size(flags);
< secp256k1_context* ctx = (secp256k1_context*)checked_malloc(&default_error_callback, prealloc_size);
< if (EXPECT(secp256k1_context_preallocated_create(ctx, flags) == NULL, 0)) {
< free(ctx);
< return NULL;
< }
<
< return ctx;
< }
<
164,174d152
< secp256k1_context* secp256k1_context_clone(const secp256k1_context* ctx) {
< secp256k1_context* ret;
< size_t prealloc_size;
<
< VERIFY_CHECK(ctx != NULL);
< prealloc_size = secp256k1_context_preallocated_clone_size(ctx);
< ret = (secp256k1_context*)checked_malloc(&ctx->error_callback, prealloc_size);
< ret = secp256k1_context_preallocated_clone(ctx, ret);
< return ret;
< }
<
183,189d160
< void secp256k1_context_destroy(secp256k1_context* ctx) {
< if (ctx != NULL) {
< secp256k1_context_preallocated_destroy(ctx);
< free(ctx);
< }
< }
<
206,215d176
< }
<
< secp256k1_scratch_space* secp256k1_scratch_space_create(const secp256k1_context* ctx, size_t max_size) {
< VERIFY_CHECK(ctx != NULL);
< return secp256k1_scratch_create(&ctx->error_callback, max_size);
< }
<
< void secp256k1_scratch_space_destroy(const secp256k1_context *ctx, secp256k1_scratch_space* scratch) {
< VERIFY_CHECK(ctx != NULL);
< secp256k1_scratch_destroy(&ctx->error_callback, scratch);

View File

@ -0,0 +1,22 @@
202,204d201
< SECP256K1_API secp256k1_context* secp256k1_context_create(
< unsigned int flags
< ) SECP256K1_WARN_UNUSED_RESULT;
215,217d211
< SECP256K1_API secp256k1_context* secp256k1_context_clone(
< const secp256k1_context* ctx
< ) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT;
232,234d225
< SECP256K1_API void secp256k1_context_destroy(
< secp256k1_context* ctx
< );
311,314d301
< SECP256K1_API SECP256K1_WARN_UNUSED_RESULT secp256k1_scratch_space* secp256k1_scratch_space_create(
< const secp256k1_context* ctx,
< size_t size
< ) SECP256K1_ARG_NONNULL(1);
322,325d308
< SECP256K1_API void secp256k1_scratch_space_destroy(
< const secp256k1_context* ctx,
< secp256k1_scratch_space* scratch
< ) SECP256K1_ARG_NONNULL(1);
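
The patches above strip the heap-allocating context and scratch-space entry points (and their declarations) from the vendored C sources, leaving only the preallocated-context API (see the `secp256k1_preallocated.h` diff further down), so memory management moves to the Rust side. Below is a hedged sketch of what driving that API over raw FFI might look like; the extern declarations mirror the prefixed prototypes shown in this commit, while the flag constants and alignment choice are assumptions made for the example, not code from this PR.

```rust
// Sketch only: managing a context through the remaining preallocated API.
use std::alloc::{alloc, dealloc, Layout};
use std::os::raw::{c_uint, c_void};

#[repr(C)]
pub struct Context {
    _private: [u8; 0], // opaque on the Rust side
}

extern "C" {
    fn rustsecp256k1_v0_1_0_context_preallocated_size(flags: c_uint) -> usize;
    fn rustsecp256k1_v0_1_0_context_preallocated_create(prealloc: *mut c_void, flags: c_uint) -> *mut Context;
    fn rustsecp256k1_v0_1_0_context_preallocated_destroy(ctx: *mut Context);
}

// SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_{SIGN,VERIFY};
// numeric values assumed to match the upstream header.
const CONTEXT_SIGN: c_uint = 1 | (1 << 9);
const CONTEXT_VERIFY: c_uint = 1 | (1 << 8);

fn main() {
    unsafe {
        let flags = CONTEXT_SIGN | CONTEXT_VERIFY;
        // Ask the C code how much memory it needs, then hand it a block we own.
        let size = rustsecp256k1_v0_1_0_context_preallocated_size(flags);
        let layout = Layout::from_size_align(size, 16).expect("valid layout");
        let mem = alloc(layout);
        assert!(!mem.is_null(), "allocation failed");

        let ctx = rustsecp256k1_v0_1_0_context_preallocated_create(mem as *mut c_void, flags);
        assert!(!ctx.is_null());

        // ... use ctx with the other rustsecp256k1_v0_1_0_* functions ...

        // Tear down in reverse order: destroy the context, then free the block.
        rustsecp256k1_v0_1_0_context_preallocated_destroy(ctx);
        dealloc(mem, layout);
    }
}
```

Presumably the safe wrappers in `src/lib.rs` follow the same pattern internally; the sketch only illustrates why the malloc-based C entry points can be dropped.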

View File

@ -2,13 +2,13 @@ ACLOCAL_AMFLAGS = -I build-aux/m4
lib_LTLIBRARIES = libsecp256k1.la
if USE_JNI
JNI_LIB = libsecp256k1_jni.la
JNI_LIB = librustsecp256k1_v0_1_0_jni.la
noinst_LTLIBRARIES = $(JNI_LIB)
else
JNI_LIB =
endif
include_HEADERS = include/secp256k1.h
include_HEADERS += include/secp256k1_preallocated.h
include_HEADERS += include/rustsecp256k1_v0_1_0_preallocated.h
noinst_HEADERS =
noinst_HEADERS += src/scalar.h
noinst_HEADERS += src/scalar_4x64.h
@ -58,7 +58,7 @@ noinst_HEADERS += contrib/lax_der_privatekey_parsing.h
noinst_HEADERS += contrib/lax_der_privatekey_parsing.c
if USE_EXTERNAL_ASM
COMMON_LIB = libsecp256k1_common.la
COMMON_LIB = librustsecp256k1_v0_1_0_common.la
noinst_LTLIBRARIES = $(COMMON_LIB)
else
COMMON_LIB =
@ -69,16 +69,16 @@ pkgconfig_DATA = libsecp256k1.pc
if USE_EXTERNAL_ASM
if USE_ASM_ARM
libsecp256k1_common_la_SOURCES = src/asm/field_10x26_arm.s
librustsecp256k1_v0_1_0_common_la_SOURCES = src/asm/field_10x26_arm.s
endif
endif
libsecp256k1_la_SOURCES = src/secp256k1.c
libsecp256k1_la_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/include -I$(top_srcdir)/src $(SECP_INCLUDES)
libsecp256k1_la_LIBADD = $(JNI_LIB) $(SECP_LIBS) $(COMMON_LIB)
librustsecp256k1_v0_1_0_la_SOURCES = src/secp256k1.c
librustsecp256k1_v0_1_0_la_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/include -I$(top_srcdir)/src $(SECP_INCLUDES)
librustsecp256k1_v0_1_0_la_LIBADD = $(JNI_LIB) $(SECP_LIBS) $(COMMON_LIB)
libsecp256k1_jni_la_SOURCES = src/java/org_bitcoin_NativeSecp256k1.c src/java/org_bitcoin_Secp256k1Context.c
libsecp256k1_jni_la_CPPFLAGS = -DSECP256K1_BUILD $(JNI_INCLUDES)
librustsecp256k1_v0_1_0_jni_la_SOURCES = src/java/org_bitcoin_NativeSecp256k1.c src/java/org_bitcoin_Secp256k1Context.c
librustsecp256k1_v0_1_0_jni_la_CPPFLAGS = -DSECP256K1_BUILD $(JNI_INCLUDES)
noinst_PROGRAMS =
if USE_BENCHMARK
@ -161,7 +161,7 @@ gen_%.o: src/gen_%.c
$(gen_context_BIN): $(gen_context_OBJECTS)
$(CC_FOR_BUILD) $(CFLAGS_FOR_BUILD) $(LDFLAGS_FOR_BUILD) $^ -o $@
$(libsecp256k1_la_OBJECTS): src/ecmult_static_context.h
$(librustsecp256k1_v0_1_0_la_OBJECTS): src/ecmult_static_context.h
$(tests_OBJECTS): src/ecmult_static_context.h
$(bench_internal_OBJECTS): src/ecmult_static_context.h
$(bench_ecmult_OBJECTS): src/ecmult_static_context.h

View File

@ -140,7 +140,7 @@ AC_ARG_ENABLE(external_default_callbacks,
[use_external_default_callbacks=no])
AC_ARG_ENABLE(jni,
AS_HELP_STRING([--enable-jni],[enable libsecp256k1_jni [default=no]]),
AS_HELP_STRING([--enable-jni],[enable librustsecp256k1_v0_1_0_jni [default=no]]),
[use_jni=$enableval],
[use_jni=no])

View File

@ -9,7 +9,7 @@
#include "lax_der_parsing.h"
int ecdsa_signature_parse_der_lax(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) {
int rustsecp256k1_v0_1_0_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) {
size_t rpos, rlen, spos, slen;
size_t pos = 0;
size_t lenbyte;
@ -17,7 +17,7 @@ int ecdsa_signature_parse_der_lax(const secp256k1_context* ctx, secp256k1_ecdsa_
int overflow = 0;
/* Hack to initialize sig with a correctly-parsed but invalid signature. */
secp256k1_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
rustsecp256k1_v0_1_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
/* Sequence tag byte */
if (pos == inputlen || input[pos] != 0x30) {
@ -139,11 +139,11 @@ int ecdsa_signature_parse_der_lax(const secp256k1_context* ctx, secp256k1_ecdsa_
}
if (!overflow) {
overflow = !secp256k1_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
overflow = !rustsecp256k1_v0_1_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
}
if (overflow) {
memset(tmpsig, 0, 64);
secp256k1_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
rustsecp256k1_v0_1_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig);
}
return 1;
}

View File

@ -26,8 +26,8 @@
* certain violations are easily supported. You may need to adapt it.
*
* Do not use this for new systems. Use well-defined DER or compact signatures
* instead if you have the choice (see secp256k1_ecdsa_signature_parse_der and
* secp256k1_ecdsa_signature_parse_compact).
* instead if you have the choice (see rustsecp256k1_v0_1_0_ecdsa_signature_parse_der and
* rustsecp256k1_v0_1_0_ecdsa_signature_parse_compact).
*
* The supported violations are:
* - All numbers are parsed as nonnegative integers, even though X.690-0207
@ -77,9 +77,9 @@ extern "C" {
* encoded numbers are out of range, signature validation with it is
* guaranteed to fail for every message and public key.
*/
int ecdsa_signature_parse_der_lax(
const secp256k1_context* ctx,
secp256k1_ecdsa_signature* sig,
int rustsecp256k1_v0_1_0_ecdsa_signature_parse_der_lax(
const rustsecp256k1_v0_1_0_context* ctx,
rustsecp256k1_v0_1_0_ecdsa_signature* sig,
const unsigned char *input,
size_t inputlen
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);

View File

@ -9,7 +9,7 @@
#include "lax_der_privatekey_parsing.h"
int ec_privkey_import_der(const secp256k1_context* ctx, unsigned char *out32, const unsigned char *privkey, size_t privkeylen) {
int ec_privkey_import_der(const rustsecp256k1_v0_1_0_context* ctx, unsigned char *out32, const unsigned char *privkey, size_t privkeylen) {
const unsigned char *end = privkey + privkeylen;
int lenb = 0;
int len = 0;
@ -46,17 +46,17 @@ int ec_privkey_import_der(const secp256k1_context* ctx, unsigned char *out32, co
return 0;
}
memcpy(out32 + 32 - privkey[1], privkey + 2, privkey[1]);
if (!secp256k1_ec_seckey_verify(ctx, out32)) {
if (!rustsecp256k1_v0_1_0_ec_seckey_verify(ctx, out32)) {
memset(out32, 0, 32);
return 0;
}
return 1;
}
int ec_privkey_export_der(const secp256k1_context *ctx, unsigned char *privkey, size_t *privkeylen, const unsigned char *key32, int compressed) {
secp256k1_pubkey pubkey;
int ec_privkey_export_der(const rustsecp256k1_v0_1_0_context *ctx, unsigned char *privkey, size_t *privkeylen, const unsigned char *key32, int compressed) {
rustsecp256k1_v0_1_0_pubkey pubkey;
size_t pubkeylen = 0;
if (!secp256k1_ec_pubkey_create(ctx, &pubkey, key32)) {
if (!rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey, key32)) {
*privkeylen = 0;
return 0;
}
@ -80,7 +80,7 @@ int ec_privkey_export_der(const secp256k1_context *ctx, unsigned char *privkey,
memcpy(ptr, key32, 32); ptr += 32;
memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle);
pubkeylen = 33;
secp256k1_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED);
rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED);
ptr += pubkeylen;
*privkeylen = ptr - privkey;
} else {
@ -105,7 +105,7 @@ int ec_privkey_export_der(const secp256k1_context *ctx, unsigned char *privkey,
memcpy(ptr, key32, 32); ptr += 32;
memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle);
pubkeylen = 65;
secp256k1_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_UNCOMPRESSED);
rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_UNCOMPRESSED);
ptr += pubkeylen;
*privkeylen = ptr - privkey;
}

View File

@ -52,10 +52,10 @@ extern "C" {
* simple 32-byte private keys are sufficient.
*
* Note that this function does not guarantee correct DER output. It is
* guaranteed to be parsable by secp256k1_ec_privkey_import_der
* guaranteed to be parsable by rustsecp256k1_v0_1_0_ec_privkey_import_der
*/
SECP256K1_WARN_UNUSED_RESULT int ec_privkey_export_der(
const secp256k1_context* ctx,
const rustsecp256k1_v0_1_0_context* ctx,
unsigned char *privkey,
size_t *privkeylen,
const unsigned char *seckey,
@ -77,7 +77,7 @@ SECP256K1_WARN_UNUSED_RESULT int ec_privkey_export_der(
* key.
*/
SECP256K1_WARN_UNUSED_RESULT int ec_privkey_import_der(
const secp256k1_context* ctx,
const rustsecp256k1_v0_1_0_context* ctx,
unsigned char *seckey,
const unsigned char *privkey,
size_t privkeylen

View File

@ -35,13 +35,13 @@ extern "C" {
* A constructed context can safely be used from multiple threads
* simultaneously, but API calls that take a non-const pointer to a context
* need exclusive access to it. In particular this is the case for
* secp256k1_context_destroy, secp256k1_context_preallocated_destroy,
* and secp256k1_context_randomize.
* rustsecp256k1_v0_1_0_context_destroy, rustsecp256k1_v0_1_0_context_preallocated_destroy,
* and rustsecp256k1_v0_1_0_context_randomize.
*
* Regarding randomization, either do it once at creation time (in which case
* you do not need any locking for the other calls), or use a read-write lock.
*/
typedef struct secp256k1_context_struct secp256k1_context;
typedef struct rustsecp256k1_v0_1_0_context_struct rustsecp256k1_v0_1_0_context;
/** Opaque data structure that holds rewriteable "scratch space"
*
@ -54,7 +54,7 @@ typedef struct secp256k1_context_struct secp256k1_context;
* Unlike the context object, this cannot safely be shared between threads
* without additional synchronization logic.
*/
typedef struct secp256k1_scratch_space_struct secp256k1_scratch_space;
typedef struct rustsecp256k1_v0_1_0_scratch_space_struct rustsecp256k1_v0_1_0_scratch_space;
/** Opaque data structure that holds a parsed and valid public key.
*
@ -62,11 +62,11 @@ typedef struct secp256k1_scratch_space_struct secp256k1_scratch_space;
* guaranteed to be portable between different platforms or versions. It is
* however guaranteed to be 64 bytes in size, and can be safely copied/moved.
* If you need to convert to a format suitable for storage, transmission, or
* comparison, use secp256k1_ec_pubkey_serialize and secp256k1_ec_pubkey_parse.
* comparison, use rustsecp256k1_v0_1_0_ec_pubkey_serialize and rustsecp256k1_v0_1_0_ec_pubkey_parse.
*/
typedef struct {
unsigned char data[64];
} secp256k1_pubkey;
} rustsecp256k1_v0_1_0_pubkey;
/** Opaque data structure that holds a parsed ECDSA signature.
*
@ -74,12 +74,12 @@ typedef struct {
* guaranteed to be portable between different platforms or versions. It is
* however guaranteed to be 64 bytes in size, and can be safely copied/moved.
* If you need to convert to a format suitable for storage, transmission, or
* comparison, use the secp256k1_ecdsa_signature_serialize_* and
* secp256k1_ecdsa_signature_parse_* functions.
* comparison, use the rustsecp256k1_v0_1_0_ecdsa_signature_serialize_* and
* rustsecp256k1_v0_1_0_ecdsa_signature_parse_* functions.
*/
typedef struct {
unsigned char data[64];
} secp256k1_ecdsa_signature;
} rustsecp256k1_v0_1_0_ecdsa_signature;
/** A pointer to a function to deterministically generate a nonce.
*
@ -97,7 +97,7 @@ typedef struct {
* Except for test cases, this function should compute some cryptographic hash of
* the message, the algorithm, the key and the attempt.
*/
typedef int (*secp256k1_nonce_function)(
typedef int (*rustsecp256k1_v0_1_0_nonce_function)(
unsigned char *nonce32,
const unsigned char *msg32,
const unsigned char *key32,
@ -164,13 +164,13 @@ typedef int (*secp256k1_nonce_function)(
#define SECP256K1_FLAGS_BIT_CONTEXT_SIGN (1 << 9)
#define SECP256K1_FLAGS_BIT_COMPRESSION (1 << 8)
/** Flags to pass to secp256k1_context_create, secp256k1_context_preallocated_size, and
* secp256k1_context_preallocated_create. */
/** Flags to pass to rustsecp256k1_v0_1_0_context_create, rustsecp256k1_v0_1_0_context_preallocated_size, and
* rustsecp256k1_v0_1_0_context_preallocated_create. */
#define SECP256K1_CONTEXT_VERIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_VERIFY)
#define SECP256K1_CONTEXT_SIGN (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_SIGN)
#define SECP256K1_CONTEXT_NONE (SECP256K1_FLAGS_TYPE_CONTEXT)
/** Flag to pass to secp256k1_ec_pubkey_serialize and secp256k1_ec_privkey_export. */
/** Flag to pass to rustsecp256k1_v0_1_0_ec_pubkey_serialize and rustsecp256k1_v0_1_0_ec_privkey_export. */
#define SECP256K1_EC_COMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION | SECP256K1_FLAGS_BIT_COMPRESSION)
#define SECP256K1_EC_UNCOMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION)
@ -186,7 +186,43 @@ typedef int (*secp256k1_nonce_function)(
* API consistency, but currently do not require expensive precomputations or dynamic
* allocations.
*/
SECP256K1_API extern const secp256k1_context *secp256k1_context_no_precomp;
SECP256K1_API extern const rustsecp256k1_v0_1_0_context *rustsecp256k1_v0_1_0_context_no_precomp;
/** Create a secp256k1 context object (in dynamically allocated memory).
*
* This function uses malloc to allocate memory. It is guaranteed that malloc is
* called at most once for every call of this function. If you need to avoid dynamic
* memory allocation entirely, see the functions in rustsecp256k1_v0_1_0_preallocated.h.
*
* Returns: a newly created context object.
* In: flags: which parts of the context to initialize.
*
* See also rustsecp256k1_v0_1_0_context_randomize.
*/
/** Copy a secp256k1 context object (into dynamically allocated memory).
*
* This function uses malloc to allocate memory. It is guaranteed that malloc is
* called at most once for every call of this function. If you need to avoid dynamic
* memory allocation entirely, see the functions in rustsecp256k1_v0_1_0_preallocated.h.
*
* Returns: a newly created context object.
* Args: ctx: an existing context to copy (cannot be NULL)
*/
/** Destroy a secp256k1 context object (created in dynamically allocated memory).
*
* The context pointer may not be used afterwards.
*
* The context to destroy must have been created using rustsecp256k1_v0_1_0_context_create
* or rustsecp256k1_v0_1_0_context_clone. If the context has instead been created using
* rustsecp256k1_v0_1_0_context_preallocated_create or rustsecp256k1_v0_1_0_context_preallocated_clone, the
* behaviour is undefined. In that case, rustsecp256k1_v0_1_0_context_preallocated_destroy must
* be used instead.
*
* Args: ctx: an existing context to destroy, constructed using
* rustsecp256k1_v0_1_0_context_create or rustsecp256k1_v0_1_0_context_clone
*/
/** Set a callback function to be called when an illegal argument is passed to
* an API call. It will only trigger for violations that are mentioned
@ -209,11 +245,11 @@ SECP256K1_API extern const secp256k1_context *secp256k1_context_no_precomp;
* USE_EXTERNAL_DEFAULT_CALLBACKS is defined, which is the case if the build
* has been configured with --enable-external-default-callbacks. Then the
* following two symbols must be provided to link against:
* - void secp256k1_default_illegal_callback_fn(const char* message, void* data);
* - void secp256k1_default_error_callback_fn(const char* message, void* data);
* - void rustsecp256k1_v0_1_0_default_illegal_callback_fn(const char* message, void* data);
* - void rustsecp256k1_v0_1_0_default_error_callback_fn(const char* message, void* data);
* The library can call these default handlers even before a proper callback data
* pointer could have been set using secp256k1_context_set_illegal_callback or
* secp256k1_context_set_illegal_callback, e.g., when the creation of a context
* pointer could have been set using rustsecp256k1_v0_1_0_context_set_illegal_callback or
* rustsecp256k1_v0_1_0_context_set_illegal_callback, e.g., when the creation of a context
* fails. In this case, the corresponding default handler will be called with
* the data pointer argument set to NULL.
*
@ -223,10 +259,10 @@ SECP256K1_API extern const secp256k1_context *secp256k1_context_no_precomp;
* (NULL restores the default handler.)
* data: the opaque pointer to pass to fun above.
*
* See also secp256k1_context_set_error_callback.
* See also rustsecp256k1_v0_1_0_context_set_error_callback.
*/
SECP256K1_API void secp256k1_context_set_illegal_callback(
secp256k1_context* ctx,
SECP256K1_API void rustsecp256k1_v0_1_0_context_set_illegal_callback(
rustsecp256k1_v0_1_0_context* ctx,
void (*fun)(const char* message, void* data),
const void* data
) SECP256K1_ARG_NONNULL(1);
@ -237,25 +273,39 @@ SECP256K1_API void secp256k1_context_set_illegal_callback(
* This can only trigger in case of a hardware failure, miscompilation,
* memory corruption, serious bug in the library, or other error that would
* otherwise result in undefined behaviour. It will not trigger due to mere
* incorrect usage of the API (see secp256k1_context_set_illegal_callback
* incorrect usage of the API (see rustsecp256k1_v0_1_0_context_set_illegal_callback
* for that). After this callback returns, anything may happen, including
* crashing.
*
* Args: ctx: an existing context object (cannot be NULL)
* In: fun: a pointer to a function to call when an internal error occurs,
* taking a message and an opaque pointer (NULL restores the
* default handler, see secp256k1_context_set_illegal_callback
* default handler, see rustsecp256k1_v0_1_0_context_set_illegal_callback
* for details).
* data: the opaque pointer to pass to fun above.
*
* See also secp256k1_context_set_illegal_callback.
* See also rustsecp256k1_v0_1_0_context_set_illegal_callback.
*/
SECP256K1_API void secp256k1_context_set_error_callback(
secp256k1_context* ctx,
SECP256K1_API void rustsecp256k1_v0_1_0_context_set_error_callback(
rustsecp256k1_v0_1_0_context* ctx,
void (*fun)(const char* message, void* data),
const void* data
) SECP256K1_ARG_NONNULL(1);
/** Create a secp256k1 scratch space object.
*
* Returns: a newly created scratch space.
* Args: ctx: an existing context object (cannot be NULL)
* In: size: amount of memory to be available as scratch space. Some extra
* (<100 bytes) will be allocated for extra accounting.
*/
/** Destroy a secp256k1 scratch space.
*
* The pointer may not be used afterwards.
* Args: ctx: a secp256k1 context object.
* scratch: space to destroy
*/
/** Parse a variable-length public key into the pubkey object.
*
@ -271,9 +321,9 @@ SECP256K1_API void secp256k1_context_set_error_callback(
* 0x03), uncompressed (65 bytes, header byte 0x04), or hybrid (65 bytes, header
* byte 0x06 or 0x07) format public keys.
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_parse(
const secp256k1_context* ctx,
secp256k1_pubkey* pubkey,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_pubkey_parse(
const rustsecp256k1_v0_1_0_context* ctx,
rustsecp256k1_v0_1_0_pubkey* pubkey,
const unsigned char *input,
size_t inputlen
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -288,16 +338,16 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_parse(
* In/Out: outputlen: a pointer to an integer which is initially set to the
* size of output, and is overwritten with the written
* size.
* In: pubkey: a pointer to a secp256k1_pubkey containing an
* In: pubkey: a pointer to a rustsecp256k1_v0_1_0_pubkey containing an
* initialized public key.
* flags: SECP256K1_EC_COMPRESSED if serialization should be in
* compressed format, otherwise SECP256K1_EC_UNCOMPRESSED.
*/
SECP256K1_API int secp256k1_ec_pubkey_serialize(
const secp256k1_context* ctx,
SECP256K1_API int rustsecp256k1_v0_1_0_ec_pubkey_serialize(
const rustsecp256k1_v0_1_0_context* ctx,
unsigned char *output,
size_t *outputlen,
const secp256k1_pubkey* pubkey,
const rustsecp256k1_v0_1_0_pubkey* pubkey,
unsigned int flags
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
@ -316,9 +366,9 @@ SECP256K1_API int secp256k1_ec_pubkey_serialize(
* S are zero, the resulting sig value is guaranteed to fail validation for any
* message and public key.
*/
SECP256K1_API int secp256k1_ecdsa_signature_parse_compact(
const secp256k1_context* ctx,
secp256k1_ecdsa_signature* sig,
SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_signature_parse_compact(
const rustsecp256k1_v0_1_0_context* ctx,
rustsecp256k1_v0_1_0_ecdsa_signature* sig,
const unsigned char *input64
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -337,9 +387,9 @@ SECP256K1_API int secp256k1_ecdsa_signature_parse_compact(
* encoded numbers are out of range, signature validation with it is
* guaranteed to fail for every message and public key.
*/
SECP256K1_API int secp256k1_ecdsa_signature_parse_der(
const secp256k1_context* ctx,
secp256k1_ecdsa_signature* sig,
SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(
const rustsecp256k1_v0_1_0_context* ctx,
rustsecp256k1_v0_1_0_ecdsa_signature* sig,
const unsigned char *input,
size_t inputlen
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -355,11 +405,11 @@ SECP256K1_API int secp256k1_ecdsa_signature_parse_der(
* if 0 was returned).
* In: sig: a pointer to an initialized signature object
*/
SECP256K1_API int secp256k1_ecdsa_signature_serialize_der(
const secp256k1_context* ctx,
SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_signature_serialize_der(
const rustsecp256k1_v0_1_0_context* ctx,
unsigned char *output,
size_t *outputlen,
const secp256k1_ecdsa_signature* sig
const rustsecp256k1_v0_1_0_ecdsa_signature* sig
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** Serialize an ECDSA signature in compact (64 byte) format.
@ -369,12 +419,12 @@ SECP256K1_API int secp256k1_ecdsa_signature_serialize_der(
* Out: output64: a pointer to a 64-byte array to store the compact serialization
* In: sig: a pointer to an initialized signature object
*
* See secp256k1_ecdsa_signature_parse_compact for details about the encoding.
* See rustsecp256k1_v0_1_0_ecdsa_signature_parse_compact for details about the encoding.
*/
SECP256K1_API int secp256k1_ecdsa_signature_serialize_compact(
const secp256k1_context* ctx,
SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_signature_serialize_compact(
const rustsecp256k1_v0_1_0_context* ctx,
unsigned char *output64,
const secp256k1_ecdsa_signature* sig
const rustsecp256k1_v0_1_0_ecdsa_signature* sig
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Verify an ECDSA signature.
@ -390,16 +440,16 @@ SECP256K1_API int secp256k1_ecdsa_signature_serialize_compact(
* form are accepted.
*
* If you need to accept ECDSA signatures from sources that do not obey this
* rule, apply secp256k1_ecdsa_signature_normalize to the signature prior to
* rule, apply rustsecp256k1_v0_1_0_ecdsa_signature_normalize to the signature prior to
* validation, but be aware that doing so results in malleable signatures.
*
* For details, see the comments for that function.
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ecdsa_verify(
const secp256k1_context* ctx,
const secp256k1_ecdsa_signature *sig,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ecdsa_verify(
const rustsecp256k1_v0_1_0_context* ctx,
const rustsecp256k1_v0_1_0_ecdsa_signature *sig,
const unsigned char *msg32,
const secp256k1_pubkey *pubkey
const rustsecp256k1_v0_1_0_pubkey *pubkey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** Convert a signature to a normalized lower-S form.
@ -439,25 +489,25 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ecdsa_verify(
* accept various non-unique encodings, so care should be taken when this
* property is required for an application.
*
* The secp256k1_ecdsa_sign function will by default create signatures in the
* lower-S form, and secp256k1_ecdsa_verify will not accept others. In case
* The rustsecp256k1_v0_1_0_ecdsa_sign function will by default create signatures in the
* lower-S form, and rustsecp256k1_v0_1_0_ecdsa_verify will not accept others. In case
* signatures come from a system that cannot enforce this property,
* secp256k1_ecdsa_signature_normalize must be called before verification.
* rustsecp256k1_v0_1_0_ecdsa_signature_normalize must be called before verification.
*/
SECP256K1_API int secp256k1_ecdsa_signature_normalize(
const secp256k1_context* ctx,
secp256k1_ecdsa_signature *sigout,
const secp256k1_ecdsa_signature *sigin
SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_signature_normalize(
const rustsecp256k1_v0_1_0_context* ctx,
rustsecp256k1_v0_1_0_ecdsa_signature *sigout,
const rustsecp256k1_v0_1_0_ecdsa_signature *sigin
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(3);
/** An implementation of RFC6979 (using HMAC-SHA256) as nonce generation function.
* If a data pointer is passed, it is assumed to be a pointer to 32 bytes of
* extra entropy.
*/
SECP256K1_API extern const secp256k1_nonce_function secp256k1_nonce_function_rfc6979;
SECP256K1_API extern const rustsecp256k1_v0_1_0_nonce_function rustsecp256k1_v0_1_0_nonce_function_rfc6979;
/** A default safe nonce generation function (currently equal to secp256k1_nonce_function_rfc6979). */
SECP256K1_API extern const secp256k1_nonce_function secp256k1_nonce_function_default;
/** A default safe nonce generation function (currently equal to rustsecp256k1_v0_1_0_nonce_function_rfc6979). */
SECP256K1_API extern const rustsecp256k1_v0_1_0_nonce_function rustsecp256k1_v0_1_0_nonce_function_default;
/** Create an ECDSA signature.
*
@ -467,18 +517,18 @@ SECP256K1_API extern const secp256k1_nonce_function secp256k1_nonce_function_def
* Out: sig: pointer to an array where the signature will be placed (cannot be NULL)
* In: msg32: the 32-byte message hash being signed (cannot be NULL)
* seckey: pointer to a 32-byte secret key (cannot be NULL)
* noncefp:pointer to a nonce generation function. If NULL, secp256k1_nonce_function_default is used
* noncefp:pointer to a nonce generation function. If NULL, rustsecp256k1_v0_1_0_nonce_function_default is used
* ndata: pointer to arbitrary data used by the nonce generation function (can be NULL)
*
* The created signature is always in lower-S form. See
* secp256k1_ecdsa_signature_normalize for more details.
* rustsecp256k1_v0_1_0_ecdsa_signature_normalize for more details.
*/
SECP256K1_API int secp256k1_ecdsa_sign(
const secp256k1_context* ctx,
secp256k1_ecdsa_signature *sig,
SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_sign(
const rustsecp256k1_v0_1_0_context* ctx,
rustsecp256k1_v0_1_0_ecdsa_signature *sig,
const unsigned char *msg32,
const unsigned char *seckey,
secp256k1_nonce_function noncefp,
rustsecp256k1_v0_1_0_nonce_function noncefp,
const void *ndata
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
@ -489,8 +539,8 @@ SECP256K1_API int secp256k1_ecdsa_sign(
* Args: ctx: pointer to a context object (cannot be NULL)
* In: seckey: pointer to a 32-byte secret key (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_seckey_verify(
const secp256k1_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_seckey_verify(
const rustsecp256k1_v0_1_0_context* ctx,
const unsigned char *seckey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
@ -502,9 +552,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_seckey_verify(
* Out: pubkey: pointer to the created public key (cannot be NULL)
* In: seckey: pointer to a 32-byte private key (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_create(
const secp256k1_context* ctx,
secp256k1_pubkey *pubkey,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_pubkey_create(
const rustsecp256k1_v0_1_0_context* ctx,
rustsecp256k1_v0_1_0_pubkey *pubkey,
const unsigned char *seckey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -514,8 +564,8 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_create(
* Args: ctx: pointer to a context object
* In/Out: seckey: pointer to the 32-byte private key to be negated (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_negate(
const secp256k1_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_privkey_negate(
const rustsecp256k1_v0_1_0_context* ctx,
unsigned char *seckey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
@ -525,9 +575,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_negate(
* Args: ctx: pointer to a context object
* In/Out: pubkey: pointer to the public key to be negated (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_negate(
const secp256k1_context* ctx,
secp256k1_pubkey *pubkey
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_pubkey_negate(
const rustsecp256k1_v0_1_0_context* ctx,
rustsecp256k1_v0_1_0_pubkey *pubkey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
/** Tweak a private key by adding tweak to it.
@ -539,8 +589,8 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_negate(
* In/Out: seckey: pointer to a 32-byte private key.
* In: tweak: pointer to a 32-byte tweak.
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_tweak_add(
const secp256k1_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_privkey_tweak_add(
const rustsecp256k1_v0_1_0_context* ctx,
unsigned char *seckey,
const unsigned char *tweak
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -555,9 +605,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_tweak_add(
* In/Out: pubkey: pointer to a public key object.
* In: tweak: pointer to a 32-byte tweak.
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_tweak_add(
const secp256k1_context* ctx,
secp256k1_pubkey *pubkey,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_pubkey_tweak_add(
const rustsecp256k1_v0_1_0_context* ctx,
rustsecp256k1_v0_1_0_pubkey *pubkey,
const unsigned char *tweak
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -568,8 +618,8 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_tweak_add(
* In/Out: seckey: pointer to a 32-byte private key.
* In: tweak: pointer to a 32-byte tweak.
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_tweak_mul(
const secp256k1_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_privkey_tweak_mul(
const rustsecp256k1_v0_1_0_context* ctx,
unsigned char *seckey,
const unsigned char *tweak
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -582,9 +632,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_tweak_mul(
* In/Out: pubkey: pointer to a public key object.
* In: tweak: pointer to a 32-byte tweak.
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_tweak_mul(
const secp256k1_context* ctx,
secp256k1_pubkey *pubkey,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_pubkey_tweak_mul(
const rustsecp256k1_v0_1_0_context* ctx,
rustsecp256k1_v0_1_0_pubkey *pubkey,
const unsigned char *tweak
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -609,12 +659,12 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_tweak_mul(
* guaranteed and may change in the future. It is safe to call this function on
* contexts not initialized for signing; then it will have no effect and return 1.
*
* You should call this after secp256k1_context_create or
* secp256k1_context_clone (and secp256k1_context_preallocated_create or
* secp256k1_context_clone, resp.), and you may call this repeatedly afterwards.
* You should call this after rustsecp256k1_v0_1_0_context_create or
* rustsecp256k1_v0_1_0_context_clone (and rustsecp256k1_v0_1_0_context_preallocated_create or
* rustsecp256k1_v0_1_0_context_clone, resp.), and you may call this repeatedly afterwards.
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_context_randomize(
secp256k1_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_context_randomize(
rustsecp256k1_v0_1_0_context* ctx,
const unsigned char *seed32
) SECP256K1_ARG_NONNULL(1);
@ -627,10 +677,10 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_context_randomize(
* In: ins: pointer to array of pointers to public keys (cannot be NULL)
* n: the number of public keys to add together (must be at least 1)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_combine(
const secp256k1_context* ctx,
secp256k1_pubkey *out,
const secp256k1_pubkey * const * ins,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ec_pubkey_combine(
const rustsecp256k1_v0_1_0_context* ctx,
rustsecp256k1_v0_1_0_pubkey *out,
const rustsecp256k1_v0_1_0_pubkey * const * ins,
size_t n
) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
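
Since the renamed signing and verification entry points keep their upstream shapes, they could in principle be driven over raw FFI as below. This is a hedged sketch: the 64-byte struct layouts follow the guarantees quoted in the comments above, the nonce-function parameter is declared as a plain pointer only so that passing NULL (which selects `rustsecp256k1_v0_1_0_nonce_function_default`) stays short, and none of this is the crate's actual binding code.

```rust
// Sketch only: ECDSA sign/verify round-trip through the prefixed symbols.
use std::os::raw::{c_int, c_uchar, c_void};

#[repr(C)] pub struct Context { _private: [u8; 0] }
#[repr(C)] pub struct PublicKey { data: [c_uchar; 64] }  // rustsecp256k1_v0_1_0_pubkey
#[repr(C)] pub struct Signature { data: [c_uchar; 64] }  // rustsecp256k1_v0_1_0_ecdsa_signature

extern "C" {
    fn rustsecp256k1_v0_1_0_ec_seckey_verify(ctx: *const Context, seckey: *const c_uchar) -> c_int;
    fn rustsecp256k1_v0_1_0_ec_pubkey_create(ctx: *const Context, pubkey: *mut PublicKey,
                                             seckey: *const c_uchar) -> c_int;
    fn rustsecp256k1_v0_1_0_ecdsa_sign(ctx: *const Context, sig: *mut Signature, msg32: *const c_uchar,
                                       seckey: *const c_uchar, noncefp: *const c_void,
                                       ndata: *const c_void) -> c_int;
    fn rustsecp256k1_v0_1_0_ecdsa_verify(ctx: *const Context, sig: *const Signature,
                                         msg32: *const c_uchar, pubkey: *const PublicKey) -> c_int;
}

/// Sign `msg32` with `seckey` and verify the result; `ctx` must have been
/// created with both the SIGN and VERIFY capabilities.
pub unsafe fn sign_and_verify(ctx: *const Context, msg32: &[u8; 32], seckey: &[u8; 32]) -> bool {
    if rustsecp256k1_v0_1_0_ec_seckey_verify(ctx, seckey.as_ptr()) != 1 {
        return false; // zero or out-of-range secret key
    }
    let mut pubkey = PublicKey { data: [0; 64] };
    if rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &mut pubkey, seckey.as_ptr()) != 1 {
        return false;
    }
    let mut sig = Signature { data: [0; 64] };
    // NULL noncefp/ndata: use the default RFC6979 nonce function.
    if rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &mut sig, msg32.as_ptr(), seckey.as_ptr(),
                                       std::ptr::null(), std::ptr::null()) != 1 {
        return false;
    }
    // The produced signature is in lower-S form, which is what verify accepts.
    rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32.as_ptr(), &pubkey) == 1
}
```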

View File

@ -15,7 +15,7 @@ extern "C" {
* y: pointer to a 32-byte y coordinate
* data: Arbitrary data pointer that is passed through
*/
typedef int (*secp256k1_ecdh_hash_function)(
typedef int (*rustsecp256k1_v0_1_0_ecdh_hash_function)(
unsigned char *output,
const unsigned char *x,
const unsigned char *y,
@ -23,28 +23,28 @@ typedef int (*secp256k1_ecdh_hash_function)(
);
/** An implementation of SHA256 hash function that applies to compressed public key. */
SECP256K1_API extern const secp256k1_ecdh_hash_function secp256k1_ecdh_hash_function_sha256;
SECP256K1_API extern const rustsecp256k1_v0_1_0_ecdh_hash_function rustsecp256k1_v0_1_0_ecdh_hash_function_sha256;
/** A default ecdh hash function (currently equal to secp256k1_ecdh_hash_function_sha256). */
SECP256K1_API extern const secp256k1_ecdh_hash_function secp256k1_ecdh_hash_function_default;
/** A default ecdh hash function (currently equal to rustsecp256k1_v0_1_0_ecdh_hash_function_sha256). */
SECP256K1_API extern const rustsecp256k1_v0_1_0_ecdh_hash_function rustsecp256k1_v0_1_0_ecdh_hash_function_default;
/** Compute an EC Diffie-Hellman secret in constant time
* Returns: 1: exponentiation was successful
* 0: scalar was invalid (zero or overflow)
* Args: ctx: pointer to a context object (cannot be NULL)
* Out: output: pointer to an array to be filled by the function
* In: pubkey: a pointer to a secp256k1_pubkey containing an
* In: pubkey: a pointer to a rustsecp256k1_v0_1_0_pubkey containing an
* initialized public key
* privkey: a 32-byte scalar with which to multiply the point
* hashfp: pointer to a hash function. If NULL, secp256k1_ecdh_hash_function_sha256 is used
* hashfp: pointer to a hash function. If NULL, rustsecp256k1_v0_1_0_ecdh_hash_function_sha256 is used
* data: Arbitrary data pointer that is passed through
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ecdh(
const secp256k1_context* ctx,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ecdh(
const rustsecp256k1_v0_1_0_context* ctx,
unsigned char *output,
const secp256k1_pubkey *pubkey,
const rustsecp256k1_v0_1_0_pubkey *pubkey,
const unsigned char *privkey,
secp256k1_ecdh_hash_function hashfp,
rustsecp256k1_v0_1_0_ecdh_hash_function hashfp,
void *data
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
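
The ECDH entry point is renamed the same way. A hedged FFI sketch follows; passing NULL for the hash-function argument selects the default SHA256 hasher, so the 32-byte output size is an inference from that, and the hash-function parameter is declared as a plain pointer only to keep the sketch short.

```rust
// Sketch only: deriving an ECDH shared secret through the prefixed symbol.
use std::os::raw::{c_int, c_uchar, c_void};

#[repr(C)] pub struct Context { _private: [u8; 0] }
#[repr(C)] pub struct PublicKey { data: [c_uchar; 64] }

extern "C" {
    fn rustsecp256k1_v0_1_0_ecdh(ctx: *const Context, output: *mut c_uchar, pubkey: *const PublicKey,
                                 privkey: *const c_uchar, hashfp: *const c_void,
                                 data: *mut c_void) -> c_int;
}

/// Returns the hashed shared secret, or None if the scalar was invalid (zero or overflow).
pub unsafe fn shared_secret(ctx: *const Context, their_pubkey: &PublicKey,
                            our_seckey: &[u8; 32]) -> Option<[u8; 32]> {
    let mut out = [0u8; 32];
    // NULL hashfp: use rustsecp256k1_v0_1_0_ecdh_hash_function_sha256.
    let ok = rustsecp256k1_v0_1_0_ecdh(ctx, out.as_mut_ptr(), their_pubkey, our_seckey.as_ptr(),
                                       std::ptr::null(), std::ptr::null_mut());
    if ok == 1 { Some(out) } else { None }
}
```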

View File

@ -16,8 +16,8 @@ extern "C" {
* objects created by functions in secp256k1.h, i.e., they can be passed to any
* API function that accepts a context object (see secp256k1.h for details). The
* only exception is that context objects created by functions in this module
* must be destroyed using secp256k1_context_preallocated_destroy (in this
* module) instead of secp256k1_context_destroy (in secp256k1.h).
* must be destroyed using rustsecp256k1_v0_1_0_context_preallocated_destroy (in this
* module) instead of rustsecp256k1_v0_1_0_context_destroy (in secp256k1.h).
*
* It is guaranteed that functions in this module will not call malloc or its
* friends realloc, calloc, and free.
@ -27,24 +27,24 @@ extern "C" {
* caller-provided memory.
*
* The purpose of this function is to determine how much memory must be provided
* to secp256k1_context_preallocated_create.
* to rustsecp256k1_v0_1_0_context_preallocated_create.
*
* Returns: the required size of the caller-provided memory block
* In: flags: which parts of the context to initialize.
*/
SECP256K1_API size_t secp256k1_context_preallocated_size(
SECP256K1_API size_t rustsecp256k1_v0_1_0_context_preallocated_size(
unsigned int flags
) SECP256K1_WARN_UNUSED_RESULT;
/** Create a secp256k1 context object in caller-provided memory.
*
* The caller must provide a pointer to a rewritable contiguous block of memory
* of size at least secp256k1_context_preallocated_size(flags) bytes, suitably
* of size at least rustsecp256k1_v0_1_0_context_preallocated_size(flags) bytes, suitably
* aligned to hold an object of any type.
*
* The block of memory is exclusively owned by the created context object during
* the lifetime of this context object, which begins with the call to this
* function and ends when a call to secp256k1_context_preallocated_destroy
* function and ends when a call to rustsecp256k1_v0_1_0_context_preallocated_destroy
* (which destroys the context object again) returns. During the lifetime of the
* context object, the caller is obligated not to access this block of memory,
* i.e., the caller may not read or write the memory, e.g., by copying the memory
@ -54,14 +54,14 @@ SECP256K1_API size_t secp256k1_context_preallocated_size(
*
* Returns: a newly created context object.
* In: prealloc: a pointer to a rewritable contiguous block of memory of
* size at least secp256k1_context_preallocated_size(flags)
* size at least rustsecp256k1_v0_1_0_context_preallocated_size(flags)
* bytes, as detailed above (cannot be NULL)
* flags: which parts of the context to initialize.
*
* See also secp256k1_context_randomize (in secp256k1.h)
* and secp256k1_context_preallocated_destroy.
* See also rustsecp256k1_v0_1_0_context_randomize (in secp256k1.h)
* and rustsecp256k1_v0_1_0_context_preallocated_destroy.
*/
SECP256K1_API secp256k1_context* secp256k1_context_preallocated_create(
SECP256K1_API rustsecp256k1_v0_1_0_context* rustsecp256k1_v0_1_0_context_preallocated_create(
void* prealloc,
unsigned int flags
) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT;
@ -72,28 +72,28 @@ SECP256K1_API secp256k1_context* secp256k1_context_preallocated_create(
* Returns: the required size of the caller-provided memory block.
* In: ctx: an existing context to copy (cannot be NULL)
*/
SECP256K1_API size_t secp256k1_context_preallocated_clone_size(
const secp256k1_context* ctx
SECP256K1_API size_t rustsecp256k1_v0_1_0_context_preallocated_clone_size(
const rustsecp256k1_v0_1_0_context* ctx
) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT;
/** Copy a secp256k1 context object into caller-provided memory.
*
* The caller must provide a pointer to a rewritable contiguous block of memory
* of size at least secp256k1_context_preallocated_size(flags) bytes, suitably
* of size at least rustsecp256k1_v0_1_0_context_preallocated_size(flags) bytes, suitably
* aligned to hold an object of any type.
*
* The block of memory is exclusively owned by the created context object during
* the lifetime of this context object, see the description of
* secp256k1_context_preallocated_create for details.
* rustsecp256k1_v0_1_0_context_preallocated_create for details.
*
* Returns: a newly created context object.
* Args: ctx: an existing context to copy (cannot be NULL)
* In: prealloc: a pointer to a rewritable contiguous block of memory of
* size at least secp256k1_context_preallocated_size(flags)
* size at least rustsecp256k1_v0_1_0_context_preallocated_size(flags)
* bytes, as detailed above (cannot be NULL)
*/
SECP256K1_API secp256k1_context* secp256k1_context_preallocated_clone(
const secp256k1_context* ctx,
SECP256K1_API rustsecp256k1_v0_1_0_context* rustsecp256k1_v0_1_0_context_preallocated_clone(
const rustsecp256k1_v0_1_0_context* ctx,
void* prealloc
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_WARN_UNUSED_RESULT;
@ -103,22 +103,22 @@ SECP256K1_API secp256k1_context* secp256k1_context_preallocated_clone(
* The context pointer may not be used afterwards.
*
* The context to destroy must have been created using
* secp256k1_context_preallocated_create or secp256k1_context_preallocated_clone.
* If the context has instead been created using secp256k1_context_create or
* secp256k1_context_clone, the behaviour is undefined. In that case,
* secp256k1_context_destroy must be used instead.
* rustsecp256k1_v0_1_0_context_preallocated_create or rustsecp256k1_v0_1_0_context_preallocated_clone.
* If the context has instead been created using rustsecp256k1_v0_1_0_context_create or
* rustsecp256k1_v0_1_0_context_clone, the behaviour is undefined. In that case,
* rustsecp256k1_v0_1_0_context_destroy must be used instead.
*
* If required, it is the responsibility of the caller to deallocate the block
* of memory properly after this function returns, e.g., by calling free on the
* preallocated pointer given to secp256k1_context_preallocated_create or
* secp256k1_context_preallocated_clone.
* preallocated pointer given to rustsecp256k1_v0_1_0_context_preallocated_create or
* rustsecp256k1_v0_1_0_context_preallocated_clone.
*
* Args: ctx: an existing context to destroy, constructed using
* secp256k1_context_preallocated_create or
* secp256k1_context_preallocated_clone (cannot be NULL)
* rustsecp256k1_v0_1_0_context_preallocated_create or
* rustsecp256k1_v0_1_0_context_preallocated_clone (cannot be NULL)
*/
SECP256K1_API void secp256k1_context_preallocated_destroy(
secp256k1_context* ctx
SECP256K1_API void rustsecp256k1_v0_1_0_context_preallocated_destroy(
rustsecp256k1_v0_1_0_context* ctx
);
#ifdef __cplusplus

View File

@ -14,8 +14,8 @@ extern "C" {
* guaranteed to be portable between different platforms or versions. It is
* however guaranteed to be 65 bytes in size, and can be safely copied/moved.
* If you need to convert to a format suitable for storage or transmission, use
* the secp256k1_ecdsa_signature_serialize_* and
* secp256k1_ecdsa_signature_parse_* functions.
* the rustsecp256k1_v0_1_0_ecdsa_signature_serialize_* and
* rustsecp256k1_v0_1_0_ecdsa_signature_parse_* functions.
*
* Furthermore, it is guaranteed that identical signatures (including their
* recoverability) will have identical representation, so they can be
@ -23,7 +23,7 @@ extern "C" {
*/
typedef struct {
unsigned char data[65];
} secp256k1_ecdsa_recoverable_signature;
} rustsecp256k1_v0_1_0_ecdsa_recoverable_signature;
/** Parse a compact ECDSA signature (64 bytes + recovery id).
*
@ -33,9 +33,9 @@ typedef struct {
* In: input64: a pointer to a 64-byte compact signature
* recid: the recovery id (0, 1, 2 or 3)
*/
SECP256K1_API int secp256k1_ecdsa_recoverable_signature_parse_compact(
const secp256k1_context* ctx,
secp256k1_ecdsa_recoverable_signature* sig,
SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(
const rustsecp256k1_v0_1_0_context* ctx,
rustsecp256k1_v0_1_0_ecdsa_recoverable_signature* sig,
const unsigned char *input64,
int recid
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
@ -46,10 +46,10 @@ SECP256K1_API int secp256k1_ecdsa_recoverable_signature_parse_compact(
* Out: sig: a pointer to a normal signature (cannot be NULL).
* In: sigin: a pointer to a recoverable signature (cannot be NULL).
*/
SECP256K1_API int secp256k1_ecdsa_recoverable_signature_convert(
const secp256k1_context* ctx,
secp256k1_ecdsa_signature* sig,
const secp256k1_ecdsa_recoverable_signature* sigin
SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert(
const rustsecp256k1_v0_1_0_context* ctx,
rustsecp256k1_v0_1_0_ecdsa_signature* sig,
const rustsecp256k1_v0_1_0_ecdsa_recoverable_signature* sigin
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Serialize an ECDSA signature in compact format (64 bytes + recovery id).
@ -60,11 +60,11 @@ SECP256K1_API int secp256k1_ecdsa_recoverable_signature_convert(
* recid: a pointer to an integer to hold the recovery id (can be NULL).
* In: sig: a pointer to an initialized signature object (cannot be NULL)
*/
SECP256K1_API int secp256k1_ecdsa_recoverable_signature_serialize_compact(
const secp256k1_context* ctx,
SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_serialize_compact(
const rustsecp256k1_v0_1_0_context* ctx,
unsigned char *output64,
int *recid,
const secp256k1_ecdsa_recoverable_signature* sig
const rustsecp256k1_v0_1_0_ecdsa_recoverable_signature* sig
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** Create a recoverable ECDSA signature.
@ -75,15 +75,15 @@ SECP256K1_API int secp256k1_ecdsa_recoverable_signature_serialize_compact(
* Out: sig: pointer to an array where the signature will be placed (cannot be NULL)
* In: msg32: the 32-byte message hash being signed (cannot be NULL)
* seckey: pointer to a 32-byte secret key (cannot be NULL)
* noncefp:pointer to a nonce generation function. If NULL, secp256k1_nonce_function_default is used
* noncefp:pointer to a nonce generation function. If NULL, rustsecp256k1_v0_1_0_nonce_function_default is used
* ndata: pointer to arbitrary data used by the nonce generation function (can be NULL)
*/
SECP256K1_API int secp256k1_ecdsa_sign_recoverable(
const secp256k1_context* ctx,
secp256k1_ecdsa_recoverable_signature *sig,
SECP256K1_API int rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(
const rustsecp256k1_v0_1_0_context* ctx,
rustsecp256k1_v0_1_0_ecdsa_recoverable_signature *sig,
const unsigned char *msg32,
const unsigned char *seckey,
secp256k1_nonce_function noncefp,
rustsecp256k1_v0_1_0_nonce_function noncefp,
const void *ndata
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
@ -96,10 +96,10 @@ SECP256K1_API int secp256k1_ecdsa_sign_recoverable(
* In: sig: pointer to initialized signature that supports pubkey recovery (cannot be NULL)
* msg32: the 32-byte message hash assumed to be signed (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ecdsa_recover(
const secp256k1_context* ctx,
secp256k1_pubkey *pubkey,
const secp256k1_ecdsa_recoverable_signature *sig,
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_1_0_ecdsa_recover(
const rustsecp256k1_v0_1_0_context* ctx,
rustsecp256k1_v0_1_0_pubkey *pubkey,
const rustsecp256k1_v0_1_0_ecdsa_recoverable_signature *sig,
const unsigned char *msg32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
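
The recovery module gets the same prefix treatment. Below is a hedged sketch of recoverable signing followed by public-key recovery; as above, the nonce-function parameter is a plain pointer for brevity (NULL selects the default), and the binding is illustrative rather than the crate's real code (the module itself sits behind the `recovery` feature).

```rust
// Sketch only: recoverable ECDSA signing plus public-key recovery.
use std::os::raw::{c_int, c_uchar, c_void};

#[repr(C)] pub struct Context { _private: [u8; 0] }
#[repr(C)] pub struct PublicKey { data: [c_uchar; 64] }
#[repr(C)] pub struct RecoverableSignature { data: [c_uchar; 65] } // 65 bytes, per the header comment above

extern "C" {
    fn rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(ctx: *const Context, sig: *mut RecoverableSignature,
                                                   msg32: *const c_uchar, seckey: *const c_uchar,
                                                   noncefp: *const c_void, ndata: *const c_void) -> c_int;
    fn rustsecp256k1_v0_1_0_ecdsa_recover(ctx: *const Context, pubkey: *mut PublicKey,
                                          sig: *const RecoverableSignature,
                                          msg32: *const c_uchar) -> c_int;
}

/// Sign `msg32` recoverably, then recover the signer's public key from the signature alone.
pub unsafe fn sign_then_recover(ctx: *const Context, msg32: &[u8; 32],
                                seckey: &[u8; 32]) -> Option<PublicKey> {
    let mut sig = RecoverableSignature { data: [0; 65] };
    if rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(ctx, &mut sig, msg32.as_ptr(), seckey.as_ptr(),
                                                   std::ptr::null(), std::ptr::null()) != 1 {
        return None;
    }
    let mut pubkey = PublicKey { data: [0; 64] };
    if rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &mut pubkey, &sig, msg32.as_ptr()) != 1 {
        return None;
    }
    Some(pubkey) // should match the key derived directly from `seckey`
}
```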

View File

@ -5,8 +5,8 @@ import sys
load("group_prover.sage")
load("weierstrass_prover.sage")
def formula_secp256k1_gej_double_var(a):
"""libsecp256k1's secp256k1_gej_double_var, used by various addition functions"""
def formula_rustsecp256k1_v0_1_0_gej_double_var(a):
"""libsecp256k1's rustsecp256k1_v0_1_0_gej_double_var, used by various addition functions"""
rz = a.Z * a.Y
rz = rz * 2
t1 = a.X^2
@ -29,8 +29,8 @@ def formula_secp256k1_gej_double_var(a):
ry = ry + t2
return jacobianpoint(rx, ry, rz)
def formula_secp256k1_gej_add_var(branch, a, b):
"""libsecp256k1's secp256k1_gej_add_var"""
def formula_rustsecp256k1_v0_1_0_gej_add_var(branch, a, b):
"""libsecp256k1's rustsecp256k1_v0_1_0_gej_add_var"""
if branch == 0:
return (constraints(), constraints(nonzero={a.Infinity : 'a_infinite'}), b)
if branch == 1:
@ -48,7 +48,7 @@ def formula_secp256k1_gej_add_var(branch, a, b):
i = -s1
i = i + s2
if branch == 2:
r = formula_secp256k1_gej_double_var(a)
r = formula_rustsecp256k1_v0_1_0_gej_double_var(a)
return (constraints(), constraints(zero={h : 'h=0', i : 'i=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}), r)
if branch == 3:
return (constraints(), constraints(zero={h : 'h=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={i : 'i!=0'}), point_at_infinity())
@ -71,8 +71,8 @@ def formula_secp256k1_gej_add_var(branch, a, b):
ry = ry + h3
return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz))
def formula_secp256k1_gej_add_ge_var(branch, a, b):
"""libsecp256k1's secp256k1_gej_add_ge_var, which assume bz==1"""
def formula_rustsecp256k1_v0_1_0_gej_add_ge_var(branch, a, b):
"""libsecp256k1's rustsecp256k1_v0_1_0_gej_add_ge_var, which assume bz==1"""
if branch == 0:
return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(nonzero={a.Infinity : 'a_infinite'}), b)
if branch == 1:
@ -88,7 +88,7 @@ def formula_secp256k1_gej_add_ge_var(branch, a, b):
i = -s1
i = i + s2
if (branch == 2):
r = formula_secp256k1_gej_double_var(a)
r = formula_rustsecp256k1_v0_1_0_gej_double_var(a)
return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0', i : 'i=0'}), r)
if (branch == 3):
return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity())
@ -110,8 +110,8 @@ def formula_secp256k1_gej_add_ge_var(branch, a, b):
ry = ry + h3
return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz))
def formula_secp256k1_gej_add_zinv_var(branch, a, b):
"""libsecp256k1's secp256k1_gej_add_zinv_var"""
def formula_rustsecp256k1_v0_1_0_gej_add_zinv_var(branch, a, b):
"""libsecp256k1's rustsecp256k1_v0_1_0_gej_add_zinv_var"""
bzinv = b.Z^(-1)
if branch == 0:
return (constraints(), constraints(nonzero={b.Infinity : 'b_infinite'}), a)
@ -134,7 +134,7 @@ def formula_secp256k1_gej_add_zinv_var(branch, a, b):
i = -s1
i = i + s2
if branch == 2:
r = formula_secp256k1_gej_double_var(a)
r = formula_rustsecp256k1_v0_1_0_gej_double_var(a)
return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0', i : 'i=0'}), r)
if branch == 3:
return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity())
@ -157,8 +157,8 @@ def formula_secp256k1_gej_add_zinv_var(branch, a, b):
ry = ry + h3
return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz))
def formula_secp256k1_gej_add_ge(branch, a, b):
"""libsecp256k1's secp256k1_gej_add_ge"""
def formula_rustsecp256k1_v0_1_0_gej_add_ge(branch, a, b):
"""libsecp256k1's rustsecp256k1_v0_1_0_gej_add_ge"""
zeroes = {}
nonzeroes = {}
a_infinity = False
@ -229,8 +229,8 @@ def formula_secp256k1_gej_add_ge(branch, a, b):
return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zeroes, nonzero=nonzeroes), point_at_infinity())
return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zeroes, nonzero=nonzeroes), jacobianpoint(rx, ry, rz))
def formula_secp256k1_gej_add_ge_old(branch, a, b):
"""libsecp256k1's old secp256k1_gej_add_ge, which fails when ay+by=0 but ax!=bx"""
def formula_rustsecp256k1_v0_1_0_gej_add_ge_old(branch, a, b):
"""libsecp256k1's old rustsecp256k1_v0_1_0_gej_add_ge, which fails when ay+by=0 but ax!=bx"""
a_infinity = (branch & 1) != 0
zero = {}
nonzero = {}
@ -292,15 +292,15 @@ def formula_secp256k1_gej_add_ge_old(branch, a, b):
return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zero, nonzero=nonzero), jacobianpoint(rx, ry, rz))
if __name__ == "__main__":
check_symbolic_jacobian_weierstrass("secp256k1_gej_add_var", 0, 7, 5, formula_secp256k1_gej_add_var)
check_symbolic_jacobian_weierstrass("secp256k1_gej_add_ge_var", 0, 7, 5, formula_secp256k1_gej_add_ge_var)
check_symbolic_jacobian_weierstrass("secp256k1_gej_add_zinv_var", 0, 7, 5, formula_secp256k1_gej_add_zinv_var)
check_symbolic_jacobian_weierstrass("secp256k1_gej_add_ge", 0, 7, 16, formula_secp256k1_gej_add_ge)
check_symbolic_jacobian_weierstrass("secp256k1_gej_add_ge_old [should fail]", 0, 7, 4, formula_secp256k1_gej_add_ge_old)
check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_1_0_gej_add_var)
check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_1_0_gej_add_ge_var)
check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_1_0_gej_add_zinv_var)
check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_ge", 0, 7, 16, formula_rustsecp256k1_v0_1_0_gej_add_ge)
check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_1_0_gej_add_ge_old)
if len(sys.argv) >= 2 and sys.argv[1] == "--exhaustive":
check_exhaustive_jacobian_weierstrass("secp256k1_gej_add_var", 0, 7, 5, formula_secp256k1_gej_add_var, 43)
check_exhaustive_jacobian_weierstrass("secp256k1_gej_add_ge_var", 0, 7, 5, formula_secp256k1_gej_add_ge_var, 43)
check_exhaustive_jacobian_weierstrass("secp256k1_gej_add_zinv_var", 0, 7, 5, formula_secp256k1_gej_add_zinv_var, 43)
check_exhaustive_jacobian_weierstrass("secp256k1_gej_add_ge", 0, 7, 16, formula_secp256k1_gej_add_ge, 43)
check_exhaustive_jacobian_weierstrass("secp256k1_gej_add_ge_old [should fail]", 0, 7, 4, formula_secp256k1_gej_add_ge_old, 43)
check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_1_0_gej_add_var, 43)
check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_1_0_gej_add_ge_var, 43)
check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_1_0_gej_add_zinv_var, 43)
check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_ge", 0, 7, 16, formula_rustsecp256k1_v0_1_0_gej_add_ge, 43)
check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_1_0_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_1_0_gej_add_ge_old, 43)

View File

@ -27,8 +27,8 @@ Note:
.set field_not_M, 0xfc000000 @ ~M = ~0x3ffffff
.align 2
.global secp256k1_fe_mul_inner
.type secp256k1_fe_mul_inner, %function
.global rustsecp256k1_v0_1_0_fe_mul_inner
.type rustsecp256k1_v0_1_0_fe_mul_inner, %function
@ Arguments:
@ r0 r Restrict: can overlap with a, not with b
@ r1 a
@ -36,7 +36,7 @@ Note:
@ Stack (total 4+10*4 = 44)
@ sp + #0 saved 'r' pointer
@ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9
secp256k1_fe_mul_inner:
rustsecp256k1_v0_1_0_fe_mul_inner:
stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14}
sub sp, sp, #48 @ frame=44 + alignment
str r0, [sp, #0] @ save result address, we need it only at the end
@ -511,18 +511,18 @@ secp256k1_fe_mul_inner:
add sp, sp, #48
ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size secp256k1_fe_mul_inner, .-secp256k1_fe_mul_inner
.size rustsecp256k1_v0_1_0_fe_mul_inner, .-rustsecp256k1_v0_1_0_fe_mul_inner
.align 2
.global secp256k1_fe_sqr_inner
.type secp256k1_fe_sqr_inner, %function
.global rustsecp256k1_v0_1_0_fe_sqr_inner
.type rustsecp256k1_v0_1_0_fe_sqr_inner, %function
@ Arguments:
@ r0 r Can overlap with a
@ r1 a
@ Stack (total 4+10*4 = 44)
@ sp + #0 saved 'r' pointer
@ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9
secp256k1_fe_sqr_inner:
rustsecp256k1_v0_1_0_fe_sqr_inner:
stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14}
sub sp, sp, #48 @ frame=44 + alignment
str r0, [sp, #0] @ save result address, we need it only at the end
@ -909,5 +909,5 @@ secp256k1_fe_sqr_inner:
add sp, sp, #48
ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size secp256k1_fe_sqr_inner, .-secp256k1_fe_sqr_inner
.size rustsecp256k1_v0_1_0_fe_sqr_inner, .-rustsecp256k1_v0_1_0_fe_sqr_inner

View File

@ -12,8 +12,8 @@
#include "bench.h"
typedef struct {
secp256k1_context *ctx;
secp256k1_pubkey point;
rustsecp256k1_v0_1_0_context *ctx;
rustsecp256k1_v0_1_0_pubkey point;
unsigned char scalar[32];
} bench_ecdh_data;
@ -29,11 +29,11 @@ static void bench_ecdh_setup(void* arg) {
};
/* create a context with no capabilities */
data->ctx = secp256k1_context_create(SECP256K1_FLAGS_TYPE_CONTEXT);
data->ctx = rustsecp256k1_v0_1_0_context_create(SECP256K1_FLAGS_TYPE_CONTEXT);
for (i = 0; i < 32; i++) {
data->scalar[i] = i + 1;
}
CHECK(secp256k1_ec_pubkey_parse(data->ctx, &data->point, point, sizeof(point)) == 1);
CHECK(rustsecp256k1_v0_1_0_ec_pubkey_parse(data->ctx, &data->point, point, sizeof(point)) == 1);
}
static void bench_ecdh(void* arg) {
@ -42,7 +42,7 @@ static void bench_ecdh(void* arg) {
bench_ecdh_data *data = (bench_ecdh_data*)arg;
for (i = 0; i < 20000; i++) {
CHECK(secp256k1_ecdh(data->ctx, res, &data->point, data->scalar, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdh(data->ctx, res, &data->point, data->scalar, NULL, NULL) == 1);
}
}

View File

@ -22,13 +22,13 @@
typedef struct {
/* Setup once in advance */
secp256k1_context* ctx;
secp256k1_scratch_space* scratch;
secp256k1_scalar* scalars;
secp256k1_ge* pubkeys;
secp256k1_scalar* seckeys;
secp256k1_gej* expected_output;
secp256k1_ecmult_multi_func ecmult_multi;
rustsecp256k1_v0_1_0_context* ctx;
rustsecp256k1_v0_1_0_scratch_space* scratch;
rustsecp256k1_v0_1_0_scalar* scalars;
rustsecp256k1_v0_1_0_ge* pubkeys;
rustsecp256k1_v0_1_0_scalar* seckeys;
rustsecp256k1_v0_1_0_gej* expected_output;
rustsecp256k1_v0_1_0_ecmult_multi_func ecmult_multi;
/* Changes per test */
size_t count;
@ -39,15 +39,15 @@ typedef struct {
size_t offset2;
/* Test output. */
secp256k1_gej* output;
rustsecp256k1_v0_1_0_gej* output;
} bench_data;
static int bench_callback(secp256k1_scalar* sc, secp256k1_ge* ge, size_t idx, void* arg) {
static int bench_callback(rustsecp256k1_v0_1_0_scalar* sc, rustsecp256k1_v0_1_0_ge* ge, size_t idx, void* arg) {
bench_data* data = (bench_data*)arg;
if (data->includes_g) ++idx;
if (idx == 0) {
*sc = data->scalars[data->offset1];
*ge = secp256k1_ge_const_g;
*ge = rustsecp256k1_v0_1_0_ge_const_g;
} else {
*sc = data->scalars[(data->offset1 + idx) % POINTS];
*ge = data->pubkeys[(data->offset2 + idx - 1) % POINTS];
@ -82,14 +82,14 @@ static void bench_ecmult_teardown(void* arg) {
size_t iter;
/* Verify the results in teardown, to avoid doing comparisons while benchmarking. */
for (iter = 0; iter < iters; ++iter) {
secp256k1_gej tmp;
secp256k1_gej_add_var(&tmp, &data->output[iter], &data->expected_output[iter], NULL);
CHECK(secp256k1_gej_is_infinity(&tmp));
rustsecp256k1_v0_1_0_gej tmp;
rustsecp256k1_v0_1_0_gej_add_var(&tmp, &data->output[iter], &data->expected_output[iter], NULL);
CHECK(rustsecp256k1_v0_1_0_gej_is_infinity(&tmp));
}
}
static void generate_scalar(uint32_t num, secp256k1_scalar* scalar) {
secp256k1_sha256 sha256;
static void generate_scalar(uint32_t num, rustsecp256k1_v0_1_0_scalar* scalar) {
rustsecp256k1_v0_1_0_sha256 sha256;
unsigned char c[11] = {'e', 'c', 'm', 'u', 'l', 't', 0, 0, 0, 0};
unsigned char buf[32];
int overflow = 0;
@ -97,16 +97,16 @@ static void generate_scalar(uint32_t num, secp256k1_scalar* scalar) {
c[7] = num >> 8;
c[8] = num >> 16;
c[9] = num >> 24;
secp256k1_sha256_initialize(&sha256);
secp256k1_sha256_write(&sha256, c, sizeof(c));
secp256k1_sha256_finalize(&sha256, buf);
secp256k1_scalar_set_b32(scalar, buf, &overflow);
rustsecp256k1_v0_1_0_sha256_initialize(&sha256);
rustsecp256k1_v0_1_0_sha256_write(&sha256, c, sizeof(c));
rustsecp256k1_v0_1_0_sha256_finalize(&sha256, buf);
rustsecp256k1_v0_1_0_scalar_set_b32(scalar, buf, &overflow);
CHECK(!overflow);
}
static void run_test(bench_data* data, size_t count, int includes_g) {
char str[32];
static const secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
static const rustsecp256k1_v0_1_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
size_t iters = 1 + ITERS / count;
size_t iter;
@ -117,15 +117,15 @@ static void run_test(bench_data* data, size_t count, int includes_g) {
data->offset1 = (data->count * 0x537b7f6f + 0x8f66a481) % POINTS;
data->offset2 = (data->count * 0x7f6f537b + 0x6a1a8f49) % POINTS;
for (iter = 0; iter < iters; ++iter) {
secp256k1_scalar tmp;
secp256k1_scalar total = data->scalars[(data->offset1++) % POINTS];
rustsecp256k1_v0_1_0_scalar tmp;
rustsecp256k1_v0_1_0_scalar total = data->scalars[(data->offset1++) % POINTS];
size_t i = 0;
for (i = 0; i + 1 < count; ++i) {
secp256k1_scalar_mul(&tmp, &data->seckeys[(data->offset2++) % POINTS], &data->scalars[(data->offset1++) % POINTS]);
secp256k1_scalar_add(&total, &total, &tmp);
rustsecp256k1_v0_1_0_scalar_mul(&tmp, &data->seckeys[(data->offset2++) % POINTS], &data->scalars[(data->offset1++) % POINTS]);
rustsecp256k1_v0_1_0_scalar_add(&total, &total, &tmp);
}
secp256k1_scalar_negate(&total, &total);
secp256k1_ecmult(&data->ctx->ecmult_ctx, &data->expected_output[iter], NULL, &zero, &total);
rustsecp256k1_v0_1_0_scalar_negate(&total, &total);
rustsecp256k1_v0_1_0_ecmult(&data->ctx->ecmult_ctx, &data->expected_output[iter], NULL, &zero, &total);
}
/* Run the benchmark. */
@ -136,25 +136,25 @@ static void run_test(bench_data* data, size_t count, int includes_g) {
int main(int argc, char **argv) {
bench_data data;
int i, p;
secp256k1_gej* pubkeys_gej;
rustsecp256k1_v0_1_0_gej* pubkeys_gej;
size_t scratch_size;
data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
scratch_size = secp256k1_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16;
data.scratch = secp256k1_scratch_space_create(data.ctx, scratch_size);
data.ecmult_multi = secp256k1_ecmult_multi_var;
data.ctx = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
scratch_size = rustsecp256k1_v0_1_0_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16;
data.scratch = rustsecp256k1_v0_1_0_scratch_space_create(data.ctx, scratch_size);
data.ecmult_multi = rustsecp256k1_v0_1_0_ecmult_multi_var;
if (argc > 1) {
if(have_flag(argc, argv, "pippenger_wnaf")) {
printf("Using pippenger_wnaf:\n");
data.ecmult_multi = secp256k1_ecmult_pippenger_batch_single;
data.ecmult_multi = rustsecp256k1_v0_1_0_ecmult_pippenger_batch_single;
} else if(have_flag(argc, argv, "strauss_wnaf")) {
printf("Using strauss_wnaf:\n");
data.ecmult_multi = secp256k1_ecmult_strauss_batch_single;
data.ecmult_multi = rustsecp256k1_v0_1_0_ecmult_strauss_batch_single;
} else if(have_flag(argc, argv, "simple")) {
printf("Using simple algorithm:\n");
data.ecmult_multi = secp256k1_ecmult_multi_var;
secp256k1_scratch_space_destroy(data.ctx, data.scratch);
data.ecmult_multi = rustsecp256k1_v0_1_0_ecmult_multi_var;
rustsecp256k1_v0_1_0_scratch_space_destroy(data.ctx, data.scratch);
data.scratch = NULL;
} else {
fprintf(stderr, "%s: unrecognized argument '%s'.\n", argv[0], argv[1]);
@ -164,24 +164,24 @@ int main(int argc, char **argv) {
}
/* Allocate stuff */
data.scalars = malloc(sizeof(secp256k1_scalar) * POINTS);
data.seckeys = malloc(sizeof(secp256k1_scalar) * POINTS);
data.pubkeys = malloc(sizeof(secp256k1_ge) * POINTS);
data.expected_output = malloc(sizeof(secp256k1_gej) * (ITERS + 1));
data.output = malloc(sizeof(secp256k1_gej) * (ITERS + 1));
data.scalars = malloc(sizeof(rustsecp256k1_v0_1_0_scalar) * POINTS);
data.seckeys = malloc(sizeof(rustsecp256k1_v0_1_0_scalar) * POINTS);
data.pubkeys = malloc(sizeof(rustsecp256k1_v0_1_0_ge) * POINTS);
data.expected_output = malloc(sizeof(rustsecp256k1_v0_1_0_gej) * (ITERS + 1));
data.output = malloc(sizeof(rustsecp256k1_v0_1_0_gej) * (ITERS + 1));
/* Generate a set of scalars, and private/public keypairs. */
pubkeys_gej = malloc(sizeof(secp256k1_gej) * POINTS);
secp256k1_gej_set_ge(&pubkeys_gej[0], &secp256k1_ge_const_g);
secp256k1_scalar_set_int(&data.seckeys[0], 1);
pubkeys_gej = malloc(sizeof(rustsecp256k1_v0_1_0_gej) * POINTS);
rustsecp256k1_v0_1_0_gej_set_ge(&pubkeys_gej[0], &rustsecp256k1_v0_1_0_ge_const_g);
rustsecp256k1_v0_1_0_scalar_set_int(&data.seckeys[0], 1);
for (i = 0; i < POINTS; ++i) {
generate_scalar(i, &data.scalars[i]);
if (i) {
secp256k1_gej_double_var(&pubkeys_gej[i], &pubkeys_gej[i - 1], NULL);
secp256k1_scalar_add(&data.seckeys[i], &data.seckeys[i - 1], &data.seckeys[i - 1]);
rustsecp256k1_v0_1_0_gej_double_var(&pubkeys_gej[i], &pubkeys_gej[i - 1], NULL);
rustsecp256k1_v0_1_0_scalar_add(&data.seckeys[i], &data.seckeys[i - 1], &data.seckeys[i - 1]);
}
}
secp256k1_ge_set_all_gej_var(data.pubkeys, pubkeys_gej, POINTS);
rustsecp256k1_v0_1_0_ge_set_all_gej_var(data.pubkeys, pubkeys_gej, POINTS);
free(pubkeys_gej);
for (i = 1; i <= 8; ++i) {
@ -194,9 +194,9 @@ int main(int argc, char **argv) {
}
}
if (data.scratch != NULL) {
secp256k1_scratch_space_destroy(data.ctx, data.scratch);
rustsecp256k1_v0_1_0_scratch_space_destroy(data.ctx, data.scratch);
}
secp256k1_context_destroy(data.ctx);
rustsecp256k1_v0_1_0_context_destroy(data.ctx);
free(data.scalars);
free(data.pubkeys);
free(data.seckeys);
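
A note on the teardown check in this file (the "Verify the results in teardown" comment above): every benchmarked point is a known multiple k_i of G, so sum_i s_i*P_i equals (sum_i s_i*k_i)*G, and precomputing the negation of that scalar sum lets correctness be confirmed with a single group addition and an is-infinity test instead of a point comparison. A minimal toy sketch of that identity, using plain integers modulo a small stand-in order (illustrative constants, not secp256k1 parameters):

#include <stdio.h>

int main(void) {
    const unsigned N = 19;                 /* toy "group order" */
    unsigned s[3] = {5, 11, 2};            /* scalars fed by the callback */
    unsigned k[3] = {3, 8, 4};             /* discrete logs of the points */
    unsigned total = 0, expected_neg;
    int i;
    for (i = 0; i < 3; i++) {
        total = (total + s[i] * k[i]) % N; /* log of sum_i s_i * P_i */
    }
    expected_neg = (N - total) % N;        /* log of the precomputed expected_output */
    /* output + expected_output is infinity exactly when the logs cancel */
    printf("cancels: %s\n", (total + expected_neg) % N == 0 ? "yes" : "no");
    return 0;
}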

View File

@ -19,10 +19,10 @@
#include "secp256k1.c"
typedef struct {
secp256k1_scalar scalar_x, scalar_y;
secp256k1_fe fe_x, fe_y;
secp256k1_ge ge_x, ge_y;
secp256k1_gej gej_x, gej_y;
rustsecp256k1_v0_1_0_scalar scalar_x, scalar_y;
rustsecp256k1_v0_1_0_fe fe_x, fe_y;
rustsecp256k1_v0_1_0_ge ge_x, ge_y;
rustsecp256k1_v0_1_0_gej gej_x, gej_y;
unsigned char data[64];
int wnaf[256];
} bench_inv;
@ -44,14 +44,14 @@ void bench_setup(void* arg) {
0x11, 0x15, 0x17, 0x1b, 0x1d, 0xb1, 0xbf, 0xd3
};
secp256k1_scalar_set_b32(&data->scalar_x, init_x, NULL);
secp256k1_scalar_set_b32(&data->scalar_y, init_y, NULL);
secp256k1_fe_set_b32(&data->fe_x, init_x);
secp256k1_fe_set_b32(&data->fe_y, init_y);
CHECK(secp256k1_ge_set_xo_var(&data->ge_x, &data->fe_x, 0));
CHECK(secp256k1_ge_set_xo_var(&data->ge_y, &data->fe_y, 1));
secp256k1_gej_set_ge(&data->gej_x, &data->ge_x);
secp256k1_gej_set_ge(&data->gej_y, &data->ge_y);
rustsecp256k1_v0_1_0_scalar_set_b32(&data->scalar_x, init_x, NULL);
rustsecp256k1_v0_1_0_scalar_set_b32(&data->scalar_y, init_y, NULL);
rustsecp256k1_v0_1_0_fe_set_b32(&data->fe_x, init_x);
rustsecp256k1_v0_1_0_fe_set_b32(&data->fe_y, init_y);
CHECK(rustsecp256k1_v0_1_0_ge_set_xo_var(&data->ge_x, &data->fe_x, 0));
CHECK(rustsecp256k1_v0_1_0_ge_set_xo_var(&data->ge_y, &data->fe_y, 1));
rustsecp256k1_v0_1_0_gej_set_ge(&data->gej_x, &data->ge_x);
rustsecp256k1_v0_1_0_gej_set_ge(&data->gej_y, &data->ge_y);
memcpy(data->data, init_x, 32);
memcpy(data->data + 32, init_y, 32);
}
@ -61,7 +61,7 @@ void bench_scalar_add(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 2000000; i++) {
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
rustsecp256k1_v0_1_0_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
}
}
@ -70,7 +70,7 @@ void bench_scalar_negate(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 2000000; i++) {
secp256k1_scalar_negate(&data->scalar_x, &data->scalar_x);
rustsecp256k1_v0_1_0_scalar_negate(&data->scalar_x, &data->scalar_x);
}
}
@ -79,7 +79,7 @@ void bench_scalar_sqr(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 200000; i++) {
secp256k1_scalar_sqr(&data->scalar_x, &data->scalar_x);
rustsecp256k1_v0_1_0_scalar_sqr(&data->scalar_x, &data->scalar_x);
}
}
@ -88,7 +88,7 @@ void bench_scalar_mul(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 200000; i++) {
secp256k1_scalar_mul(&data->scalar_x, &data->scalar_x, &data->scalar_y);
rustsecp256k1_v0_1_0_scalar_mul(&data->scalar_x, &data->scalar_x, &data->scalar_y);
}
}
@ -98,9 +98,9 @@ void bench_scalar_split(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 20000; i++) {
secp256k1_scalar l, r;
secp256k1_scalar_split_lambda(&l, &r, &data->scalar_x);
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
rustsecp256k1_v0_1_0_scalar l, r;
rustsecp256k1_v0_1_0_scalar_split_lambda(&l, &r, &data->scalar_x);
rustsecp256k1_v0_1_0_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
}
}
#endif
@ -110,8 +110,8 @@ void bench_scalar_inverse(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 2000; i++) {
secp256k1_scalar_inverse(&data->scalar_x, &data->scalar_x);
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
rustsecp256k1_v0_1_0_scalar_inverse(&data->scalar_x, &data->scalar_x);
rustsecp256k1_v0_1_0_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
}
}
@ -120,8 +120,8 @@ void bench_scalar_inverse_var(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 2000; i++) {
secp256k1_scalar_inverse_var(&data->scalar_x, &data->scalar_x);
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
rustsecp256k1_v0_1_0_scalar_inverse_var(&data->scalar_x, &data->scalar_x);
rustsecp256k1_v0_1_0_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
}
}
@ -130,7 +130,7 @@ void bench_field_normalize(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 2000000; i++) {
secp256k1_fe_normalize(&data->fe_x);
rustsecp256k1_v0_1_0_fe_normalize(&data->fe_x);
}
}
@ -139,7 +139,7 @@ void bench_field_normalize_weak(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 2000000; i++) {
secp256k1_fe_normalize_weak(&data->fe_x);
rustsecp256k1_v0_1_0_fe_normalize_weak(&data->fe_x);
}
}
@ -148,7 +148,7 @@ void bench_field_mul(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 200000; i++) {
secp256k1_fe_mul(&data->fe_x, &data->fe_x, &data->fe_y);
rustsecp256k1_v0_1_0_fe_mul(&data->fe_x, &data->fe_x, &data->fe_y);
}
}
@ -157,7 +157,7 @@ void bench_field_sqr(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 200000; i++) {
secp256k1_fe_sqr(&data->fe_x, &data->fe_x);
rustsecp256k1_v0_1_0_fe_sqr(&data->fe_x, &data->fe_x);
}
}
@ -166,8 +166,8 @@ void bench_field_inverse(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 20000; i++) {
secp256k1_fe_inv(&data->fe_x, &data->fe_x);
secp256k1_fe_add(&data->fe_x, &data->fe_y);
rustsecp256k1_v0_1_0_fe_inv(&data->fe_x, &data->fe_x);
rustsecp256k1_v0_1_0_fe_add(&data->fe_x, &data->fe_y);
}
}
@ -176,20 +176,20 @@ void bench_field_inverse_var(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 20000; i++) {
secp256k1_fe_inv_var(&data->fe_x, &data->fe_x);
secp256k1_fe_add(&data->fe_x, &data->fe_y);
rustsecp256k1_v0_1_0_fe_inv_var(&data->fe_x, &data->fe_x);
rustsecp256k1_v0_1_0_fe_add(&data->fe_x, &data->fe_y);
}
}
void bench_field_sqrt(void* arg) {
int i;
bench_inv *data = (bench_inv*)arg;
secp256k1_fe t;
rustsecp256k1_v0_1_0_fe t;
for (i = 0; i < 20000; i++) {
t = data->fe_x;
secp256k1_fe_sqrt(&data->fe_x, &t);
secp256k1_fe_add(&data->fe_x, &data->fe_y);
rustsecp256k1_v0_1_0_fe_sqrt(&data->fe_x, &t);
rustsecp256k1_v0_1_0_fe_add(&data->fe_x, &data->fe_y);
}
}
@ -198,7 +198,7 @@ void bench_group_double_var(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 200000; i++) {
secp256k1_gej_double_var(&data->gej_x, &data->gej_x, NULL);
rustsecp256k1_v0_1_0_gej_double_var(&data->gej_x, &data->gej_x, NULL);
}
}
@ -207,7 +207,7 @@ void bench_group_add_var(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 200000; i++) {
secp256k1_gej_add_var(&data->gej_x, &data->gej_x, &data->gej_y, NULL);
rustsecp256k1_v0_1_0_gej_add_var(&data->gej_x, &data->gej_x, &data->gej_y, NULL);
}
}
@ -216,7 +216,7 @@ void bench_group_add_affine(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 200000; i++) {
secp256k1_gej_add_ge(&data->gej_x, &data->gej_x, &data->ge_y);
rustsecp256k1_v0_1_0_gej_add_ge(&data->gej_x, &data->gej_x, &data->ge_y);
}
}
@ -225,7 +225,7 @@ void bench_group_add_affine_var(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 200000; i++) {
secp256k1_gej_add_ge_var(&data->gej_x, &data->gej_x, &data->ge_y, NULL);
rustsecp256k1_v0_1_0_gej_add_ge_var(&data->gej_x, &data->gej_x, &data->ge_y, NULL);
}
}
@ -234,7 +234,7 @@ void bench_group_jacobi_var(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 20000; i++) {
secp256k1_gej_has_quad_y_var(&data->gej_x);
rustsecp256k1_v0_1_0_gej_has_quad_y_var(&data->gej_x);
}
}
@ -243,8 +243,8 @@ void bench_ecmult_wnaf(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 20000; i++) {
secp256k1_ecmult_wnaf(data->wnaf, 256, &data->scalar_x, WINDOW_A);
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
rustsecp256k1_v0_1_0_ecmult_wnaf(data->wnaf, 256, &data->scalar_x, WINDOW_A);
rustsecp256k1_v0_1_0_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
}
}
@ -253,8 +253,8 @@ void bench_wnaf_const(void* arg) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 20000; i++) {
secp256k1_wnaf_const(data->wnaf, &data->scalar_x, WINDOW_A, 256);
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
rustsecp256k1_v0_1_0_wnaf_const(data->wnaf, &data->scalar_x, WINDOW_A, 256);
rustsecp256k1_v0_1_0_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
}
}
@ -262,35 +262,35 @@ void bench_wnaf_const(void* arg) {
void bench_sha256(void* arg) {
int i;
bench_inv *data = (bench_inv*)arg;
secp256k1_sha256 sha;
rustsecp256k1_v0_1_0_sha256 sha;
for (i = 0; i < 20000; i++) {
secp256k1_sha256_initialize(&sha);
secp256k1_sha256_write(&sha, data->data, 32);
secp256k1_sha256_finalize(&sha, data->data);
rustsecp256k1_v0_1_0_sha256_initialize(&sha);
rustsecp256k1_v0_1_0_sha256_write(&sha, data->data, 32);
rustsecp256k1_v0_1_0_sha256_finalize(&sha, data->data);
}
}
void bench_hmac_sha256(void* arg) {
int i;
bench_inv *data = (bench_inv*)arg;
secp256k1_hmac_sha256 hmac;
rustsecp256k1_v0_1_0_hmac_sha256 hmac;
for (i = 0; i < 20000; i++) {
secp256k1_hmac_sha256_initialize(&hmac, data->data, 32);
secp256k1_hmac_sha256_write(&hmac, data->data, 32);
secp256k1_hmac_sha256_finalize(&hmac, data->data);
rustsecp256k1_v0_1_0_hmac_sha256_initialize(&hmac, data->data, 32);
rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, data->data, 32);
rustsecp256k1_v0_1_0_hmac_sha256_finalize(&hmac, data->data);
}
}
void bench_rfc6979_hmac_sha256(void* arg) {
int i;
bench_inv *data = (bench_inv*)arg;
secp256k1_rfc6979_hmac_sha256 rng;
rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 rng;
for (i = 0; i < 20000; i++) {
secp256k1_rfc6979_hmac_sha256_initialize(&rng, data->data, 64);
secp256k1_rfc6979_hmac_sha256_generate(&rng, data->data, 32);
rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_initialize(&rng, data->data, 64);
rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_generate(&rng, data->data, 32);
}
}
@ -298,7 +298,7 @@ void bench_context_verify(void* arg) {
int i;
(void)arg;
for (i = 0; i < 20; i++) {
secp256k1_context_destroy(secp256k1_context_create(SECP256K1_CONTEXT_VERIFY));
rustsecp256k1_v0_1_0_context_destroy(rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_VERIFY));
}
}
@ -306,7 +306,7 @@ void bench_context_sign(void* arg) {
int i;
(void)arg;
for (i = 0; i < 200; i++) {
secp256k1_context_destroy(secp256k1_context_create(SECP256K1_CONTEXT_SIGN));
rustsecp256k1_v0_1_0_context_destroy(rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN));
}
}
@ -314,14 +314,14 @@ void bench_context_sign(void* arg) {
void bench_num_jacobi(void* arg) {
int i;
bench_inv *data = (bench_inv*)arg;
secp256k1_num nx, norder;
rustsecp256k1_v0_1_0_num nx, norder;
secp256k1_scalar_get_num(&nx, &data->scalar_x);
secp256k1_scalar_order_get_num(&norder);
secp256k1_scalar_get_num(&norder, &data->scalar_y);
rustsecp256k1_v0_1_0_scalar_get_num(&nx, &data->scalar_x);
rustsecp256k1_v0_1_0_scalar_order_get_num(&norder);
rustsecp256k1_v0_1_0_scalar_get_num(&norder, &data->scalar_y);
for (i = 0; i < 200000; i++) {
secp256k1_num_jacobi(&nx, &norder);
rustsecp256k1_v0_1_0_num_jacobi(&nx, &norder);
}
}
#endif

View File

@ -10,7 +10,7 @@
#include "bench.h"
typedef struct {
secp256k1_context *ctx;
rustsecp256k1_v0_1_0_context *ctx;
unsigned char msg[32];
unsigned char sig[64];
} bench_recover_data;
@ -18,16 +18,16 @@ typedef struct {
void bench_recover(void* arg) {
int i;
bench_recover_data *data = (bench_recover_data*)arg;
secp256k1_pubkey pubkey;
rustsecp256k1_v0_1_0_pubkey pubkey;
unsigned char pubkeyc[33];
for (i = 0; i < 20000; i++) {
int j;
size_t pubkeylen = 33;
secp256k1_ecdsa_recoverable_signature sig;
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(data->ctx, &sig, data->sig, i % 2));
CHECK(secp256k1_ecdsa_recover(data->ctx, &pubkey, &sig, data->msg));
CHECK(secp256k1_ec_pubkey_serialize(data->ctx, pubkeyc, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED));
rustsecp256k1_v0_1_0_ecdsa_recoverable_signature sig;
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(data->ctx, &sig, data->sig, i % 2));
CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(data->ctx, &pubkey, &sig, data->msg));
CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(data->ctx, pubkeyc, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED));
for (j = 0; j < 32; j++) {
data->sig[j + 32] = data->msg[j]; /* Move former message to S. */
data->msg[j] = data->sig[j]; /* Move former R to message. */
@ -51,10 +51,10 @@ void bench_recover_setup(void* arg) {
int main(void) {
bench_recover_data data;
data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
data.ctx = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_VERIFY);
run_benchmark("ecdsa_recover", bench_recover, bench_recover_setup, NULL, &data, 10, 20000);
secp256k1_context_destroy(data.ctx);
rustsecp256k1_v0_1_0_context_destroy(data.ctx);
return 0;
}

View File

@ -9,7 +9,7 @@
#include "bench.h"
typedef struct {
secp256k1_context* ctx;
rustsecp256k1_v0_1_0_context* ctx;
unsigned char msg[32];
unsigned char key[32];
} bench_sign;
@ -34,9 +34,9 @@ static void bench_sign_run(void* arg) {
for (i = 0; i < 20000; i++) {
size_t siglen = 74;
int j;
secp256k1_ecdsa_signature signature;
CHECK(secp256k1_ecdsa_sign(data->ctx, &signature, data->msg, data->key, NULL, NULL));
CHECK(secp256k1_ecdsa_signature_serialize_der(data->ctx, sig, &siglen, &signature));
rustsecp256k1_v0_1_0_ecdsa_signature signature;
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(data->ctx, &signature, data->msg, data->key, NULL, NULL));
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_serialize_der(data->ctx, sig, &siglen, &signature));
for (j = 0; j < 32; j++) {
data->msg[j] = sig[j];
data->key[j] = sig[j + 32];
@ -47,10 +47,10 @@ static void bench_sign_run(void* arg) {
int main(void) {
bench_sign data;
data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
data.ctx = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN);
run_benchmark("ecdsa_sign", bench_sign_run, bench_sign_setup, NULL, &data, 10, 20000);
secp256k1_context_destroy(data.ctx);
rustsecp256k1_v0_1_0_context_destroy(data.ctx);
return 0;
}

View File

@ -18,7 +18,7 @@
#endif
typedef struct {
secp256k1_context *ctx;
rustsecp256k1_v0_1_0_context *ctx;
unsigned char msg[32];
unsigned char key[32];
unsigned char sig[72];
@ -35,14 +35,14 @@ static void benchmark_verify(void* arg) {
benchmark_verify_t* data = (benchmark_verify_t*)arg;
for (i = 0; i < 20000; i++) {
secp256k1_pubkey pubkey;
secp256k1_ecdsa_signature sig;
rustsecp256k1_v0_1_0_pubkey pubkey;
rustsecp256k1_v0_1_0_ecdsa_signature sig;
data->sig[data->siglen - 1] ^= (i & 0xFF);
data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF);
data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF);
CHECK(secp256k1_ec_pubkey_parse(data->ctx, &pubkey, data->pubkey, data->pubkeylen) == 1);
CHECK(secp256k1_ecdsa_signature_parse_der(data->ctx, &sig, data->sig, data->siglen) == 1);
CHECK(secp256k1_ecdsa_verify(data->ctx, &sig, data->msg, &pubkey) == (i == 0));
CHECK(rustsecp256k1_v0_1_0_ec_pubkey_parse(data->ctx, &pubkey, data->pubkey, data->pubkeylen) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(data->ctx, &sig, data->sig, data->siglen) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(data->ctx, &sig, data->msg, &pubkey) == (i == 0));
data->sig[data->siglen - 1] ^= (i & 0xFF);
data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF);
data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF);
@ -81,11 +81,11 @@ static void benchmark_verify_openssl(void* arg) {
int main(void) {
int i;
secp256k1_pubkey pubkey;
secp256k1_ecdsa_signature sig;
rustsecp256k1_v0_1_0_pubkey pubkey;
rustsecp256k1_v0_1_0_ecdsa_signature sig;
benchmark_verify_t data;
data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
data.ctx = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
for (i = 0; i < 32; i++) {
data.msg[i] = 1 + i;
@ -94,11 +94,11 @@ int main(void) {
data.key[i] = 33 + i;
}
data.siglen = 72;
CHECK(secp256k1_ecdsa_sign(data.ctx, &sig, data.msg, data.key, NULL, NULL));
CHECK(secp256k1_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig));
CHECK(secp256k1_ec_pubkey_create(data.ctx, &pubkey, data.key));
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(data.ctx, &sig, data.msg, data.key, NULL, NULL));
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig));
CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(data.ctx, &pubkey, data.key));
data.pubkeylen = 33;
CHECK(secp256k1_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
run_benchmark("ecdsa_verify", benchmark_verify, NULL, NULL, &data, 10, 20000);
#ifdef ENABLE_OPENSSL_TESTS
@ -107,6 +107,6 @@ int main(void) {
EC_GROUP_free(data.ec_group);
#endif
secp256k1_context_destroy(data.ctx);
rustsecp256k1_v0_1_0_context_destroy(data.ctx);
return 0;
}
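
The verify benchmark above flips three signature bytes with values derived from the loop counter, so only iteration 0 presents the original, valid signature (hence the `== (i == 0)` check), and then XORs the same values back so the buffer is intact for the next round. A small standalone sketch of that corrupt-then-restore involution (toy values, a single byte):

#include <stdio.h>

int main(void) {
    unsigned char sig_byte = 0x5a;
    unsigned i;
    for (i = 0; i < 4; i++) {
        sig_byte ^= (unsigned char)(i & 0xFF);    /* corrupt (a no-op when the XORed value is 0) */
        printf("iter %u: valid=%d\n", i, i == 0); /* only the untouched signature verifies */
        sig_byte ^= (unsigned char)(i & 0xFF);    /* XOR again restores the original byte */
    }
    printf("restored: %s\n", sig_byte == 0x5a ? "yes" : "no");
    return 0;
}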

View File

@ -0,0 +1,21 @@
/**********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_ECDSA_H
#define SECP256K1_ECDSA_H
#include <stddef.h>
#include "scalar.h"
#include "group.h"
#include "ecmult.h"
static int rustsecp256k1_v0_1_0_ecdsa_sig_parse(rustsecp256k1_v0_1_0_scalar *r, rustsecp256k1_v0_1_0_scalar *s, const unsigned char *sig, size_t size);
static int rustsecp256k1_v0_1_0_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_1_0_scalar *r, const rustsecp256k1_v0_1_0_scalar *s);
static int rustsecp256k1_v0_1_0_ecdsa_sig_verify(const rustsecp256k1_v0_1_0_ecmult_context *ctx, const rustsecp256k1_v0_1_0_scalar* r, const rustsecp256k1_v0_1_0_scalar* s, const rustsecp256k1_v0_1_0_ge *pubkey, const rustsecp256k1_v0_1_0_scalar *message);
static int rustsecp256k1_v0_1_0_ecdsa_sig_sign(const rustsecp256k1_v0_1_0_ecmult_gen_context *ctx, rustsecp256k1_v0_1_0_scalar* r, rustsecp256k1_v0_1_0_scalar* s, const rustsecp256k1_v0_1_0_scalar *seckey, const rustsecp256k1_v0_1_0_scalar *message, const rustsecp256k1_v0_1_0_scalar *nonce, int *recid);
#endif /* SECP256K1_ECDSA_H */

View File

@ -28,7 +28,7 @@
* sage: '%x' % (EllipticCurve ([F (a), F (b)]).order())
* 'fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141'
*/
static const secp256k1_fe secp256k1_ecdsa_const_order_as_fe = SECP256K1_FE_CONST(
static const rustsecp256k1_v0_1_0_fe rustsecp256k1_v0_1_0_ecdsa_const_order_as_fe = SECP256K1_FE_CONST(
0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL,
0xBAAEDCE6UL, 0xAF48A03BUL, 0xBFD25E8CUL, 0xD0364141UL
);
@ -42,11 +42,11 @@ static const secp256k1_fe secp256k1_ecdsa_const_order_as_fe = SECP256K1_FE_CONST
* sage: '%x' % (p - EllipticCurve ([F (a), F (b)]).order())
* '14551231950b75fc4402da1722fc9baee'
*/
static const secp256k1_fe secp256k1_ecdsa_const_p_minus_order = SECP256K1_FE_CONST(
static const rustsecp256k1_v0_1_0_fe rustsecp256k1_v0_1_0_ecdsa_const_p_minus_order = SECP256K1_FE_CONST(
0, 0, 0, 1, 0x45512319UL, 0x50B75FC4UL, 0x402DA172UL, 0x2FC9BAEEUL
);
static int secp256k1_der_read_len(const unsigned char **sigp, const unsigned char *sigend) {
static int rustsecp256k1_v0_1_0_der_read_len(const unsigned char **sigp, const unsigned char *sigend) {
int lenleft, b1;
size_t ret = 0;
if (*sigp >= sigend) {
@ -96,7 +96,7 @@ static int secp256k1_der_read_len(const unsigned char **sigp, const unsigned cha
return ret;
}
static int secp256k1_der_parse_integer(secp256k1_scalar *r, const unsigned char **sig, const unsigned char *sigend) {
static int rustsecp256k1_v0_1_0_der_parse_integer(rustsecp256k1_v0_1_0_scalar *r, const unsigned char **sig, const unsigned char *sigend) {
int overflow = 0;
unsigned char ra[32] = {0};
int rlen;
@ -106,7 +106,7 @@ static int secp256k1_der_parse_integer(secp256k1_scalar *r, const unsigned char
return 0;
}
(*sig)++;
rlen = secp256k1_der_read_len(sig, sigend);
rlen = rustsecp256k1_v0_1_0_der_read_len(sig, sigend);
if (rlen <= 0 || (*sig) + rlen > sigend) {
/* Exceeds bounds or not at least length 1 (X.690-0207 8.3.1). */
return 0;
@ -133,23 +133,23 @@ static int secp256k1_der_parse_integer(secp256k1_scalar *r, const unsigned char
}
if (!overflow) {
memcpy(ra + 32 - rlen, *sig, rlen);
secp256k1_scalar_set_b32(r, ra, &overflow);
rustsecp256k1_v0_1_0_scalar_set_b32(r, ra, &overflow);
}
if (overflow) {
secp256k1_scalar_set_int(r, 0);
rustsecp256k1_v0_1_0_scalar_set_int(r, 0);
}
(*sig) += rlen;
return 1;
}
static int secp256k1_ecdsa_sig_parse(secp256k1_scalar *rr, secp256k1_scalar *rs, const unsigned char *sig, size_t size) {
static int rustsecp256k1_v0_1_0_ecdsa_sig_parse(rustsecp256k1_v0_1_0_scalar *rr, rustsecp256k1_v0_1_0_scalar *rs, const unsigned char *sig, size_t size) {
const unsigned char *sigend = sig + size;
int rlen;
if (sig == sigend || *(sig++) != 0x30) {
/* The encoding doesn't start with a constructed sequence (X.690-0207 8.9.1). */
return 0;
}
rlen = secp256k1_der_read_len(&sig, sigend);
rlen = rustsecp256k1_v0_1_0_der_read_len(&sig, sigend);
if (rlen < 0 || sig + rlen > sigend) {
/* Tuple exceeds bounds */
return 0;
@ -159,10 +159,10 @@ static int secp256k1_ecdsa_sig_parse(secp256k1_scalar *rr, secp256k1_scalar *rs,
return 0;
}
if (!secp256k1_der_parse_integer(rr, &sig, sigend)) {
if (!rustsecp256k1_v0_1_0_der_parse_integer(rr, &sig, sigend)) {
return 0;
}
if (!secp256k1_der_parse_integer(rs, &sig, sigend)) {
if (!rustsecp256k1_v0_1_0_der_parse_integer(rs, &sig, sigend)) {
return 0;
}
@ -174,12 +174,12 @@ static int secp256k1_ecdsa_sig_parse(secp256k1_scalar *rr, secp256k1_scalar *rs,
return 1;
}
static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const secp256k1_scalar* ar, const secp256k1_scalar* as) {
static int rustsecp256k1_v0_1_0_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_1_0_scalar* ar, const rustsecp256k1_v0_1_0_scalar* as) {
unsigned char r[33] = {0}, s[33] = {0};
unsigned char *rp = r, *sp = s;
size_t lenR = 33, lenS = 33;
secp256k1_scalar_get_b32(&r[1], ar);
secp256k1_scalar_get_b32(&s[1], as);
rustsecp256k1_v0_1_0_scalar_get_b32(&r[1], ar);
rustsecp256k1_v0_1_0_scalar_get_b32(&s[1], as);
while (lenR > 1 && rp[0] == 0 && rp[1] < 0x80) { lenR--; rp++; }
while (lenS > 1 && sp[0] == 0 && sp[1] < 0x80) { lenS--; sp++; }
if (*size < 6+lenS+lenR) {
@ -198,42 +198,42 @@ static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const
return 1;
}
static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const secp256k1_scalar *sigr, const secp256k1_scalar *sigs, const secp256k1_ge *pubkey, const secp256k1_scalar *message) {
static int rustsecp256k1_v0_1_0_ecdsa_sig_verify(const rustsecp256k1_v0_1_0_ecmult_context *ctx, const rustsecp256k1_v0_1_0_scalar *sigr, const rustsecp256k1_v0_1_0_scalar *sigs, const rustsecp256k1_v0_1_0_ge *pubkey, const rustsecp256k1_v0_1_0_scalar *message) {
unsigned char c[32];
secp256k1_scalar sn, u1, u2;
rustsecp256k1_v0_1_0_scalar sn, u1, u2;
#if !defined(EXHAUSTIVE_TEST_ORDER)
secp256k1_fe xr;
rustsecp256k1_v0_1_0_fe xr;
#endif
secp256k1_gej pubkeyj;
secp256k1_gej pr;
rustsecp256k1_v0_1_0_gej pubkeyj;
rustsecp256k1_v0_1_0_gej pr;
if (secp256k1_scalar_is_zero(sigr) || secp256k1_scalar_is_zero(sigs)) {
if (rustsecp256k1_v0_1_0_scalar_is_zero(sigr) || rustsecp256k1_v0_1_0_scalar_is_zero(sigs)) {
return 0;
}
secp256k1_scalar_inverse_var(&sn, sigs);
secp256k1_scalar_mul(&u1, &sn, message);
secp256k1_scalar_mul(&u2, &sn, sigr);
secp256k1_gej_set_ge(&pubkeyj, pubkey);
secp256k1_ecmult(ctx, &pr, &pubkeyj, &u2, &u1);
if (secp256k1_gej_is_infinity(&pr)) {
rustsecp256k1_v0_1_0_scalar_inverse_var(&sn, sigs);
rustsecp256k1_v0_1_0_scalar_mul(&u1, &sn, message);
rustsecp256k1_v0_1_0_scalar_mul(&u2, &sn, sigr);
rustsecp256k1_v0_1_0_gej_set_ge(&pubkeyj, pubkey);
rustsecp256k1_v0_1_0_ecmult(ctx, &pr, &pubkeyj, &u2, &u1);
if (rustsecp256k1_v0_1_0_gej_is_infinity(&pr)) {
return 0;
}
#if defined(EXHAUSTIVE_TEST_ORDER)
{
secp256k1_scalar computed_r;
secp256k1_ge pr_ge;
secp256k1_ge_set_gej(&pr_ge, &pr);
secp256k1_fe_normalize(&pr_ge.x);
rustsecp256k1_v0_1_0_scalar computed_r;
rustsecp256k1_v0_1_0_ge pr_ge;
rustsecp256k1_v0_1_0_ge_set_gej(&pr_ge, &pr);
rustsecp256k1_v0_1_0_fe_normalize(&pr_ge.x);
secp256k1_fe_get_b32(c, &pr_ge.x);
secp256k1_scalar_set_b32(&computed_r, c, NULL);
return secp256k1_scalar_eq(sigr, &computed_r);
rustsecp256k1_v0_1_0_fe_get_b32(c, &pr_ge.x);
rustsecp256k1_v0_1_0_scalar_set_b32(&computed_r, c, NULL);
return rustsecp256k1_v0_1_0_scalar_eq(sigr, &computed_r);
}
#else
secp256k1_scalar_get_b32(c, sigr);
secp256k1_fe_set_b32(&xr, c);
rustsecp256k1_v0_1_0_scalar_get_b32(c, sigr);
rustsecp256k1_v0_1_0_fe_set_b32(&xr, c);
/** We now have the recomputed R point in pr, and its claimed x coordinate (modulo n)
* in xr. Naively, we would extract the x coordinate from pr (requiring an inversion modulo p),
@ -249,18 +249,18 @@ static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const
* <=> (xr * pr.z^2 mod p == pr.x) || (xr + n < p && (xr + n) * pr.z^2 mod p == pr.x)
*
* Thus, we can avoid the inversion, but we have to check both cases separately.
* secp256k1_gej_eq_x implements the (xr * pr.z^2 mod p == pr.x) test.
* rustsecp256k1_v0_1_0_gej_eq_x implements the (xr * pr.z^2 mod p == pr.x) test.
*/
if (secp256k1_gej_eq_x_var(&xr, &pr)) {
if (rustsecp256k1_v0_1_0_gej_eq_x_var(&xr, &pr)) {
/* xr * pr.z^2 mod p == pr.x, so the signature is valid. */
return 1;
}
if (secp256k1_fe_cmp_var(&xr, &secp256k1_ecdsa_const_p_minus_order) >= 0) {
if (rustsecp256k1_v0_1_0_fe_cmp_var(&xr, &rustsecp256k1_v0_1_0_ecdsa_const_p_minus_order) >= 0) {
/* xr + n >= p, so we can skip testing the second case. */
return 0;
}
secp256k1_fe_add(&xr, &secp256k1_ecdsa_const_order_as_fe);
if (secp256k1_gej_eq_x_var(&xr, &pr)) {
rustsecp256k1_v0_1_0_fe_add(&xr, &rustsecp256k1_v0_1_0_ecdsa_const_order_as_fe);
if (rustsecp256k1_v0_1_0_gej_eq_x_var(&xr, &pr)) {
/* (xr + n) * pr.z^2 mod p == pr.x, so the signature is valid. */
return 1;
}
@ -268,41 +268,41 @@ static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const
#endif
}
static int secp256k1_ecdsa_sig_sign(const secp256k1_ecmult_gen_context *ctx, secp256k1_scalar *sigr, secp256k1_scalar *sigs, const secp256k1_scalar *seckey, const secp256k1_scalar *message, const secp256k1_scalar *nonce, int *recid) {
static int rustsecp256k1_v0_1_0_ecdsa_sig_sign(const rustsecp256k1_v0_1_0_ecmult_gen_context *ctx, rustsecp256k1_v0_1_0_scalar *sigr, rustsecp256k1_v0_1_0_scalar *sigs, const rustsecp256k1_v0_1_0_scalar *seckey, const rustsecp256k1_v0_1_0_scalar *message, const rustsecp256k1_v0_1_0_scalar *nonce, int *recid) {
unsigned char b[32];
secp256k1_gej rp;
secp256k1_ge r;
secp256k1_scalar n;
rustsecp256k1_v0_1_0_gej rp;
rustsecp256k1_v0_1_0_ge r;
rustsecp256k1_v0_1_0_scalar n;
int overflow = 0;
secp256k1_ecmult_gen(ctx, &rp, nonce);
secp256k1_ge_set_gej(&r, &rp);
secp256k1_fe_normalize(&r.x);
secp256k1_fe_normalize(&r.y);
secp256k1_fe_get_b32(b, &r.x);
secp256k1_scalar_set_b32(sigr, b, &overflow);
rustsecp256k1_v0_1_0_ecmult_gen(ctx, &rp, nonce);
rustsecp256k1_v0_1_0_ge_set_gej(&r, &rp);
rustsecp256k1_v0_1_0_fe_normalize(&r.x);
rustsecp256k1_v0_1_0_fe_normalize(&r.y);
rustsecp256k1_v0_1_0_fe_get_b32(b, &r.x);
rustsecp256k1_v0_1_0_scalar_set_b32(sigr, b, &overflow);
/* These two conditions should be checked before calling */
VERIFY_CHECK(!secp256k1_scalar_is_zero(sigr));
VERIFY_CHECK(!rustsecp256k1_v0_1_0_scalar_is_zero(sigr));
VERIFY_CHECK(overflow == 0);
if (recid) {
/* The overflow condition is cryptographically unreachable as hitting it requires finding the discrete log
* of some P where P.x >= order, and only 1 in about 2^127 points meets this criterion.
*/
*recid = (overflow ? 2 : 0) | (secp256k1_fe_is_odd(&r.y) ? 1 : 0);
*recid = (overflow ? 2 : 0) | (rustsecp256k1_v0_1_0_fe_is_odd(&r.y) ? 1 : 0);
}
secp256k1_scalar_mul(&n, sigr, seckey);
secp256k1_scalar_add(&n, &n, message);
secp256k1_scalar_inverse(sigs, nonce);
secp256k1_scalar_mul(sigs, sigs, &n);
secp256k1_scalar_clear(&n);
secp256k1_gej_clear(&rp);
secp256k1_ge_clear(&r);
if (secp256k1_scalar_is_zero(sigs)) {
rustsecp256k1_v0_1_0_scalar_mul(&n, sigr, seckey);
rustsecp256k1_v0_1_0_scalar_add(&n, &n, message);
rustsecp256k1_v0_1_0_scalar_inverse(sigs, nonce);
rustsecp256k1_v0_1_0_scalar_mul(sigs, sigs, &n);
rustsecp256k1_v0_1_0_scalar_clear(&n);
rustsecp256k1_v0_1_0_gej_clear(&rp);
rustsecp256k1_v0_1_0_ge_clear(&r);
if (rustsecp256k1_v0_1_0_scalar_is_zero(sigs)) {
return 0;
}
if (secp256k1_scalar_is_high(sigs)) {
secp256k1_scalar_negate(sigs, sigs);
if (rustsecp256k1_v0_1_0_scalar_is_high(sigs)) {
rustsecp256k1_v0_1_0_scalar_negate(sigs, sigs);
if (recid) {
*recid ^= 1;
}
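
The comment block in rustsecp256k1_v0_1_0_ecdsa_sig_verify above explains why two x-coordinate comparisons are needed: the signature stores R.x reduced modulo the group order n, which is slightly smaller than the field prime p, so the affine x of the recomputed point may equal either xr or xr + n. A toy, self-contained illustration of that case split with small stand-in constants (p = 23, n = 19, not the real curve parameters, and with Z = 1 so no Jacobian rescaling is needed):

#include <stdio.h>

int main(void) {
    const unsigned p = 23, n = 19;     /* toy "field prime" and "group order" */
    unsigned rx_affine = 21;           /* affine x of the recomputed point R */
    unsigned sigr = rx_affine % n;     /* what the signature actually stores */
    int ok;

    /* First case: sigr already equals R.x (the real code compares
     * sigr * Z^2 against the Jacobian X to avoid an inversion). */
    ok = (sigr == rx_affine);
    /* Second case: only reachable when sigr + n < p. */
    if (!ok && sigr + n < p) {
        ok = (sigr + n == rx_affine);
    }
    printf("r=%u matches R.x=%u: %s\n", sigr, rx_affine, ok ? "yes" : "no");
    return 0;
}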

View File

@ -0,0 +1,25 @@
/**********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_ECKEY_H
#define SECP256K1_ECKEY_H
#include <stddef.h>
#include "group.h"
#include "scalar.h"
#include "ecmult.h"
#include "ecmult_gen.h"
static int rustsecp256k1_v0_1_0_eckey_pubkey_parse(rustsecp256k1_v0_1_0_ge *elem, const unsigned char *pub, size_t size);
static int rustsecp256k1_v0_1_0_eckey_pubkey_serialize(rustsecp256k1_v0_1_0_ge *elem, unsigned char *pub, size_t *size, int compressed);
static int rustsecp256k1_v0_1_0_eckey_privkey_tweak_add(rustsecp256k1_v0_1_0_scalar *key, const rustsecp256k1_v0_1_0_scalar *tweak);
static int rustsecp256k1_v0_1_0_eckey_pubkey_tweak_add(const rustsecp256k1_v0_1_0_ecmult_context *ctx, rustsecp256k1_v0_1_0_ge *key, const rustsecp256k1_v0_1_0_scalar *tweak);
static int rustsecp256k1_v0_1_0_eckey_privkey_tweak_mul(rustsecp256k1_v0_1_0_scalar *key, const rustsecp256k1_v0_1_0_scalar *tweak);
static int rustsecp256k1_v0_1_0_eckey_pubkey_tweak_mul(const rustsecp256k1_v0_1_0_ecmult_context *ctx, rustsecp256k1_v0_1_0_ge *key, const rustsecp256k1_v0_1_0_scalar *tweak);
#endif /* SECP256K1_ECKEY_H */

View File

@ -0,0 +1,100 @@
/**********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_ECKEY_IMPL_H
#define SECP256K1_ECKEY_IMPL_H
#include "eckey.h"
#include "scalar.h"
#include "field.h"
#include "group.h"
#include "ecmult_gen.h"
static int rustsecp256k1_v0_1_0_eckey_pubkey_parse(rustsecp256k1_v0_1_0_ge *elem, const unsigned char *pub, size_t size) {
if (size == 33 && (pub[0] == SECP256K1_TAG_PUBKEY_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_ODD)) {
rustsecp256k1_v0_1_0_fe x;
return rustsecp256k1_v0_1_0_fe_set_b32(&x, pub+1) && rustsecp256k1_v0_1_0_ge_set_xo_var(elem, &x, pub[0] == SECP256K1_TAG_PUBKEY_ODD);
} else if (size == 65 && (pub[0] == SECP256K1_TAG_PUBKEY_UNCOMPRESSED || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) {
rustsecp256k1_v0_1_0_fe x, y;
if (!rustsecp256k1_v0_1_0_fe_set_b32(&x, pub+1) || !rustsecp256k1_v0_1_0_fe_set_b32(&y, pub+33)) {
return 0;
}
rustsecp256k1_v0_1_0_ge_set_xy(elem, &x, &y);
if ((pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD) &&
rustsecp256k1_v0_1_0_fe_is_odd(&y) != (pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) {
return 0;
}
return rustsecp256k1_v0_1_0_ge_is_valid_var(elem);
} else {
return 0;
}
}
static int rustsecp256k1_v0_1_0_eckey_pubkey_serialize(rustsecp256k1_v0_1_0_ge *elem, unsigned char *pub, size_t *size, int compressed) {
if (rustsecp256k1_v0_1_0_ge_is_infinity(elem)) {
return 0;
}
rustsecp256k1_v0_1_0_fe_normalize_var(&elem->x);
rustsecp256k1_v0_1_0_fe_normalize_var(&elem->y);
rustsecp256k1_v0_1_0_fe_get_b32(&pub[1], &elem->x);
if (compressed) {
*size = 33;
pub[0] = rustsecp256k1_v0_1_0_fe_is_odd(&elem->y) ? SECP256K1_TAG_PUBKEY_ODD : SECP256K1_TAG_PUBKEY_EVEN;
} else {
*size = 65;
pub[0] = SECP256K1_TAG_PUBKEY_UNCOMPRESSED;
rustsecp256k1_v0_1_0_fe_get_b32(&pub[33], &elem->y);
}
return 1;
}
static int rustsecp256k1_v0_1_0_eckey_privkey_tweak_add(rustsecp256k1_v0_1_0_scalar *key, const rustsecp256k1_v0_1_0_scalar *tweak) {
rustsecp256k1_v0_1_0_scalar_add(key, key, tweak);
if (rustsecp256k1_v0_1_0_scalar_is_zero(key)) {
return 0;
}
return 1;
}
static int rustsecp256k1_v0_1_0_eckey_pubkey_tweak_add(const rustsecp256k1_v0_1_0_ecmult_context *ctx, rustsecp256k1_v0_1_0_ge *key, const rustsecp256k1_v0_1_0_scalar *tweak) {
rustsecp256k1_v0_1_0_gej pt;
rustsecp256k1_v0_1_0_scalar one;
rustsecp256k1_v0_1_0_gej_set_ge(&pt, key);
rustsecp256k1_v0_1_0_scalar_set_int(&one, 1);
rustsecp256k1_v0_1_0_ecmult(ctx, &pt, &pt, &one, tweak);
if (rustsecp256k1_v0_1_0_gej_is_infinity(&pt)) {
return 0;
}
rustsecp256k1_v0_1_0_ge_set_gej(key, &pt);
return 1;
}
static int rustsecp256k1_v0_1_0_eckey_privkey_tweak_mul(rustsecp256k1_v0_1_0_scalar *key, const rustsecp256k1_v0_1_0_scalar *tweak) {
if (rustsecp256k1_v0_1_0_scalar_is_zero(tweak)) {
return 0;
}
rustsecp256k1_v0_1_0_scalar_mul(key, key, tweak);
return 1;
}
static int rustsecp256k1_v0_1_0_eckey_pubkey_tweak_mul(const rustsecp256k1_v0_1_0_ecmult_context *ctx, rustsecp256k1_v0_1_0_ge *key, const rustsecp256k1_v0_1_0_scalar *tweak) {
rustsecp256k1_v0_1_0_scalar zero;
rustsecp256k1_v0_1_0_gej pt;
if (rustsecp256k1_v0_1_0_scalar_is_zero(tweak)) {
return 0;
}
rustsecp256k1_v0_1_0_scalar_set_int(&zero, 0);
rustsecp256k1_v0_1_0_gej_set_ge(&pt, key);
rustsecp256k1_v0_1_0_ecmult(ctx, &pt, &pt, tweak, &zero);
rustsecp256k1_v0_1_0_ge_set_gej(key, &pt);
return 1;
}
#endif /* SECP256K1_ECKEY_IMPL_H */
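
rustsecp256k1_v0_1_0_eckey_pubkey_tweak_add above realizes P + t*G as a single double multiplication, rustsecp256k1_v0_1_0_ecmult(ctx, &pt, &pt, &one, tweak), i.e. 1*P + t*G, rejecting only the point at infinity. It pairs up with the private-key variant because tweaking commutes with taking the public key: (k + t)*G = k*G + t*G. A toy sketch of that identity on the scalar side (small stand-in order, illustrative values):

#include <stdio.h>

int main(void) {
    const unsigned N = 19;                            /* toy "group order" */
    unsigned key = 7, tweak = 15;
    unsigned tweaked_key = (key + tweak) % N;         /* analogue of privkey_tweak_add */
    unsigned pub_log = key;                           /* discrete log of key*G */
    unsigned tweaked_pub_log = (pub_log + tweak) % N; /* analogue of pubkey_tweak_add: P + t*G */
    printf("consistent: %s\n", tweaked_key == tweaked_pub_log ? "yes" : "no");
    return 0;
}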

View File

@ -0,0 +1,48 @@
/**********************************************************************
* Copyright (c) 2013, 2014, 2017 Pieter Wuille, Andrew Poelstra *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_ECMULT_H
#define SECP256K1_ECMULT_H
#include "num.h"
#include "group.h"
#include "scalar.h"
#include "scratch.h"
typedef struct {
/* For accelerating the computation of a*P + b*G: */
rustsecp256k1_v0_1_0_ge_storage (*pre_g)[]; /* odd multiples of the generator */
#ifdef USE_ENDOMORPHISM
rustsecp256k1_v0_1_0_ge_storage (*pre_g_128)[]; /* odd multiples of 2^128*generator */
#endif
} rustsecp256k1_v0_1_0_ecmult_context;
static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
static void rustsecp256k1_v0_1_0_ecmult_context_init(rustsecp256k1_v0_1_0_ecmult_context *ctx);
static void rustsecp256k1_v0_1_0_ecmult_context_build(rustsecp256k1_v0_1_0_ecmult_context *ctx, void **prealloc);
static void rustsecp256k1_v0_1_0_ecmult_context_finalize_memcpy(rustsecp256k1_v0_1_0_ecmult_context *dst, const rustsecp256k1_v0_1_0_ecmult_context *src);
static void rustsecp256k1_v0_1_0_ecmult_context_clear(rustsecp256k1_v0_1_0_ecmult_context *ctx);
static int rustsecp256k1_v0_1_0_ecmult_context_is_built(const rustsecp256k1_v0_1_0_ecmult_context *ctx);
/** Double multiply: R = na*A + ng*G */
static void rustsecp256k1_v0_1_0_ecmult(const rustsecp256k1_v0_1_0_ecmult_context *ctx, rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_scalar *na, const rustsecp256k1_v0_1_0_scalar *ng);
typedef int (rustsecp256k1_v0_1_0_ecmult_multi_callback)(rustsecp256k1_v0_1_0_scalar *sc, rustsecp256k1_v0_1_0_ge *pt, size_t idx, void *data);
/**
* Multi-multiply: R = inp_g_sc * G + sum_i ni * Ai.
* Chooses the right algorithm for a given number of points and scratch space
* size. Resets and overwrites the given scratch space. If the points do not
* fit in the scratch space the algorithm is repeatedly run with batches of
* points. If no scratch space is given then a simple algorithm is used that
* simply multiplies the points with the corresponding scalars and adds them up.
* Returns: 1 on success (including when inp_g_sc is NULL and n is 0)
* 0 if there is not enough scratch space for a single point or
* callback returns 0
*/
static int rustsecp256k1_v0_1_0_ecmult_multi_var(const rustsecp256k1_v0_1_0_callback* error_callback, const rustsecp256k1_v0_1_0_ecmult_context *ctx, rustsecp256k1_v0_1_0_scratch *scratch, rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_scalar *inp_g_sc, rustsecp256k1_v0_1_0_ecmult_multi_callback cb, void *cbdata, size_t n);
#endif /* SECP256K1_ECMULT_H */
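
The docstring above describes the batching behaviour of the multi-multiplication, but the calling convention is easiest to see from a callback. A minimal sketch of one, assuming it is compiled inside the library source tree the same way the bench files are; only the callback typedef and the ecmult_multi_var prototype come from the header above, and the struct and array names are hypothetical:

/* Hypothetical container for the inputs. */
typedef struct {
    const rustsecp256k1_v0_1_0_scalar *scalars;
    const rustsecp256k1_v0_1_0_ge *points;
} multi_input;

static int multi_cb(rustsecp256k1_v0_1_0_scalar *sc, rustsecp256k1_v0_1_0_ge *pt, size_t idx, void *data) {
    const multi_input *in = (const multi_input *)data;
    *sc = in->scalars[idx];   /* scalar n_i for point A_i */
    *pt = in->points[idx];    /* point A_i */
    return 1;                 /* returning 0 aborts and makes the multi-multiplication fail */
}

/* Call shape (sketch only):
 *   rustsecp256k1_v0_1_0_ecmult_multi_var(err_cb, ecmult_ctx, scratch, &r,
 *                                         &g_scalar, multi_cb, &in, n);
 * with scratch == NULL falling back to the simple one-point-at-a-time path
 * described in the docstring. */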

View File

@ -12,6 +12,6 @@
/* Here `bits` should be set to the maximum bitlength of the _absolute value_ of `q`, plus
* one because we internally sometimes add 2 to the number during the WNAF conversion. */
static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *q, int bits);
static void rustsecp256k1_v0_1_0_ecmult_const(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_ge *a, const rustsecp256k1_v0_1_0_scalar *q, int bits);
#endif /* SECP256K1_ECMULT_CONST_H */

View File

@ -17,21 +17,21 @@
int m; \
int abs_n = (n) * (((n) > 0) * 2 - 1); \
int idx_n = abs_n / 2; \
secp256k1_fe neg_y; \
rustsecp256k1_v0_1_0_fe neg_y; \
VERIFY_CHECK(((n) & 1) == 1); \
VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \
VERIFY_SETUP(secp256k1_fe_clear(&(r)->x)); \
VERIFY_SETUP(secp256k1_fe_clear(&(r)->y)); \
VERIFY_SETUP(rustsecp256k1_v0_1_0_fe_clear(&(r)->x)); \
VERIFY_SETUP(rustsecp256k1_v0_1_0_fe_clear(&(r)->y)); \
for (m = 0; m < ECMULT_TABLE_SIZE(w); m++) { \
/* This loop is used to avoid secret data in array indices. See
* the comment in ecmult_gen_impl.h for rationale. */ \
secp256k1_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \
secp256k1_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \
rustsecp256k1_v0_1_0_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \
rustsecp256k1_v0_1_0_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \
} \
(r)->infinity = 0; \
secp256k1_fe_negate(&neg_y, &(r)->y, 1); \
secp256k1_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \
rustsecp256k1_v0_1_0_fe_negate(&neg_y, &(r)->y, 1); \
rustsecp256k1_v0_1_0_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \
} while(0)
@ -48,7 +48,7 @@
*
* Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on pp. 335
*/
static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w, int size) {
static int rustsecp256k1_v0_1_0_wnaf_const(int *wnaf, const rustsecp256k1_v0_1_0_scalar *scalar, int w, int size) {
int global_sign;
int skew = 0;
int word = 0;
@ -59,7 +59,7 @@ static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w
int flip;
int bit;
secp256k1_scalar s;
rustsecp256k1_v0_1_0_scalar s;
int not_neg_one;
VERIFY_CHECK(w > 0);
@ -77,33 +77,33 @@ static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w
* particular, to ensure that the outputs from the endomorphism-split fit into
* 128 bits). If we negate, the parity of our number flips, inverting which of
* {1, 2} we want to add to the scalar when ensuring that it's odd. Further
* complicating things, -1 interacts badly with `secp256k1_scalar_cadd_bit` and
* complicating things, -1 interacts badly with `rustsecp256k1_v0_1_0_scalar_cadd_bit` and
* we need to special-case it in this logic. */
flip = secp256k1_scalar_is_high(scalar);
flip = rustsecp256k1_v0_1_0_scalar_is_high(scalar);
/* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */
bit = flip ^ !secp256k1_scalar_is_even(scalar);
bit = flip ^ !rustsecp256k1_v0_1_0_scalar_is_even(scalar);
/* We check for negative one, since adding 2 to it will cause an overflow */
secp256k1_scalar_negate(&s, scalar);
not_neg_one = !secp256k1_scalar_is_one(&s);
rustsecp256k1_v0_1_0_scalar_negate(&s, scalar);
not_neg_one = !rustsecp256k1_v0_1_0_scalar_is_one(&s);
s = *scalar;
secp256k1_scalar_cadd_bit(&s, bit, not_neg_one);
rustsecp256k1_v0_1_0_scalar_cadd_bit(&s, bit, not_neg_one);
/* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects
* that we added two to it and flipped it. In fact for -1 these operations are
* identical. We only flipped, but since skewing is required (in the sense that
* the skew must be 1 or 2, never zero) and flipping is not, we need to change
* our flags to claim that we only skewed. */
global_sign = secp256k1_scalar_cond_negate(&s, flip);
global_sign = rustsecp256k1_v0_1_0_scalar_cond_negate(&s, flip);
global_sign *= not_neg_one * 2 - 1;
skew = 1 << bit;
/* 4 */
u_last = secp256k1_scalar_shr_int(&s, w);
u_last = rustsecp256k1_v0_1_0_scalar_shr_int(&s, w);
do {
int sign;
int even;
/* 4.1 4.4 */
u = secp256k1_scalar_shr_int(&s, w);
u = rustsecp256k1_v0_1_0_scalar_shr_int(&s, w);
/* 4.2 */
even = ((u & 1) == 0);
sign = 2 * (u_last > 0) - 1;
@ -117,22 +117,22 @@ static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w
} while (word * w < size);
wnaf[word] = u * global_sign;
VERIFY_CHECK(secp256k1_scalar_is_zero(&s));
VERIFY_CHECK(rustsecp256k1_v0_1_0_scalar_is_zero(&s));
VERIFY_CHECK(word == WNAF_SIZE_BITS(size, w));
return skew;
}
static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *scalar, int size) {
secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
secp256k1_ge tmpa;
secp256k1_fe Z;
static void rustsecp256k1_v0_1_0_ecmult_const(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_ge *a, const rustsecp256k1_v0_1_0_scalar *scalar, int size) {
rustsecp256k1_v0_1_0_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
rustsecp256k1_v0_1_0_ge tmpa;
rustsecp256k1_v0_1_0_fe Z;
int skew_1;
#ifdef USE_ENDOMORPHISM
secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
rustsecp256k1_v0_1_0_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)];
int skew_lam;
secp256k1_scalar q_1, q_lam;
rustsecp256k1_v0_1_0_scalar q_1, q_lam;
#endif
int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];
@ -144,13 +144,13 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
if (size > 128) {
rsize = 128;
/* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
secp256k1_scalar_split_lambda(&q_1, &q_lam, scalar);
skew_1 = secp256k1_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128);
skew_lam = secp256k1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128);
rustsecp256k1_v0_1_0_scalar_split_lambda(&q_1, &q_lam, scalar);
skew_1 = rustsecp256k1_v0_1_0_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128);
skew_lam = rustsecp256k1_v0_1_0_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128);
} else
#endif
{
skew_1 = secp256k1_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size);
skew_1 = rustsecp256k1_v0_1_0_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size);
#ifdef USE_ENDOMORPHISM
skew_lam = 0;
#endif
@ -162,15 +162,15 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
* that the Z coordinate was 1, use affine addition formulae, and correct
* the Z coordinate of the result once at the end.
*/
secp256k1_gej_set_ge(r, a);
secp256k1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r);
rustsecp256k1_v0_1_0_gej_set_ge(r, a);
rustsecp256k1_v0_1_0_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r);
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
secp256k1_fe_normalize_weak(&pre_a[i].y);
rustsecp256k1_v0_1_0_fe_normalize_weak(&pre_a[i].y);
}
#ifdef USE_ENDOMORPHISM
if (size > 128) {
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
rustsecp256k1_v0_1_0_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
}
}
#endif
@ -181,13 +181,13 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
i = wnaf_1[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
VERIFY_CHECK(i != 0);
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
secp256k1_gej_set_ge(r, &tmpa);
rustsecp256k1_v0_1_0_gej_set_ge(r, &tmpa);
#ifdef USE_ENDOMORPHISM
if (size > 128) {
i = wnaf_lam[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
VERIFY_CHECK(i != 0);
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A);
secp256k1_gej_add_ge(r, r, &tmpa);
rustsecp256k1_v0_1_0_gej_add_ge(r, r, &tmpa);
}
#endif
/* remaining loop iterations */
@ -195,64 +195,64 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
int n;
int j;
for (j = 0; j < WINDOW_A - 1; ++j) {
secp256k1_gej_double_nonzero(r, r, NULL);
rustsecp256k1_v0_1_0_gej_double_nonzero(r, r, NULL);
}
n = wnaf_1[i];
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
VERIFY_CHECK(n != 0);
secp256k1_gej_add_ge(r, r, &tmpa);
rustsecp256k1_v0_1_0_gej_add_ge(r, r, &tmpa);
#ifdef USE_ENDOMORPHISM
if (size > 128) {
n = wnaf_lam[i];
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A);
VERIFY_CHECK(n != 0);
secp256k1_gej_add_ge(r, r, &tmpa);
rustsecp256k1_v0_1_0_gej_add_ge(r, r, &tmpa);
}
#endif
}
secp256k1_fe_mul(&r->z, &r->z, &Z);
rustsecp256k1_v0_1_0_fe_mul(&r->z, &r->z, &Z);
{
/* Correct for wNAF skew */
secp256k1_ge correction = *a;
secp256k1_ge_storage correction_1_stor;
rustsecp256k1_v0_1_0_ge correction = *a;
rustsecp256k1_v0_1_0_ge_storage correction_1_stor;
#ifdef USE_ENDOMORPHISM
secp256k1_ge_storage correction_lam_stor;
rustsecp256k1_v0_1_0_ge_storage correction_lam_stor;
#endif
secp256k1_ge_storage a2_stor;
secp256k1_gej tmpj;
secp256k1_gej_set_ge(&tmpj, &correction);
secp256k1_gej_double_var(&tmpj, &tmpj, NULL);
secp256k1_ge_set_gej(&correction, &tmpj);
secp256k1_ge_to_storage(&correction_1_stor, a);
rustsecp256k1_v0_1_0_ge_storage a2_stor;
rustsecp256k1_v0_1_0_gej tmpj;
rustsecp256k1_v0_1_0_gej_set_ge(&tmpj, &correction);
rustsecp256k1_v0_1_0_gej_double_var(&tmpj, &tmpj, NULL);
rustsecp256k1_v0_1_0_ge_set_gej(&correction, &tmpj);
rustsecp256k1_v0_1_0_ge_to_storage(&correction_1_stor, a);
#ifdef USE_ENDOMORPHISM
if (size > 128) {
secp256k1_ge_to_storage(&correction_lam_stor, a);
rustsecp256k1_v0_1_0_ge_to_storage(&correction_lam_stor, a);
}
#endif
secp256k1_ge_to_storage(&a2_stor, &correction);
rustsecp256k1_v0_1_0_ge_to_storage(&a2_stor, &correction);
/* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */
secp256k1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2);
rustsecp256k1_v0_1_0_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2);
#ifdef USE_ENDOMORPHISM
if (size > 128) {
secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2);
rustsecp256k1_v0_1_0_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2);
}
#endif
/* Apply the correction */
secp256k1_ge_from_storage(&correction, &correction_1_stor);
secp256k1_ge_neg(&correction, &correction);
secp256k1_gej_add_ge(r, r, &correction);
rustsecp256k1_v0_1_0_ge_from_storage(&correction, &correction_1_stor);
rustsecp256k1_v0_1_0_ge_neg(&correction, &correction);
rustsecp256k1_v0_1_0_gej_add_ge(r, r, &correction);
#ifdef USE_ENDOMORPHISM
if (size > 128) {
secp256k1_ge_from_storage(&correction, &correction_lam_stor);
secp256k1_ge_neg(&correction, &correction);
secp256k1_ge_mul_lambda(&correction, &correction);
secp256k1_gej_add_ge(r, r, &correction);
rustsecp256k1_v0_1_0_ge_from_storage(&correction, &correction_lam_stor);
rustsecp256k1_v0_1_0_ge_neg(&correction, &correction);
rustsecp256k1_v0_1_0_ge_mul_lambda(&correction, &correction);
rustsecp256k1_v0_1_0_gej_add_ge(r, r, &correction);
}
#endif
}
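
The ECMULT_CONST_TABLE_GET_GE macro at the top of this file scans the whole precomputed table and keeps exactly one entry via rustsecp256k1_v0_1_0_fe_cmov, precisely so that no secret value ever appears as an array index (the loop comment spells this out). A self-contained toy of the same masking pattern on 32-bit words; this is only an illustration of the idea, not a vetted constant-time primitive:

#include <stdint.h>
#include <stdio.h>

/* Branch-free select: r ends up equal to table[secret_idx], but every table
 * entry is read, so the memory access pattern does not depend on the secret. */
static uint32_t ct_lookup(const uint32_t *table, size_t len, size_t secret_idx) {
    uint32_t r = 0;
    size_t m;
    for (m = 0; m < len; m++) {
        uint32_t mask = (uint32_t)0 - (uint32_t)(m == secret_idx); /* all-ones iff m == secret_idx */
        r = (r & ~mask) | (table[m] & mask);                       /* conditional move via masking */
    }
    return r;
}

int main(void) {
    uint32_t t[8] = {10, 11, 12, 13, 14, 15, 16, 17};
    printf("%u\n", (unsigned)ct_lookup(t, 8, 5)); /* prints 15 */
    return 0;
}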

View File

@ -23,21 +23,21 @@ typedef struct {
* None of the resulting prec group elements have a known scalar, and neither do any of
* the intermediate sums while computing a*G.
*/
secp256k1_ge_storage (*prec)[64][16]; /* prec[j][i] = 16^j * i * G + U_i */
secp256k1_scalar blind;
secp256k1_gej initial;
} secp256k1_ecmult_gen_context;
rustsecp256k1_v0_1_0_ge_storage (*prec)[64][16]; /* prec[j][i] = 16^j * i * G + U_i */
rustsecp256k1_v0_1_0_scalar blind;
rustsecp256k1_v0_1_0_gej initial;
} rustsecp256k1_v0_1_0_ecmult_gen_context;
static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context* ctx);
static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context* ctx, void **prealloc);
static void secp256k1_ecmult_gen_context_finalize_memcpy(secp256k1_ecmult_gen_context *dst, const secp256k1_ecmult_gen_context* src);
static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context* ctx);
static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_context* ctx);
static void rustsecp256k1_v0_1_0_ecmult_gen_context_init(rustsecp256k1_v0_1_0_ecmult_gen_context* ctx);
static void rustsecp256k1_v0_1_0_ecmult_gen_context_build(rustsecp256k1_v0_1_0_ecmult_gen_context* ctx, void **prealloc);
static void rustsecp256k1_v0_1_0_ecmult_gen_context_finalize_memcpy(rustsecp256k1_v0_1_0_ecmult_gen_context *dst, const rustsecp256k1_v0_1_0_ecmult_gen_context* src);
static void rustsecp256k1_v0_1_0_ecmult_gen_context_clear(rustsecp256k1_v0_1_0_ecmult_gen_context* ctx);
static int rustsecp256k1_v0_1_0_ecmult_gen_context_is_built(const rustsecp256k1_v0_1_0_ecmult_gen_context* ctx);
/** Multiply with the generator: R = a*G */
static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context* ctx, secp256k1_gej *r, const secp256k1_scalar *a);
static void rustsecp256k1_v0_1_0_ecmult_gen(const rustsecp256k1_v0_1_0_ecmult_gen_context* ctx, rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_scalar *a);
static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context *ctx, const unsigned char *seed32);
static void rustsecp256k1_v0_1_0_ecmult_gen_blind(rustsecp256k1_v0_1_0_ecmult_gen_context *ctx, const unsigned char *seed32);
#endif /* SECP256K1_ECMULT_GEN_H */
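A worked restatement of why the table layout described above ("prec[j][i] = 16^j * i * G + U_i") lets a*G be assembled from 64 constant-time lookups with the unknown offsets cancelling (the offsets are the NUMS-derived values built in the impl file that follows):

\[
a = \sum_{j=0}^{63} a_j\,16^j,\; a_j \in \{0,\dots,15\}
\quad\Longrightarrow\quad
\sum_{j=0}^{63} \mathrm{prec}[j][a_j] \;=\; a\,G + \sum_{j=0}^{63} U_j,
\]

and the per-row offsets (written U_i in the comment, though in the build code they depend only on the row j) are chosen as U_j = 2^j * nums for j < 63 and U_63 = (1 - 2^63) * nums, so their sum telescopes to zero and the lookups add up to exactly a*G.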

View File

@ -17,20 +17,20 @@
#endif
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = ROUND_TO_ALIGN(sizeof(*((secp256k1_ecmult_gen_context*) NULL)->prec));
static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = ROUND_TO_ALIGN(sizeof(*((rustsecp256k1_v0_1_0_ecmult_gen_context*) NULL)->prec));
#else
static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = 0;
#endif
static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context *ctx) {
static void rustsecp256k1_v0_1_0_ecmult_gen_context_init(rustsecp256k1_v0_1_0_ecmult_gen_context *ctx) {
ctx->prec = NULL;
}
static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx, void **prealloc) {
static void rustsecp256k1_v0_1_0_ecmult_gen_context_build(rustsecp256k1_v0_1_0_ecmult_gen_context *ctx, void **prealloc) {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
secp256k1_ge prec[1024];
secp256k1_gej gj;
secp256k1_gej nums_gej;
rustsecp256k1_v0_1_0_ge prec[1024];
rustsecp256k1_v0_1_0_gej gj;
rustsecp256k1_v0_1_0_gej nums_gej;
int i, j;
size_t const prealloc_size = SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
void* const base = *prealloc;
@ -40,101 +40,101 @@ static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx
return;
}
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
ctx->prec = (secp256k1_ge_storage (*)[64][16])manual_alloc(prealloc, prealloc_size, base, prealloc_size);
ctx->prec = (rustsecp256k1_v0_1_0_ge_storage (*)[64][16])manual_alloc(prealloc, prealloc_size, base, prealloc_size);
/* get the generator */
secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g);
rustsecp256k1_v0_1_0_gej_set_ge(&gj, &rustsecp256k1_v0_1_0_ge_const_g);
/* Construct a group element with no known corresponding scalar (nothing up my sleeve). */
{
static const unsigned char nums_b32[33] = "The scalar for this x is unknown";
secp256k1_fe nums_x;
secp256k1_ge nums_ge;
rustsecp256k1_v0_1_0_fe nums_x;
rustsecp256k1_v0_1_0_ge nums_ge;
int r;
r = secp256k1_fe_set_b32(&nums_x, nums_b32);
r = rustsecp256k1_v0_1_0_fe_set_b32(&nums_x, nums_b32);
(void)r;
VERIFY_CHECK(r);
r = secp256k1_ge_set_xo_var(&nums_ge, &nums_x, 0);
r = rustsecp256k1_v0_1_0_ge_set_xo_var(&nums_ge, &nums_x, 0);
(void)r;
VERIFY_CHECK(r);
secp256k1_gej_set_ge(&nums_gej, &nums_ge);
rustsecp256k1_v0_1_0_gej_set_ge(&nums_gej, &nums_ge);
/* Add G to make the bits in x uniformly distributed. */
secp256k1_gej_add_ge_var(&nums_gej, &nums_gej, &secp256k1_ge_const_g, NULL);
rustsecp256k1_v0_1_0_gej_add_ge_var(&nums_gej, &nums_gej, &rustsecp256k1_v0_1_0_ge_const_g, NULL);
}
/* compute prec. */
{
secp256k1_gej precj[1024]; /* Jacobian versions of prec. */
secp256k1_gej gbase;
secp256k1_gej numsbase;
rustsecp256k1_v0_1_0_gej precj[1024]; /* Jacobian versions of prec. */
rustsecp256k1_v0_1_0_gej gbase;
rustsecp256k1_v0_1_0_gej numsbase;
gbase = gj; /* 16^j * G */
numsbase = nums_gej; /* 2^j * nums. */
for (j = 0; j < 64; j++) {
/* Set precj[j*16 .. j*16+15] to (numsbase, numsbase + gbase, ..., numsbase + 15*gbase). */
precj[j*16] = numsbase;
for (i = 1; i < 16; i++) {
secp256k1_gej_add_var(&precj[j*16 + i], &precj[j*16 + i - 1], &gbase, NULL);
rustsecp256k1_v0_1_0_gej_add_var(&precj[j*16 + i], &precj[j*16 + i - 1], &gbase, NULL);
}
/* Multiply gbase by 16. */
for (i = 0; i < 4; i++) {
secp256k1_gej_double_var(&gbase, &gbase, NULL);
rustsecp256k1_v0_1_0_gej_double_var(&gbase, &gbase, NULL);
}
/* Multiply numsbase by 2. */
secp256k1_gej_double_var(&numsbase, &numsbase, NULL);
rustsecp256k1_v0_1_0_gej_double_var(&numsbase, &numsbase, NULL);
if (j == 62) {
/* In the last iteration, numsbase is (1 - 2^j) * nums instead. */
secp256k1_gej_neg(&numsbase, &numsbase);
secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL);
rustsecp256k1_v0_1_0_gej_neg(&numsbase, &numsbase);
rustsecp256k1_v0_1_0_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL);
}
}
secp256k1_ge_set_all_gej_var(prec, precj, 1024);
rustsecp256k1_v0_1_0_ge_set_all_gej_var(prec, precj, 1024);
}
for (j = 0; j < 64; j++) {
for (i = 0; i < 16; i++) {
secp256k1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*16 + i]);
rustsecp256k1_v0_1_0_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*16 + i]);
}
}
#else
(void)prealloc;
ctx->prec = (secp256k1_ge_storage (*)[64][16])secp256k1_ecmult_static_context;
ctx->prec = (rustsecp256k1_v0_1_0_ge_storage (*)[64][16])rustsecp256k1_v0_1_0_ecmult_static_context;
#endif
secp256k1_ecmult_gen_blind(ctx, NULL);
rustsecp256k1_v0_1_0_ecmult_gen_blind(ctx, NULL);
}
static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_context* ctx) {
static int rustsecp256k1_v0_1_0_ecmult_gen_context_is_built(const rustsecp256k1_v0_1_0_ecmult_gen_context* ctx) {
return ctx->prec != NULL;
}
static void secp256k1_ecmult_gen_context_finalize_memcpy(secp256k1_ecmult_gen_context *dst, const secp256k1_ecmult_gen_context *src) {
static void rustsecp256k1_v0_1_0_ecmult_gen_context_finalize_memcpy(rustsecp256k1_v0_1_0_ecmult_gen_context *dst, const rustsecp256k1_v0_1_0_ecmult_gen_context *src) {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
if (src->prec != NULL) {
/* We cast to void* first to suppress a -Wcast-align warning. */
dst->prec = (secp256k1_ge_storage (*)[64][16])(void*)((unsigned char*)dst + ((unsigned char*)src->prec - (unsigned char*)src));
dst->prec = (rustsecp256k1_v0_1_0_ge_storage (*)[64][16])(void*)((unsigned char*)dst + ((unsigned char*)src->prec - (unsigned char*)src));
}
#else
(void)dst, (void)src;
#endif
}
static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context *ctx) {
secp256k1_scalar_clear(&ctx->blind);
secp256k1_gej_clear(&ctx->initial);
static void rustsecp256k1_v0_1_0_ecmult_gen_context_clear(rustsecp256k1_v0_1_0_ecmult_gen_context *ctx) {
rustsecp256k1_v0_1_0_scalar_clear(&ctx->blind);
rustsecp256k1_v0_1_0_gej_clear(&ctx->initial);
ctx->prec = NULL;
}
static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp256k1_gej *r, const secp256k1_scalar *gn) {
secp256k1_ge add;
secp256k1_ge_storage adds;
secp256k1_scalar gnb;
static void rustsecp256k1_v0_1_0_ecmult_gen(const rustsecp256k1_v0_1_0_ecmult_gen_context *ctx, rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_scalar *gn) {
rustsecp256k1_v0_1_0_ge add;
rustsecp256k1_v0_1_0_ge_storage adds;
rustsecp256k1_v0_1_0_scalar gnb;
int bits;
int i, j;
memset(&adds, 0, sizeof(adds));
*r = ctx->initial;
/* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. */
secp256k1_scalar_add(&gnb, gn, &ctx->blind);
rustsecp256k1_v0_1_0_scalar_add(&gnb, gn, &ctx->blind);
add.infinity = 0;
for (j = 0; j < 64; j++) {
bits = secp256k1_scalar_get_bits(&gnb, j * 4, 4);
bits = rustsecp256k1_v0_1_0_scalar_get_bits(&gnb, j * 4, 4);
for (i = 0; i < 16; i++) {
/** This uses a conditional move to avoid any secret data in array indexes.
* _Any_ use of secret indexes has been demonstrated to result in timing
@ -146,33 +146,33 @@ static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp25
* by Dag Arne Osvik, Adi Shamir, and Eran Tromer
* (http://www.tau.ac.il/~tromer/papers/cache.pdf)
*/
secp256k1_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits);
rustsecp256k1_v0_1_0_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits);
}
secp256k1_ge_from_storage(&add, &adds);
secp256k1_gej_add_ge(r, r, &add);
rustsecp256k1_v0_1_0_ge_from_storage(&add, &adds);
rustsecp256k1_v0_1_0_gej_add_ge(r, r, &add);
}
bits = 0;
secp256k1_ge_clear(&add);
secp256k1_scalar_clear(&gnb);
rustsecp256k1_v0_1_0_ge_clear(&add);
rustsecp256k1_v0_1_0_scalar_clear(&gnb);
}
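The cache-timing comment above is realized with the same mask-based conditional move used by the fe/ge cmov helpers later in this diff. A standalone sketch of the idiom (illustrative only; cmov_words and the out/table names are invented for the example):

#include <stdint.h>
#include <stddef.h>

/* Copy a[] into r[] word-by-word iff flag is 1 (flag is assumed to be 0 or 1),
 * with no flag-dependent branch and no flag-dependent memory address. */
static void cmov_words(uint32_t *r, const uint32_t *a, size_t n, int flag) {
    uint32_t mask0 = (uint32_t)flag + ~((uint32_t)0); /* 0 if flag == 1, all ones if flag == 0 */
    uint32_t mask1 = ~mask0;
    size_t i;
    for (i = 0; i < n; i++) {
        r[i] = (r[i] & mask0) | (a[i] & mask1);
    }
}

/* Scanning a whole 16-entry table row with it, as the inner loop above does:
 *     for (i = 0; i < 16; i++) cmov_words(out, table[i], n, i == bits);
 * touches every entry and leaves out[] equal to table[bits]. */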
/* Setup blinding values for secp256k1_ecmult_gen. */
static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context *ctx, const unsigned char *seed32) {
secp256k1_scalar b;
secp256k1_gej gb;
secp256k1_fe s;
/* Setup blinding values for rustsecp256k1_v0_1_0_ecmult_gen. */
static void rustsecp256k1_v0_1_0_ecmult_gen_blind(rustsecp256k1_v0_1_0_ecmult_gen_context *ctx, const unsigned char *seed32) {
rustsecp256k1_v0_1_0_scalar b;
rustsecp256k1_v0_1_0_gej gb;
rustsecp256k1_v0_1_0_fe s;
unsigned char nonce32[32];
secp256k1_rfc6979_hmac_sha256 rng;
rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 rng;
int retry;
unsigned char keydata[64] = {0};
if (seed32 == NULL) {
/* When seed is NULL, reset the initial point and blinding value. */
secp256k1_gej_set_ge(&ctx->initial, &secp256k1_ge_const_g);
secp256k1_gej_neg(&ctx->initial, &ctx->initial);
secp256k1_scalar_set_int(&ctx->blind, 1);
rustsecp256k1_v0_1_0_gej_set_ge(&ctx->initial, &rustsecp256k1_v0_1_0_ge_const_g);
rustsecp256k1_v0_1_0_gej_neg(&ctx->initial, &ctx->initial);
rustsecp256k1_v0_1_0_scalar_set_int(&ctx->blind, 1);
}
/* The prior blinding value (if not reset) is chained forward by including it in the hash. */
secp256k1_scalar_get_b32(nonce32, &ctx->blind);
rustsecp256k1_v0_1_0_scalar_get_b32(nonce32, &ctx->blind);
/** Using a CSPRNG allows a failure-free interface, avoids needing large amounts of random data,
* and guards against weak or adversarial seeds. This is a simpler and safer interface than
* asking the caller for blinding values directly and expecting them to retry on failure.
@ -181,31 +181,31 @@ static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context *ctx, const
if (seed32 != NULL) {
memcpy(keydata + 32, seed32, 32);
}
secp256k1_rfc6979_hmac_sha256_initialize(&rng, keydata, seed32 ? 64 : 32);
rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_initialize(&rng, keydata, seed32 ? 64 : 32);
memset(keydata, 0, sizeof(keydata));
/* Retry for out of range results to achieve uniformity. */
do {
secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
retry = !secp256k1_fe_set_b32(&s, nonce32);
retry |= secp256k1_fe_is_zero(&s);
rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
retry = !rustsecp256k1_v0_1_0_fe_set_b32(&s, nonce32);
retry |= rustsecp256k1_v0_1_0_fe_is_zero(&s);
} while (retry); /* Taking this branch is cryptographically unreachable: it would require the sha256_hmac output to exceed Fp. */
/* Randomize the projection to defend against multiplier sidechannels. */
secp256k1_gej_rescale(&ctx->initial, &s);
secp256k1_fe_clear(&s);
rustsecp256k1_v0_1_0_gej_rescale(&ctx->initial, &s);
rustsecp256k1_v0_1_0_fe_clear(&s);
do {
secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
secp256k1_scalar_set_b32(&b, nonce32, &retry);
rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
rustsecp256k1_v0_1_0_scalar_set_b32(&b, nonce32, &retry);
/* A blinding value of 0 works, but would undermine the projection hardening. */
retry |= secp256k1_scalar_is_zero(&b);
retry |= rustsecp256k1_v0_1_0_scalar_is_zero(&b);
} while (retry); /* Taking this branch is cryptographically unreachable: it would require the sha256_hmac output to exceed the group order. */
secp256k1_rfc6979_hmac_sha256_finalize(&rng);
rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_finalize(&rng);
memset(nonce32, 0, 32);
secp256k1_ecmult_gen(ctx, &gb, &b);
secp256k1_scalar_negate(&b, &b);
rustsecp256k1_v0_1_0_ecmult_gen(ctx, &gb, &b);
rustsecp256k1_v0_1_0_scalar_negate(&b, &b);
ctx->blind = b;
ctx->initial = gb;
secp256k1_scalar_clear(&b);
secp256k1_gej_clear(&gb);
rustsecp256k1_v0_1_0_scalar_clear(&b);
rustsecp256k1_v0_1_0_gej_clear(&gb);
}
#endif /* SECP256K1_ECMULT_GEN_IMPL_H */
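To see why the blinding in this file cannot change results (following only what the code above does): gen_blind finishes with blind = -b and initial = b*G, held in a randomly rescaled projective representation, so the main loop of ecmult_gen evaluates

\[
r \;=\; \mathit{initial} + (n + \mathit{blind})\,G \;=\; b\,G + (n - b)\,G \;=\; n\,G,
\]

i.e. the output is unchanged while every intermediate value depends on the fresh secret b and on the randomized projection.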

View File

@ -33,100 +33,100 @@
#include "util.h"
/** Normalize a field element. */
static void secp256k1_fe_normalize(secp256k1_fe *r);
static void rustsecp256k1_v0_1_0_fe_normalize(rustsecp256k1_v0_1_0_fe *r);
/** Weakly normalize a field element: reduce its magnitude to 1, but don't fully normalize. */
static void secp256k1_fe_normalize_weak(secp256k1_fe *r);
static void rustsecp256k1_v0_1_0_fe_normalize_weak(rustsecp256k1_v0_1_0_fe *r);
/** Normalize a field element, without constant-time guarantee. */
static void secp256k1_fe_normalize_var(secp256k1_fe *r);
static void rustsecp256k1_v0_1_0_fe_normalize_var(rustsecp256k1_v0_1_0_fe *r);
/** Verify whether a field element represents zero i.e. would normalize to a zero value. The field
* implementation may optionally normalize the input, but this should not be relied upon. */
static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r);
static int rustsecp256k1_v0_1_0_fe_normalizes_to_zero(rustsecp256k1_v0_1_0_fe *r);
/** Verify whether a field element represents zero i.e. would normalize to a zero value. The field
* implementation may optionally normalize the input, but this should not be relied upon. */
static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r);
static int rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(rustsecp256k1_v0_1_0_fe *r);
/** Set a field element equal to a small integer. Resulting field element is normalized. */
static void secp256k1_fe_set_int(secp256k1_fe *r, int a);
static void rustsecp256k1_v0_1_0_fe_set_int(rustsecp256k1_v0_1_0_fe *r, int a);
/** Sets a field element equal to zero, initializing all fields. */
static void secp256k1_fe_clear(secp256k1_fe *a);
static void rustsecp256k1_v0_1_0_fe_clear(rustsecp256k1_v0_1_0_fe *a);
/** Verify whether a field element is zero. Requires the input to be normalized. */
static int secp256k1_fe_is_zero(const secp256k1_fe *a);
static int rustsecp256k1_v0_1_0_fe_is_zero(const rustsecp256k1_v0_1_0_fe *a);
/** Check the "oddness" of a field element. Requires the input to be normalized. */
static int secp256k1_fe_is_odd(const secp256k1_fe *a);
static int rustsecp256k1_v0_1_0_fe_is_odd(const rustsecp256k1_v0_1_0_fe *a);
/** Compare two field elements. Requires magnitude-1 inputs. */
static int secp256k1_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b);
static int rustsecp256k1_v0_1_0_fe_equal(const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe *b);
/** Same as secp256k1_fe_equal, but may be variable time. */
static int secp256k1_fe_equal_var(const secp256k1_fe *a, const secp256k1_fe *b);
/** Same as rustsecp256k1_v0_1_0_fe_equal, but may be variable time. */
static int rustsecp256k1_v0_1_0_fe_equal_var(const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe *b);
/** Compare two field elements. Requires both inputs to be normalized */
static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b);
static int rustsecp256k1_v0_1_0_fe_cmp_var(const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe *b);
/** Set a field element equal to a 32-byte big endian value. If successful, the resulting field element is normalized. */
static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a);
static int rustsecp256k1_v0_1_0_fe_set_b32(rustsecp256k1_v0_1_0_fe *r, const unsigned char *a);
/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a);
static void rustsecp256k1_v0_1_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_1_0_fe *a);
/** Set a field element equal to the additive inverse of another. Takes a maximum magnitude of the input
* as an argument. The magnitude of the output is one higher. */
static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m);
static void rustsecp256k1_v0_1_0_fe_negate(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, int m);
/** Multiplies the passed field element with a small integer constant. Multiplies the magnitude by that
* small integer. */
static void secp256k1_fe_mul_int(secp256k1_fe *r, int a);
static void rustsecp256k1_v0_1_0_fe_mul_int(rustsecp256k1_v0_1_0_fe *r, int a);
/** Adds a field element to another. The result has the sum of the inputs' magnitudes as magnitude. */
static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a);
static void rustsecp256k1_v0_1_0_fe_add(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a);
/** Sets a field element to be the product of two others. Requires the inputs' magnitudes to be at most 8.
* The output magnitude is 1 (but not guaranteed to be normalized). */
static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b);
static void rustsecp256k1_v0_1_0_fe_mul(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe * SECP256K1_RESTRICT b);
/** Sets a field element to be the square of another. Requires the input's magnitude to be at most 8.
* The output magnitude is 1 (but not guaranteed to be normalized). */
static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a);
static void rustsecp256k1_v0_1_0_fe_sqr(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a);
/** If a has a square root, it is computed in r and 1 is returned. If a does not
* have a square root, the root of its negation is computed and 0 is returned.
* The input's magnitude can be at most 8. The output magnitude is 1 (but not
* guaranteed to be normalized). The result in r will always be a square
* itself. */
static int secp256k1_fe_sqrt(secp256k1_fe *r, const secp256k1_fe *a);
static int rustsecp256k1_v0_1_0_fe_sqrt(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a);
/** Checks whether a field element is a quadratic residue. */
static int secp256k1_fe_is_quad_var(const secp256k1_fe *a);
static int rustsecp256k1_v0_1_0_fe_is_quad_var(const rustsecp256k1_v0_1_0_fe *a);
/** Sets a field element to be the (modular) inverse of another. Requires the input's magnitude to be
* at most 8. The output magnitude is 1 (but not guaranteed to be normalized). */
static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a);
static void rustsecp256k1_v0_1_0_fe_inv(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a);
/** Potentially faster version of secp256k1_fe_inv, without constant-time guarantee. */
static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a);
/** Potentially faster version of rustsecp256k1_v0_1_0_fe_inv, without constant-time guarantee. */
static void rustsecp256k1_v0_1_0_fe_inv_var(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a);
/** Calculate the (modular) inverses of a batch of field elements. Requires the inputs' magnitudes to be
* at most 8. The output magnitudes are 1 (but not guaranteed to be normalized). The inputs and
* outputs must not overlap in memory. */
static void secp256k1_fe_inv_all_var(secp256k1_fe *r, const secp256k1_fe *a, size_t len);
static void rustsecp256k1_v0_1_0_fe_inv_all_var(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, size_t len);
/** Convert a field element to the storage type. */
static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a);
static void rustsecp256k1_v0_1_0_fe_to_storage(rustsecp256k1_v0_1_0_fe_storage *r, const rustsecp256k1_v0_1_0_fe *a);
/** Convert a field element back from the storage type. */
static void secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a);
static void rustsecp256k1_v0_1_0_fe_from_storage(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe_storage *a);
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */
static void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag);
static void rustsecp256k1_v0_1_0_fe_storage_cmov(rustsecp256k1_v0_1_0_fe_storage *r, const rustsecp256k1_v0_1_0_fe_storage *a, int flag);
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */
static void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag);
static void rustsecp256k1_v0_1_0_fe_cmov(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, int flag);
#endif /* SECP256K1_FIELD_H */
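The magnitude/normalization contract in these declarations composes mechanically. A hypothetical caller-side sketch (the wrapper name is invented; the fe calls and their documented bounds are the ones declared above), tracking the bounds while computing (x - y)^2:

/* Sketch only: assumes the field declarations above and an implementation are in scope. */
static void fe_sqr_of_diff(rustsecp256k1_v0_1_0_fe *r,
                           const rustsecp256k1_v0_1_0_fe *x,  /* magnitude <= 1 */
                           const rustsecp256k1_v0_1_0_fe *y)  /* magnitude <= 1 */
{
    rustsecp256k1_v0_1_0_fe d;
    rustsecp256k1_v0_1_0_fe_negate(&d, y, 1); /* output magnitude is 1 + 1 = 2 */
    rustsecp256k1_v0_1_0_fe_add(&d, x);       /* magnitudes add: 2 + 1 = 3, still <= 8 */
    rustsecp256k1_v0_1_0_fe_sqr(r, &d);       /* allowed since 3 <= 8; result has magnitude 1, not normalized */
}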

View File

@ -18,7 +18,7 @@ typedef struct {
int magnitude;
int normalized;
#endif
} secp256k1_fe;
} rustsecp256k1_v0_1_0_fe;
/* Unpacks a constant into an overlapping multi-limbed FE element. */
#define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \
@ -42,7 +42,7 @@ typedef struct {
typedef struct {
uint32_t n[8];
} secp256k1_fe_storage;
} rustsecp256k1_v0_1_0_fe_storage;
#define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}
#define SECP256K1_FE_STORAGE_CONST_GET(d) d.n[7], d.n[6], d.n[5], d.n[4],d.n[3], d.n[2], d.n[1], d.n[0]

View File

@ -11,7 +11,7 @@
#include "field.h"
#ifdef VERIFY
static void secp256k1_fe_verify(const secp256k1_fe *a) {
static void rustsecp256k1_v0_1_0_fe_verify(const rustsecp256k1_v0_1_0_fe *a) {
const uint32_t *d = a->n;
int m = a->normalized ? 1 : 2 * a->magnitude, r = 1;
r &= (d[0] <= 0x3FFFFFFUL * m);
@ -39,7 +39,7 @@ static void secp256k1_fe_verify(const secp256k1_fe *a) {
}
#endif
static void secp256k1_fe_normalize(secp256k1_fe *r) {
static void rustsecp256k1_v0_1_0_fe_normalize(rustsecp256k1_v0_1_0_fe *r) {
uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
@ -90,11 +90,11 @@ static void secp256k1_fe_normalize(secp256k1_fe *r) {
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
}
static void secp256k1_fe_normalize_weak(secp256k1_fe *r) {
static void rustsecp256k1_v0_1_0_fe_normalize_weak(rustsecp256k1_v0_1_0_fe *r) {
uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
@ -121,11 +121,11 @@ static void secp256k1_fe_normalize_weak(secp256k1_fe *r) {
#ifdef VERIFY
r->magnitude = 1;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
}
static void secp256k1_fe_normalize_var(secp256k1_fe *r) {
static void rustsecp256k1_v0_1_0_fe_normalize_var(rustsecp256k1_v0_1_0_fe *r) {
uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
@ -177,11 +177,11 @@ static void secp256k1_fe_normalize_var(secp256k1_fe *r) {
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
}
static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r) {
static int rustsecp256k1_v0_1_0_fe_normalizes_to_zero(rustsecp256k1_v0_1_0_fe *r) {
uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
@ -210,7 +210,7 @@ static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r) {
return (z0 == 0) | (z1 == 0x3FFFFFFUL);
}
static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r) {
static int rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(rustsecp256k1_v0_1_0_fe *r) {
uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9;
uint32_t z0, z1;
uint32_t x;
@ -262,34 +262,34 @@ static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r) {
return (z0 == 0) | (z1 == 0x3FFFFFFUL);
}
SECP256K1_INLINE static void secp256k1_fe_set_int(secp256k1_fe *r, int a) {
SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_set_int(rustsecp256k1_v0_1_0_fe *r, int a) {
r->n[0] = a;
r->n[1] = r->n[2] = r->n[3] = r->n[4] = r->n[5] = r->n[6] = r->n[7] = r->n[8] = r->n[9] = 0;
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
}
SECP256K1_INLINE static int secp256k1_fe_is_zero(const secp256k1_fe *a) {
SECP256K1_INLINE static int rustsecp256k1_v0_1_0_fe_is_zero(const rustsecp256k1_v0_1_0_fe *a) {
const uint32_t *t = a->n;
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
secp256k1_fe_verify(a);
rustsecp256k1_v0_1_0_fe_verify(a);
#endif
return (t[0] | t[1] | t[2] | t[3] | t[4] | t[5] | t[6] | t[7] | t[8] | t[9]) == 0;
}
SECP256K1_INLINE static int secp256k1_fe_is_odd(const secp256k1_fe *a) {
SECP256K1_INLINE static int rustsecp256k1_v0_1_0_fe_is_odd(const rustsecp256k1_v0_1_0_fe *a) {
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
secp256k1_fe_verify(a);
rustsecp256k1_v0_1_0_fe_verify(a);
#endif
return a->n[0] & 1;
}
SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe *a) {
SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_clear(rustsecp256k1_v0_1_0_fe *a) {
int i;
#ifdef VERIFY
a->magnitude = 0;
@ -300,13 +300,13 @@ SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe *a) {
}
}
static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) {
static int rustsecp256k1_v0_1_0_fe_cmp_var(const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe *b) {
int i;
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
VERIFY_CHECK(b->normalized);
secp256k1_fe_verify(a);
secp256k1_fe_verify(b);
rustsecp256k1_v0_1_0_fe_verify(a);
rustsecp256k1_v0_1_0_fe_verify(b);
#endif
for (i = 9; i >= 0; i--) {
if (a->n[i] > b->n[i]) {
@ -319,7 +319,7 @@ static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) {
return 0;
}
static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) {
static int rustsecp256k1_v0_1_0_fe_set_b32(rustsecp256k1_v0_1_0_fe *r, const unsigned char *a) {
r->n[0] = (uint32_t)a[31] | ((uint32_t)a[30] << 8) | ((uint32_t)a[29] << 16) | ((uint32_t)(a[28] & 0x3) << 24);
r->n[1] = (uint32_t)((a[28] >> 2) & 0x3f) | ((uint32_t)a[27] << 6) | ((uint32_t)a[26] << 14) | ((uint32_t)(a[25] & 0xf) << 22);
r->n[2] = (uint32_t)((a[25] >> 4) & 0xf) | ((uint32_t)a[24] << 4) | ((uint32_t)a[23] << 12) | ((uint32_t)(a[22] & 0x3f) << 20);
@ -337,16 +337,16 @@ static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) {
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
return 1;
}
/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) {
static void rustsecp256k1_v0_1_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_1_0_fe *a) {
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
secp256k1_fe_verify(a);
rustsecp256k1_v0_1_0_fe_verify(a);
#endif
r[0] = (a->n[9] >> 14) & 0xff;
r[1] = (a->n[9] >> 6) & 0xff;
@ -382,10 +382,10 @@ static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) {
r[31] = a->n[0] & 0xff;
}
SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m) {
SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_negate(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, int m) {
#ifdef VERIFY
VERIFY_CHECK(a->magnitude <= m);
secp256k1_fe_verify(a);
rustsecp256k1_v0_1_0_fe_verify(a);
#endif
r->n[0] = 0x3FFFC2FUL * 2 * (m + 1) - a->n[0];
r->n[1] = 0x3FFFFBFUL * 2 * (m + 1) - a->n[1];
@ -400,11 +400,11 @@ SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k
#ifdef VERIFY
r->magnitude = m + 1;
r->normalized = 0;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
}
SECP256K1_INLINE static void secp256k1_fe_mul_int(secp256k1_fe *r, int a) {
SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_mul_int(rustsecp256k1_v0_1_0_fe *r, int a) {
r->n[0] *= a;
r->n[1] *= a;
r->n[2] *= a;
@ -418,13 +418,13 @@ SECP256K1_INLINE static void secp256k1_fe_mul_int(secp256k1_fe *r, int a) {
#ifdef VERIFY
r->magnitude *= a;
r->normalized = 0;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
}
SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a) {
SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_add(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a) {
#ifdef VERIFY
secp256k1_fe_verify(a);
rustsecp256k1_v0_1_0_fe_verify(a);
#endif
r->n[0] += a->n[0];
r->n[1] += a->n[1];
@ -439,15 +439,15 @@ SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_f
#ifdef VERIFY
r->magnitude += a->magnitude;
r->normalized = 0;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
}
#if defined(USE_EXTERNAL_ASM)
/* External assembler implementation */
void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b);
void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a);
void rustsecp256k1_v0_1_0_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b);
void rustsecp256k1_v0_1_0_fe_sqr_inner(uint32_t *r, const uint32_t *a);
#else
@ -457,7 +457,7 @@ void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a);
#define VERIFY_BITS(x, n) do { } while(0)
#endif
SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) {
SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) {
uint64_t c, d;
uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8;
uint32_t t9, t1, t0, t2, t3, t4, t5, t6, t7;
@ -787,7 +787,7 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t
/* [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
}
SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a) {
SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_sqr_inner(uint32_t *r, const uint32_t *a) {
uint64_t c, d;
uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8;
uint32_t t9, t0, t1, t2, t3, t4, t5, t6, t7;
@ -1062,37 +1062,37 @@ SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t
}
#endif
static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b) {
static void rustsecp256k1_v0_1_0_fe_mul(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe * SECP256K1_RESTRICT b) {
#ifdef VERIFY
VERIFY_CHECK(a->magnitude <= 8);
VERIFY_CHECK(b->magnitude <= 8);
secp256k1_fe_verify(a);
secp256k1_fe_verify(b);
rustsecp256k1_v0_1_0_fe_verify(a);
rustsecp256k1_v0_1_0_fe_verify(b);
VERIFY_CHECK(r != b);
VERIFY_CHECK(a != b);
#endif
secp256k1_fe_mul_inner(r->n, a->n, b->n);
rustsecp256k1_v0_1_0_fe_mul_inner(r->n, a->n, b->n);
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 0;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
}
static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) {
static void rustsecp256k1_v0_1_0_fe_sqr(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a) {
#ifdef VERIFY
VERIFY_CHECK(a->magnitude <= 8);
secp256k1_fe_verify(a);
rustsecp256k1_v0_1_0_fe_verify(a);
#endif
secp256k1_fe_sqr_inner(r->n, a->n);
rustsecp256k1_v0_1_0_fe_sqr_inner(r->n, a->n);
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 0;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
}
static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
static SECP256K1_INLINE void rustsecp256k1_v0_1_0_fe_cmov(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, int flag) {
uint32_t mask0, mask1;
mask0 = flag + ~((uint32_t)0);
mask1 = ~mask0;
@ -1114,7 +1114,7 @@ static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_
#endif
}
static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) {
static SECP256K1_INLINE void rustsecp256k1_v0_1_0_fe_storage_cmov(rustsecp256k1_v0_1_0_fe_storage *r, const rustsecp256k1_v0_1_0_fe_storage *a, int flag) {
uint32_t mask0, mask1;
mask0 = flag + ~((uint32_t)0);
mask1 = ~mask0;
@ -1128,7 +1128,7 @@ static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r,
r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1);
}
static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a) {
static void rustsecp256k1_v0_1_0_fe_to_storage(rustsecp256k1_v0_1_0_fe_storage *r, const rustsecp256k1_v0_1_0_fe *a) {
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
#endif
@ -1142,7 +1142,7 @@ static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe
r->n[7] = a->n[8] >> 16 | a->n[9] << 10;
}
static SECP256K1_INLINE void secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a) {
static SECP256K1_INLINE void rustsecp256k1_v0_1_0_fe_from_storage(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe_storage *a) {
r->n[0] = a->n[0] & 0x3FFFFFFUL;
r->n[1] = a->n[0] >> 26 | ((a->n[1] << 6) & 0x3FFFFFFUL);
r->n[2] = a->n[1] >> 20 | ((a->n[2] << 12) & 0x3FFFFFFUL);

View File

@ -18,7 +18,7 @@ typedef struct {
int magnitude;
int normalized;
#endif
} secp256k1_fe;
} rustsecp256k1_v0_1_0_fe;
/* Unpacks a constant into an overlapping multi-limbed FE element. */
#define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \
@ -37,7 +37,7 @@ typedef struct {
typedef struct {
uint64_t n[4];
} secp256k1_fe_storage;
} rustsecp256k1_v0_1_0_fe_storage;
#define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ \
(d0) | (((uint64_t)(d1)) << 32), \

View File

@ -14,7 +14,7 @@
#ifndef SECP256K1_FIELD_INNER5X52_IMPL_H
#define SECP256K1_FIELD_INNER5X52_IMPL_H
SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
/**
* Registers: rdx:rax = multiplication accumulator
* r9:r8 = c
@ -284,7 +284,7 @@ __asm__ __volatile__(
);
}
SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t *a) {
SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_sqr_inner(uint64_t *r, const uint64_t *a) {
/**
* Registers: rdx:rax = multiplication accumulator
* r9:r8 = c

View File

@ -29,7 +29,7 @@
*/
#ifdef VERIFY
static void secp256k1_fe_verify(const secp256k1_fe *a) {
static void rustsecp256k1_v0_1_0_fe_verify(const rustsecp256k1_v0_1_0_fe *a) {
const uint64_t *d = a->n;
int m = a->normalized ? 1 : 2 * a->magnitude, r = 1;
/* secp256k1 'p' value defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
@ -50,7 +50,7 @@ static void secp256k1_fe_verify(const secp256k1_fe *a) {
}
#endif
static void secp256k1_fe_normalize(secp256k1_fe *r) {
static void rustsecp256k1_v0_1_0_fe_normalize(rustsecp256k1_v0_1_0_fe *r) {
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
/* Reduce t4 at the start so there will be at most a single carry from the first pass */
@ -89,11 +89,11 @@ static void secp256k1_fe_normalize(secp256k1_fe *r) {
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
}
static void secp256k1_fe_normalize_weak(secp256k1_fe *r) {
static void rustsecp256k1_v0_1_0_fe_normalize_weak(rustsecp256k1_v0_1_0_fe *r) {
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
/* Reduce t4 at the start so there will be at most a single carry from the first pass */
@ -113,11 +113,11 @@ static void secp256k1_fe_normalize_weak(secp256k1_fe *r) {
#ifdef VERIFY
r->magnitude = 1;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
}
static void secp256k1_fe_normalize_var(secp256k1_fe *r) {
static void rustsecp256k1_v0_1_0_fe_normalize_var(rustsecp256k1_v0_1_0_fe *r) {
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
/* Reduce t4 at the start so there will be at most a single carry from the first pass */
@ -157,11 +157,11 @@ static void secp256k1_fe_normalize_var(secp256k1_fe *r) {
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
}
static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r) {
static int rustsecp256k1_v0_1_0_fe_normalizes_to_zero(rustsecp256k1_v0_1_0_fe *r) {
uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
/* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
@ -184,7 +184,7 @@ static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r) {
return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
}
static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r) {
static int rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(rustsecp256k1_v0_1_0_fe *r) {
uint64_t t0, t1, t2, t3, t4;
uint64_t z0, z1;
uint64_t x;
@ -225,34 +225,34 @@ static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r) {
return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
}
SECP256K1_INLINE static void secp256k1_fe_set_int(secp256k1_fe *r, int a) {
SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_set_int(rustsecp256k1_v0_1_0_fe *r, int a) {
r->n[0] = a;
r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0;
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
}
SECP256K1_INLINE static int secp256k1_fe_is_zero(const secp256k1_fe *a) {
SECP256K1_INLINE static int rustsecp256k1_v0_1_0_fe_is_zero(const rustsecp256k1_v0_1_0_fe *a) {
const uint64_t *t = a->n;
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
secp256k1_fe_verify(a);
rustsecp256k1_v0_1_0_fe_verify(a);
#endif
return (t[0] | t[1] | t[2] | t[3] | t[4]) == 0;
}
SECP256K1_INLINE static int secp256k1_fe_is_odd(const secp256k1_fe *a) {
SECP256K1_INLINE static int rustsecp256k1_v0_1_0_fe_is_odd(const rustsecp256k1_v0_1_0_fe *a) {
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
secp256k1_fe_verify(a);
rustsecp256k1_v0_1_0_fe_verify(a);
#endif
return a->n[0] & 1;
}
SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe *a) {
SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_clear(rustsecp256k1_v0_1_0_fe *a) {
int i;
#ifdef VERIFY
a->magnitude = 0;
@ -263,13 +263,13 @@ SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe *a) {
}
}
static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) {
static int rustsecp256k1_v0_1_0_fe_cmp_var(const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe *b) {
int i;
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
VERIFY_CHECK(b->normalized);
secp256k1_fe_verify(a);
secp256k1_fe_verify(b);
rustsecp256k1_v0_1_0_fe_verify(a);
rustsecp256k1_v0_1_0_fe_verify(b);
#endif
for (i = 4; i >= 0; i--) {
if (a->n[i] > b->n[i]) {
@ -282,7 +282,7 @@ static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) {
return 0;
}
static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) {
static int rustsecp256k1_v0_1_0_fe_set_b32(rustsecp256k1_v0_1_0_fe *r, const unsigned char *a) {
r->n[0] = (uint64_t)a[31]
| ((uint64_t)a[30] << 8)
| ((uint64_t)a[29] << 16)
@ -323,16 +323,16 @@ static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) {
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 1;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
return 1;
}
/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) {
static void rustsecp256k1_v0_1_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_1_0_fe *a) {
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
secp256k1_fe_verify(a);
rustsecp256k1_v0_1_0_fe_verify(a);
#endif
r[0] = (a->n[4] >> 40) & 0xFF;
r[1] = (a->n[4] >> 32) & 0xFF;
@ -368,10 +368,10 @@ static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) {
r[31] = a->n[0] & 0xFF;
}
SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m) {
SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_negate(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, int m) {
#ifdef VERIFY
VERIFY_CHECK(a->magnitude <= m);
secp256k1_fe_verify(a);
rustsecp256k1_v0_1_0_fe_verify(a);
#endif
r->n[0] = 0xFFFFEFFFFFC2FULL * 2 * (m + 1) - a->n[0];
r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[1];
@ -381,11 +381,11 @@ SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k
#ifdef VERIFY
r->magnitude = m + 1;
r->normalized = 0;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
}
SECP256K1_INLINE static void secp256k1_fe_mul_int(secp256k1_fe *r, int a) {
SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_mul_int(rustsecp256k1_v0_1_0_fe *r, int a) {
r->n[0] *= a;
r->n[1] *= a;
r->n[2] *= a;
@ -394,13 +394,13 @@ SECP256K1_INLINE static void secp256k1_fe_mul_int(secp256k1_fe *r, int a) {
#ifdef VERIFY
r->magnitude *= a;
r->normalized = 0;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
}
SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a) {
SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_add(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a) {
#ifdef VERIFY
secp256k1_fe_verify(a);
rustsecp256k1_v0_1_0_fe_verify(a);
#endif
r->n[0] += a->n[0];
r->n[1] += a->n[1];
@ -410,41 +410,41 @@ SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_f
#ifdef VERIFY
r->magnitude += a->magnitude;
r->normalized = 0;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
}
static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b) {
static void rustsecp256k1_v0_1_0_fe_mul(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe * SECP256K1_RESTRICT b) {
#ifdef VERIFY
VERIFY_CHECK(a->magnitude <= 8);
VERIFY_CHECK(b->magnitude <= 8);
secp256k1_fe_verify(a);
secp256k1_fe_verify(b);
rustsecp256k1_v0_1_0_fe_verify(a);
rustsecp256k1_v0_1_0_fe_verify(b);
VERIFY_CHECK(r != b);
VERIFY_CHECK(a != b);
#endif
secp256k1_fe_mul_inner(r->n, a->n, b->n);
rustsecp256k1_v0_1_0_fe_mul_inner(r->n, a->n, b->n);
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 0;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
}
static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) {
static void rustsecp256k1_v0_1_0_fe_sqr(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a) {
#ifdef VERIFY
VERIFY_CHECK(a->magnitude <= 8);
secp256k1_fe_verify(a);
rustsecp256k1_v0_1_0_fe_verify(a);
#endif
secp256k1_fe_sqr_inner(r->n, a->n);
rustsecp256k1_v0_1_0_fe_sqr_inner(r->n, a->n);
#ifdef VERIFY
r->magnitude = 1;
r->normalized = 0;
secp256k1_fe_verify(r);
rustsecp256k1_v0_1_0_fe_verify(r);
#endif
}
static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
static SECP256K1_INLINE void rustsecp256k1_v0_1_0_fe_cmov(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, int flag) {
uint64_t mask0, mask1;
mask0 = flag + ~((uint64_t)0);
mask1 = ~mask0;
@ -461,7 +461,7 @@ static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_
#endif
}
static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) {
static SECP256K1_INLINE void rustsecp256k1_v0_1_0_fe_storage_cmov(rustsecp256k1_v0_1_0_fe_storage *r, const rustsecp256k1_v0_1_0_fe_storage *a, int flag) {
uint64_t mask0, mask1;
mask0 = flag + ~((uint64_t)0);
mask1 = ~mask0;
@ -471,7 +471,7 @@ static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r,
r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
}
static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a) {
static void rustsecp256k1_v0_1_0_fe_to_storage(rustsecp256k1_v0_1_0_fe_storage *r, const rustsecp256k1_v0_1_0_fe *a) {
#ifdef VERIFY
VERIFY_CHECK(a->normalized);
#endif
@ -481,7 +481,7 @@ static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe
r->n[3] = a->n[3] >> 36 | a->n[4] << 16;
}
static SECP256K1_INLINE void secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a) {
static SECP256K1_INLINE void rustsecp256k1_v0_1_0_fe_from_storage(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe_storage *a) {
r->n[0] = a->n[0] & 0xFFFFFFFFFFFFFULL;
r->n[1] = a->n[0] >> 52 | ((a->n[1] << 12) & 0xFFFFFFFFFFFFFULL);
r->n[2] = a->n[1] >> 40 | ((a->n[2] << 24) & 0xFFFFFFFFFFFFFULL);

View File

@ -15,7 +15,7 @@
#define VERIFY_BITS(x, n) do { } while(0)
#endif
SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
uint128_t c, d;
uint64_t t3, t4, tx, u0;
uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4];
@ -154,7 +154,7 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t
/* [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
}
SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t *a) {
SECP256K1_INLINE static void rustsecp256k1_v0_1_0_fe_sqr_inner(uint64_t *r, const uint64_t *a) {
uint128_t c, d;
uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4];
int64_t t3, t4, tx, u0;

View File

@ -0,0 +1,318 @@
/**********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_FIELD_IMPL_H
#define SECP256K1_FIELD_IMPL_H
#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
#endif
#include "util.h"
#include "num.h"
#if defined(USE_FIELD_10X26)
#include "field_10x26_impl.h"
#elif defined(USE_FIELD_5X52)
#include "field_5x52_impl.h"
#else
#error "Please select field implementation"
#endif
SECP256K1_INLINE static int rustsecp256k1_v0_1_0_fe_equal(const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe *b) {
rustsecp256k1_v0_1_0_fe na;
rustsecp256k1_v0_1_0_fe_negate(&na, a, 1);
rustsecp256k1_v0_1_0_fe_add(&na, b);
return rustsecp256k1_v0_1_0_fe_normalizes_to_zero(&na);
}
SECP256K1_INLINE static int rustsecp256k1_v0_1_0_fe_equal_var(const rustsecp256k1_v0_1_0_fe *a, const rustsecp256k1_v0_1_0_fe *b) {
rustsecp256k1_v0_1_0_fe na;
rustsecp256k1_v0_1_0_fe_negate(&na, a, 1);
rustsecp256k1_v0_1_0_fe_add(&na, b);
return rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(&na);
}
static int rustsecp256k1_v0_1_0_fe_sqrt(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a) {
/** Given that p is congruent to 3 mod 4, we can compute the square root of
* a mod p as the (p+1)/4'th power of a.
*
* As (p+1)/4 is an even number, it will have the same result for a and for
* (-a). Only one of these two numbers actually has a square root however,
* so we test at the end by squaring and comparing to the input.
* Also because (p+1)/4 is an even number, the computed square root is
* itself always a square (a ** ((p+1)/4) is the square of a ** ((p+1)/8)).
*/
rustsecp256k1_v0_1_0_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
int j;
VERIFY_CHECK(r != a);
/** The binary representation of (p + 1)/4 has 3 blocks of 1s, with lengths in
* { 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
* 1, [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
*/
rustsecp256k1_v0_1_0_fe_sqr(&x2, a);
rustsecp256k1_v0_1_0_fe_mul(&x2, &x2, a);
rustsecp256k1_v0_1_0_fe_sqr(&x3, &x2);
rustsecp256k1_v0_1_0_fe_mul(&x3, &x3, a);
x6 = x3;
for (j=0; j<3; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&x6, &x6);
}
rustsecp256k1_v0_1_0_fe_mul(&x6, &x6, &x3);
x9 = x6;
for (j=0; j<3; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&x9, &x9);
}
rustsecp256k1_v0_1_0_fe_mul(&x9, &x9, &x3);
x11 = x9;
for (j=0; j<2; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&x11, &x11);
}
rustsecp256k1_v0_1_0_fe_mul(&x11, &x11, &x2);
x22 = x11;
for (j=0; j<11; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&x22, &x22);
}
rustsecp256k1_v0_1_0_fe_mul(&x22, &x22, &x11);
x44 = x22;
for (j=0; j<22; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&x44, &x44);
}
rustsecp256k1_v0_1_0_fe_mul(&x44, &x44, &x22);
x88 = x44;
for (j=0; j<44; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&x88, &x88);
}
rustsecp256k1_v0_1_0_fe_mul(&x88, &x88, &x44);
x176 = x88;
for (j=0; j<88; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&x176, &x176);
}
rustsecp256k1_v0_1_0_fe_mul(&x176, &x176, &x88);
x220 = x176;
for (j=0; j<44; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&x220, &x220);
}
rustsecp256k1_v0_1_0_fe_mul(&x220, &x220, &x44);
x223 = x220;
for (j=0; j<3; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&x223, &x223);
}
rustsecp256k1_v0_1_0_fe_mul(&x223, &x223, &x3);
/* The final result is then assembled using a sliding window over the blocks. */
t1 = x223;
for (j=0; j<23; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&t1, &t1);
}
rustsecp256k1_v0_1_0_fe_mul(&t1, &t1, &x22);
for (j=0; j<6; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&t1, &t1);
}
rustsecp256k1_v0_1_0_fe_mul(&t1, &t1, &x2);
rustsecp256k1_v0_1_0_fe_sqr(&t1, &t1);
rustsecp256k1_v0_1_0_fe_sqr(r, &t1);
/* Check that a square root was actually calculated */
rustsecp256k1_v0_1_0_fe_sqr(&t1, r);
return rustsecp256k1_v0_1_0_fe_equal(&t1, a);
}
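A compact justification of the exponent used above, for p ≡ 3 (mod 4) as stated in the function's comment: if a square root y of a exists, then

\[
a^{(p+1)/4} \;=\; y^{(p+1)/2} \;=\; y \cdot y^{(p-1)/2} \;=\; \pm y,
\]

so squaring the result recovers a. If a is not a square, the same exponentiation produces a square root of -a instead (for such p, -1 is a non-residue), which is exactly why the function ends by squaring r and comparing against a.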
static void rustsecp256k1_v0_1_0_fe_inv(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a) {
rustsecp256k1_v0_1_0_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
int j;
/** The binary representation of (p - 2) has 5 blocks of 1s, with lengths in
* { 1, 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
* [1], [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
*/
rustsecp256k1_v0_1_0_fe_sqr(&x2, a);
rustsecp256k1_v0_1_0_fe_mul(&x2, &x2, a);
rustsecp256k1_v0_1_0_fe_sqr(&x3, &x2);
rustsecp256k1_v0_1_0_fe_mul(&x3, &x3, a);
x6 = x3;
for (j=0; j<3; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&x6, &x6);
}
rustsecp256k1_v0_1_0_fe_mul(&x6, &x6, &x3);
x9 = x6;
for (j=0; j<3; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&x9, &x9);
}
rustsecp256k1_v0_1_0_fe_mul(&x9, &x9, &x3);
x11 = x9;
for (j=0; j<2; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&x11, &x11);
}
rustsecp256k1_v0_1_0_fe_mul(&x11, &x11, &x2);
x22 = x11;
for (j=0; j<11; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&x22, &x22);
}
rustsecp256k1_v0_1_0_fe_mul(&x22, &x22, &x11);
x44 = x22;
for (j=0; j<22; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&x44, &x44);
}
rustsecp256k1_v0_1_0_fe_mul(&x44, &x44, &x22);
x88 = x44;
for (j=0; j<44; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&x88, &x88);
}
rustsecp256k1_v0_1_0_fe_mul(&x88, &x88, &x44);
x176 = x88;
for (j=0; j<88; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&x176, &x176);
}
rustsecp256k1_v0_1_0_fe_mul(&x176, &x176, &x88);
x220 = x176;
for (j=0; j<44; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&x220, &x220);
}
rustsecp256k1_v0_1_0_fe_mul(&x220, &x220, &x44);
x223 = x220;
for (j=0; j<3; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&x223, &x223);
}
rustsecp256k1_v0_1_0_fe_mul(&x223, &x223, &x3);
/* The final result is then assembled using a sliding window over the blocks. */
t1 = x223;
for (j=0; j<23; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&t1, &t1);
}
rustsecp256k1_v0_1_0_fe_mul(&t1, &t1, &x22);
for (j=0; j<5; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&t1, &t1);
}
rustsecp256k1_v0_1_0_fe_mul(&t1, &t1, a);
for (j=0; j<3; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&t1, &t1);
}
rustsecp256k1_v0_1_0_fe_mul(&t1, &t1, &x2);
for (j=0; j<2; j++) {
rustsecp256k1_v0_1_0_fe_sqr(&t1, &t1);
}
rustsecp256k1_v0_1_0_fe_mul(r, a, &t1);
}
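The inverse above drives the same squaring-and-multiplying chain with the exponent p - 2; the identity it relies on is Fermat's little theorem for the prime field:

\[
a^{-1} \;\equiv\; a^{\,p-2} \pmod{p}, \qquad a \not\equiv 0 \pmod{p},
\]

which is why this chain targets the bit pattern of p - 2 rather than (p + 1)/4.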
static void rustsecp256k1_v0_1_0_fe_inv_var(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a) {
#if defined(USE_FIELD_INV_BUILTIN)
rustsecp256k1_v0_1_0_fe_inv(r, a);
#elif defined(USE_FIELD_INV_NUM)
rustsecp256k1_v0_1_0_num n, m;
static const rustsecp256k1_v0_1_0_fe negone = SECP256K1_FE_CONST(
0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL,
0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xFFFFFC2EUL
);
/* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
static const unsigned char prime[32] = {
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
};
unsigned char b[32];
int res;
rustsecp256k1_v0_1_0_fe c = *a;
rustsecp256k1_v0_1_0_fe_normalize_var(&c);
rustsecp256k1_v0_1_0_fe_get_b32(b, &c);
rustsecp256k1_v0_1_0_num_set_bin(&n, b, 32);
rustsecp256k1_v0_1_0_num_set_bin(&m, prime, 32);
rustsecp256k1_v0_1_0_num_mod_inverse(&n, &n, &m);
rustsecp256k1_v0_1_0_num_get_bin(b, 32, &n);
res = rustsecp256k1_v0_1_0_fe_set_b32(r, b);
(void)res;
VERIFY_CHECK(res);
/* Verify the result is the (unique) valid inverse using non-GMP code. */
rustsecp256k1_v0_1_0_fe_mul(&c, &c, r);
rustsecp256k1_v0_1_0_fe_add(&c, &negone);
CHECK(rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(&c));
#else
#error "Please select field inverse implementation"
#endif
}
static void rustsecp256k1_v0_1_0_fe_inv_all_var(rustsecp256k1_v0_1_0_fe *r, const rustsecp256k1_v0_1_0_fe *a, size_t len) {
rustsecp256k1_v0_1_0_fe u;
size_t i;
if (len < 1) {
return;
}
VERIFY_CHECK((r + len <= a) || (a + len <= r));
r[0] = a[0];
i = 0;
while (++i < len) {
rustsecp256k1_v0_1_0_fe_mul(&r[i], &r[i - 1], &a[i]);
}
rustsecp256k1_v0_1_0_fe_inv_var(&u, &r[--i]);
while (i > 0) {
size_t j = i--;
rustsecp256k1_v0_1_0_fe_mul(&r[j], &r[i], &u);
rustsecp256k1_v0_1_0_fe_mul(&u, &u, &a[j]);
}
r[0] = u;
}
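The loop structure above is the usual batch-inversion ("Montgomery's trick") pattern: accumulate prefix products, invert once, then walk backwards. Worked out for three inputs a_0, a_1, a_2:

\[
r_0 = a_0, \quad r_1 = a_0 a_1, \quad r_2 = a_0 a_1 a_2, \qquad u = r_2^{-1};
\]
\[
a_2^{-1} = u\,r_1, \quad u \leftarrow u\,a_2 = (a_0 a_1)^{-1}, \quad
a_1^{-1} = u\,r_0, \quad u \leftarrow u\,a_1 = a_0^{-1},
\]

so the whole batch costs a single field inversion plus roughly 3(len - 1) multiplications.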
static int rustsecp256k1_v0_1_0_fe_is_quad_var(const rustsecp256k1_v0_1_0_fe *a) {
#ifndef USE_NUM_NONE
unsigned char b[32];
rustsecp256k1_v0_1_0_num n;
rustsecp256k1_v0_1_0_num m;
/* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
static const unsigned char prime[32] = {
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
};
rustsecp256k1_v0_1_0_fe c = *a;
rustsecp256k1_v0_1_0_fe_normalize_var(&c);
rustsecp256k1_v0_1_0_fe_get_b32(b, &c);
rustsecp256k1_v0_1_0_num_set_bin(&n, b, 32);
rustsecp256k1_v0_1_0_num_set_bin(&m, prime, 32);
return rustsecp256k1_v0_1_0_num_jacobi(&n, &m) >= 0;
#else
rustsecp256k1_v0_1_0_fe r;
return rustsecp256k1_v0_1_0_fe_sqrt(&r, a);
#endif
}
#endif /* SECP256K1_FIELD_IMPL_H */
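Both branches of is_quad_var above decide quadratic residuosity: the num-based branch evaluates the Jacobi (here Legendre) symbol of a modulo the field prime, while the fallback relies on fe_sqrt succeeding exactly when a is a square, which for prime p is Euler's criterion:

\[
a^{(p-1)/2} \;\equiv\;
\begin{cases}
+1 \pmod{p} & \text{if } a \text{ is a non-zero square},\\
-1 \pmod{p} & \text{if } a \text{ is not a square}.
\end{cases}
\]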

View File

@ -20,13 +20,13 @@ static void default_error_callback_fn(const char* str, void* data) {
abort();
}
static const secp256k1_callback default_error_callback = {
static const rustsecp256k1_v0_1_0_callback default_error_callback = {
default_error_callback_fn,
NULL
};
int main(int argc, char **argv) {
secp256k1_ecmult_gen_context ctx;
rustsecp256k1_v0_1_0_ecmult_gen_context ctx;
void *prealloc, *base;
int inner;
int outer;
@ -45,12 +45,12 @@ int main(int argc, char **argv) {
fprintf(fp, "#define _SECP256K1_ECMULT_STATIC_CONTEXT_\n");
fprintf(fp, "#include \"src/group.h\"\n");
fprintf(fp, "#define SC SECP256K1_GE_STORAGE_CONST\n");
fprintf(fp, "static const secp256k1_ge_storage secp256k1_ecmult_static_context[64][16] = {\n");
fprintf(fp, "static const rustsecp256k1_v0_1_0_ge_storage rustsecp256k1_v0_1_0_ecmult_static_context[64][16] = {\n");
base = checked_malloc(&default_error_callback, SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE);
prealloc = base;
secp256k1_ecmult_gen_context_init(&ctx);
secp256k1_ecmult_gen_context_build(&ctx, &prealloc);
rustsecp256k1_v0_1_0_ecmult_gen_context_init(&ctx);
rustsecp256k1_v0_1_0_ecmult_gen_context_build(&ctx, &prealloc);
for(outer = 0; outer != 64; outer++) {
fprintf(fp,"{\n");
for(inner = 0; inner != 16; inner++) {
@ -68,7 +68,7 @@ int main(int argc, char **argv) {
}
}
fprintf(fp,"};\n");
secp256k1_ecmult_gen_context_clear(&ctx);
rustsecp256k1_v0_1_0_ecmult_gen_context_clear(&ctx);
free(base);
fprintf(fp, "#undef SC\n");

View File

@ -12,131 +12,131 @@
/** A group element of the secp256k1 curve, in affine coordinates. */
typedef struct {
secp256k1_fe x;
secp256k1_fe y;
rustsecp256k1_v0_1_0_fe x;
rustsecp256k1_v0_1_0_fe y;
int infinity; /* whether this represents the point at infinity */
} secp256k1_ge;
} rustsecp256k1_v0_1_0_ge;
#define SECP256K1_GE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), 0}
#define SECP256K1_GE_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1}
/** A group element of the secp256k1 curve, in jacobian coordinates. */
typedef struct {
secp256k1_fe x; /* actual X: x/z^2 */
secp256k1_fe y; /* actual Y: y/z^3 */
secp256k1_fe z;
rustsecp256k1_v0_1_0_fe x; /* actual X: x/z^2 */
rustsecp256k1_v0_1_0_fe y; /* actual Y: y/z^3 */
rustsecp256k1_v0_1_0_fe z;
int infinity; /* whether this represents the point at infinity */
} secp256k1_gej;
} rustsecp256k1_v0_1_0_gej;
#define SECP256K1_GEJ_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1), 0}
#define SECP256K1_GEJ_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1}
typedef struct {
secp256k1_fe_storage x;
secp256k1_fe_storage y;
} secp256k1_ge_storage;
rustsecp256k1_v0_1_0_fe_storage x;
rustsecp256k1_v0_1_0_fe_storage y;
} rustsecp256k1_v0_1_0_ge_storage;
#define SECP256K1_GE_STORAGE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_STORAGE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_STORAGE_CONST((i),(j),(k),(l),(m),(n),(o),(p))}
#define SECP256K1_GE_STORAGE_CONST_GET(t) SECP256K1_FE_STORAGE_CONST_GET(t.x), SECP256K1_FE_STORAGE_CONST_GET(t.y)
/** Set a group element equal to the point with given X and Y coordinates */
static void secp256k1_ge_set_xy(secp256k1_ge *r, const secp256k1_fe *x, const secp256k1_fe *y);
static void rustsecp256k1_v0_1_0_ge_set_xy(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_fe *x, const rustsecp256k1_v0_1_0_fe *y);
/** Set a group element (affine) equal to the point with the given X coordinate
* and a Y coordinate that is a quadratic residue modulo p. The return value
* is true iff a coordinate with the given X coordinate exists.
*/
static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x);
static int rustsecp256k1_v0_1_0_ge_set_xquad(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_fe *x);
/** Set a group element (affine) equal to the point with the given X coordinate, and given oddness
* for Y. Return value indicates whether the result is valid. */
static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd);
static int rustsecp256k1_v0_1_0_ge_set_xo_var(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_fe *x, int odd);
/** Check whether a group element is the point at infinity. */
static int secp256k1_ge_is_infinity(const secp256k1_ge *a);
static int rustsecp256k1_v0_1_0_ge_is_infinity(const rustsecp256k1_v0_1_0_ge *a);
/** Check whether a group element is valid (i.e., on the curve). */
static int secp256k1_ge_is_valid_var(const secp256k1_ge *a);
static int rustsecp256k1_v0_1_0_ge_is_valid_var(const rustsecp256k1_v0_1_0_ge *a);
static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a);
static void rustsecp256k1_v0_1_0_ge_neg(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_ge *a);
/** Set a group element equal to another which is given in jacobian coordinates */
static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a);
static void rustsecp256k1_v0_1_0_ge_set_gej(rustsecp256k1_v0_1_0_ge *r, rustsecp256k1_v0_1_0_gej *a);
/** Set a batch of group elements equal to the inputs given in jacobian coordinates */
static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len);
static void rustsecp256k1_v0_1_0_ge_set_all_gej_var(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_gej *a, size_t len);
/** Bring a batch inputs given in jacobian coordinates (with known z-ratios) to
* the same global z "denominator". zr must contain the known z-ratios such
* that mul(a[i].z, zr[i+1]) == a[i+1].z. zr[0] is ignored. The x and y
* coordinates of the result are stored in r, the common z coordinate is
* stored in globalz. */
static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp256k1_fe *globalz, const secp256k1_gej *a, const secp256k1_fe *zr);
static void rustsecp256k1_v0_1_0_ge_globalz_set_table_gej(size_t len, rustsecp256k1_v0_1_0_ge *r, rustsecp256k1_v0_1_0_fe *globalz, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_fe *zr);
/** Set a group element (affine) equal to the point at infinity. */
static void secp256k1_ge_set_infinity(secp256k1_ge *r);
static void rustsecp256k1_v0_1_0_ge_set_infinity(rustsecp256k1_v0_1_0_ge *r);
/** Set a group element (jacobian) equal to the point at infinity. */
static void secp256k1_gej_set_infinity(secp256k1_gej *r);
static void rustsecp256k1_v0_1_0_gej_set_infinity(rustsecp256k1_v0_1_0_gej *r);
/** Set a group element (jacobian) equal to another which is given in affine coordinates. */
static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a);
static void rustsecp256k1_v0_1_0_gej_set_ge(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_ge *a);
/** Compare the X coordinate of a group element (jacobian). */
static int secp256k1_gej_eq_x_var(const secp256k1_fe *x, const secp256k1_gej *a);
static int rustsecp256k1_v0_1_0_gej_eq_x_var(const rustsecp256k1_v0_1_0_fe *x, const rustsecp256k1_v0_1_0_gej *a);
/** Set r equal to the inverse of a (i.e., mirrored around the X axis) */
static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a);
static void rustsecp256k1_v0_1_0_gej_neg(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a);
/** Check whether a group element is the point at infinity. */
static int secp256k1_gej_is_infinity(const secp256k1_gej *a);
static int rustsecp256k1_v0_1_0_gej_is_infinity(const rustsecp256k1_v0_1_0_gej *a);
/** Check whether a group element's y coordinate is a quadratic residue. */
static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a);
static int rustsecp256k1_v0_1_0_gej_has_quad_y_var(const rustsecp256k1_v0_1_0_gej *a);
/** Set r equal to the double of a. If rzr is not-NULL, r->z = a->z * *rzr (where infinity means an implicit z = 0).
* a may not be zero. Constant time. */
static void secp256k1_gej_double_nonzero(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr);
static void rustsecp256k1_v0_1_0_gej_double_nonzero(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, rustsecp256k1_v0_1_0_fe *rzr);
/** Set r equal to the double of a. If rzr is not-NULL, r->z = a->z * *rzr (where infinity means an implicit z = 0). */
static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr);
static void rustsecp256k1_v0_1_0_gej_double_var(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, rustsecp256k1_v0_1_0_fe *rzr);
/** Set r equal to the sum of a and b. If rzr is non-NULL, r->z = a->z * *rzr (a cannot be infinity in that case). */
static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr);
static void rustsecp256k1_v0_1_0_gej_add_var(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_gej *b, rustsecp256k1_v0_1_0_fe *rzr);
/** Set r equal to the sum of a and b (with b given in affine coordinates, and not infinity). */
static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b);
static void rustsecp256k1_v0_1_0_gej_add_ge(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_ge *b);
/** Set r equal to the sum of a and b (with b given in affine coordinates). This is more efficient
than secp256k1_gej_add_var. It is identical to secp256k1_gej_add_ge but without constant-time
than rustsecp256k1_v0_1_0_gej_add_var. It is identical to rustsecp256k1_v0_1_0_gej_add_ge but without constant-time
guarantee, and b is allowed to be infinity. If rzr is non-NULL, r->z = a->z * *rzr (a cannot be infinity in that case). */
static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr);
static void rustsecp256k1_v0_1_0_gej_add_ge_var(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_ge *b, rustsecp256k1_v0_1_0_fe *rzr);
/** Set r equal to the sum of a and b (with the inverse of b's Z coordinate passed as bzinv). */
static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv);
static void rustsecp256k1_v0_1_0_gej_add_zinv_var(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_ge *b, const rustsecp256k1_v0_1_0_fe *bzinv);
#ifdef USE_ENDOMORPHISM
/** Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast. */
static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a);
static void rustsecp256k1_v0_1_0_ge_mul_lambda(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_ge *a);
#endif
/** Clear a secp256k1_gej to prevent leaking sensitive information. */
static void secp256k1_gej_clear(secp256k1_gej *r);
/** Clear a rustsecp256k1_v0_1_0_gej to prevent leaking sensitive information. */
static void rustsecp256k1_v0_1_0_gej_clear(rustsecp256k1_v0_1_0_gej *r);
/** Clear a secp256k1_ge to prevent leaking sensitive information. */
static void secp256k1_ge_clear(secp256k1_ge *r);
/** Clear a rustsecp256k1_v0_1_0_ge to prevent leaking sensitive information. */
static void rustsecp256k1_v0_1_0_ge_clear(rustsecp256k1_v0_1_0_ge *r);
/** Convert a group element to the storage type. */
static void secp256k1_ge_to_storage(secp256k1_ge_storage *r, const secp256k1_ge *a);
static void rustsecp256k1_v0_1_0_ge_to_storage(rustsecp256k1_v0_1_0_ge_storage *r, const rustsecp256k1_v0_1_0_ge *a);
/** Convert a group element back from the storage type. */
static void secp256k1_ge_from_storage(secp256k1_ge *r, const secp256k1_ge_storage *a);
static void rustsecp256k1_v0_1_0_ge_from_storage(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_ge_storage *a);
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */
static void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_ge_storage *a, int flag);
static void rustsecp256k1_v0_1_0_ge_storage_cmov(rustsecp256k1_v0_1_0_ge_storage *r, const rustsecp256k1_v0_1_0_ge_storage *a, int flag);
/** Rescale a jacobian point by b which must be non-zero. Constant-time. */
static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *b);
static void rustsecp256k1_v0_1_0_gej_rescale(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_fe *b);
#endif /* SECP256K1_GROUP_H */
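
Editor's note (illustrative, not part of the diff): group.h above declares the affine (_ge) and Jacobian (_gej) point types and the add/double/negate operations for the curve y^2 = x^3 + 7. As a hedged illustration of the underlying affine group law only — the real implementation works in Jacobian coordinates with specialized constant-time field arithmetic and never brute-forces anything — here is a self-contained sketch over a tiny prime field. The prime 211, the brute-force point search and every helper name are inventions for this example.

/* Illustrative sketch of the affine group law on y^2 = x^3 + 7 over a tiny
 * prime field. Not the library's code: the real functions above operate on
 * Jacobian coordinates with dedicated field types. */
#include <stdint.h>
#include <stdio.h>

#define P 211  /* small prime chosen only for this example */
#define B 7

typedef struct { int64_t x, y; int infinity; } pt;

static int64_t md(int64_t a) { a %= P; return a < 0 ? a + P : a; }

/* Modular inverse via Fermat's little theorem: a^(P-2) mod P. */
static int64_t inv(int64_t a) {
    int64_t r = 1, b = md(a);
    int e = P - 2;
    while (e) {
        if (e & 1) r = md(r * b);
        b = md(b * b);
        e >>= 1;
    }
    return r;
}

static int on_curve(pt q) {
    if (q.infinity) return 1;
    return md(q.y * q.y) == md(q.x * q.x % P * q.x + B);
}

/* Affine addition handling the doubling and inverse-point cases,
 * the same case split gej_add_var makes in Jacobian form. */
static pt add(pt a, pt b) {
    pt r = {0, 0, 1};
    int64_t lam;
    if (a.infinity) return b;
    if (b.infinity) return a;
    if (a.x == b.x && md(a.y + b.y) == 0) return r;      /* a == -b */
    if (a.x == b.x && a.y == b.y)                        /* doubling */
        lam = md(3 * a.x % P * a.x % P * inv(2 * a.y));
    else
        lam = md((b.y - a.y) * inv(b.x - a.x));
    r.infinity = 0;
    r.x = md(lam * lam - a.x - b.x);
    r.y = md(lam * (a.x - r.x) - a.y);
    return r;
}

int main(void) {
    pt g = {0, 0, 1}, q;
    int64_t x, y;
    /* Brute-force some point on the curve; fine at this toy size. */
    for (x = 0; x < P && g.infinity; x++)
        for (y = 0; y < P; y++)
            if (md(y * y) == md(x * x % P * x + B)) { g.x = x; g.y = y; g.infinity = 0; break; }
    q = add(g, g);          /* 2*G */
    q = add(q, g);          /* 3*G */
    printf("G=(%lld,%lld) on curve: %d; 3G=(%lld,%lld) on curve: %d\n",
           (long long)g.x, (long long)g.y, on_curve(g),
           (long long)q.x, (long long)q.y, on_curve(q));
    return 0;
}

The Jacobian representation declared above exists precisely to avoid the per-operation modular inversion this sketch performs in inv(); a single inversion is deferred to the final ge_set_gej conversion.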

View File

@ -0,0 +1,705 @@
/**********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_GROUP_IMPL_H
#define SECP256K1_GROUP_IMPL_H
#include "num.h"
#include "field.h"
#include "group.h"
/* These points can be generated in sage as follows:
*
* 0. Setup a worksheet with the following parameters.
* b = 4 # whatever CURVE_B will be set to
* F = FiniteField (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F)
* C = EllipticCurve ([F (0), F (b)])
*
* 1. Determine all the small orders available to you. (If there are
* no satisfactory ones, go back and change b.)
* print C.order().factor(limit=1000)
*
* 2. Choose an order as one of the prime factors listed in the above step.
* (You can also multiply some to get a composite order, though the
* tests will crash trying to invert scalars during signing.) We take a
* random point and scale it to drop its order to the desired value.
* There is some probability this won't work; just try again.
* order = 199
* P = C.random_point()
* P = (int(P.order()) / int(order)) * P
* assert(P.order() == order)
*
* 3. Print the values. You'll need to use a vim macro or something to
* split the hex output into 4-byte chunks.
* print "%x %x" % P.xy()
*/
#if defined(EXHAUSTIVE_TEST_ORDER)
# if EXHAUSTIVE_TEST_ORDER == 199
static const rustsecp256k1_v0_1_0_ge rustsecp256k1_v0_1_0_ge_const_g = SECP256K1_GE_CONST(
0xFA7CC9A7, 0x0737F2DB, 0xA749DD39, 0x2B4FB069,
0x3B017A7D, 0xA808C2F1, 0xFB12940C, 0x9EA66C18,
0x78AC123A, 0x5ED8AEF3, 0x8732BC91, 0x1F3A2868,
0x48DF246C, 0x808DAE72, 0xCFE52572, 0x7F0501ED
);
static const int CURVE_B = 4;
# elif EXHAUSTIVE_TEST_ORDER == 13
static const rustsecp256k1_v0_1_0_ge rustsecp256k1_v0_1_0_ge_const_g = SECP256K1_GE_CONST(
0xedc60018, 0xa51a786b, 0x2ea91f4d, 0x4c9416c0,
0x9de54c3b, 0xa1316554, 0x6cf4345c, 0x7277ef15,
0x54cb1b6b, 0xdc8c1273, 0x087844ea, 0x43f4603e,
0x0eaf9a43, 0xf6effe55, 0x939f806d, 0x37adf8ac
);
static const int CURVE_B = 2;
# else
# error No known generator for the specified exhaustive test group order.
# endif
#else
/** Generator for secp256k1, value 'g' defined in
* "Standards for Efficient Cryptography" (SEC2) 2.7.1.
*/
static const rustsecp256k1_v0_1_0_ge rustsecp256k1_v0_1_0_ge_const_g = SECP256K1_GE_CONST(
0x79BE667EUL, 0xF9DCBBACUL, 0x55A06295UL, 0xCE870B07UL,
0x029BFCDBUL, 0x2DCE28D9UL, 0x59F2815BUL, 0x16F81798UL,
0x483ADA77UL, 0x26A3C465UL, 0x5DA4FBFCUL, 0x0E1108A8UL,
0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL
);
static const int CURVE_B = 7;
#endif
static void rustsecp256k1_v0_1_0_ge_set_gej_zinv(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_fe *zi) {
rustsecp256k1_v0_1_0_fe zi2;
rustsecp256k1_v0_1_0_fe zi3;
rustsecp256k1_v0_1_0_fe_sqr(&zi2, zi);
rustsecp256k1_v0_1_0_fe_mul(&zi3, &zi2, zi);
rustsecp256k1_v0_1_0_fe_mul(&r->x, &a->x, &zi2);
rustsecp256k1_v0_1_0_fe_mul(&r->y, &a->y, &zi3);
r->infinity = a->infinity;
}
static void rustsecp256k1_v0_1_0_ge_set_xy(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_fe *x, const rustsecp256k1_v0_1_0_fe *y) {
r->infinity = 0;
r->x = *x;
r->y = *y;
}
static int rustsecp256k1_v0_1_0_ge_is_infinity(const rustsecp256k1_v0_1_0_ge *a) {
return a->infinity;
}
static void rustsecp256k1_v0_1_0_ge_neg(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_ge *a) {
*r = *a;
rustsecp256k1_v0_1_0_fe_normalize_weak(&r->y);
rustsecp256k1_v0_1_0_fe_negate(&r->y, &r->y, 1);
}
static void rustsecp256k1_v0_1_0_ge_set_gej(rustsecp256k1_v0_1_0_ge *r, rustsecp256k1_v0_1_0_gej *a) {
rustsecp256k1_v0_1_0_fe z2, z3;
r->infinity = a->infinity;
rustsecp256k1_v0_1_0_fe_inv(&a->z, &a->z);
rustsecp256k1_v0_1_0_fe_sqr(&z2, &a->z);
rustsecp256k1_v0_1_0_fe_mul(&z3, &a->z, &z2);
rustsecp256k1_v0_1_0_fe_mul(&a->x, &a->x, &z2);
rustsecp256k1_v0_1_0_fe_mul(&a->y, &a->y, &z3);
rustsecp256k1_v0_1_0_fe_set_int(&a->z, 1);
r->x = a->x;
r->y = a->y;
}
static void rustsecp256k1_v0_1_0_ge_set_gej_var(rustsecp256k1_v0_1_0_ge *r, rustsecp256k1_v0_1_0_gej *a) {
rustsecp256k1_v0_1_0_fe z2, z3;
r->infinity = a->infinity;
if (a->infinity) {
return;
}
rustsecp256k1_v0_1_0_fe_inv_var(&a->z, &a->z);
rustsecp256k1_v0_1_0_fe_sqr(&z2, &a->z);
rustsecp256k1_v0_1_0_fe_mul(&z3, &a->z, &z2);
rustsecp256k1_v0_1_0_fe_mul(&a->x, &a->x, &z2);
rustsecp256k1_v0_1_0_fe_mul(&a->y, &a->y, &z3);
rustsecp256k1_v0_1_0_fe_set_int(&a->z, 1);
r->x = a->x;
r->y = a->y;
}
static void rustsecp256k1_v0_1_0_ge_set_all_gej_var(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_gej *a, size_t len) {
rustsecp256k1_v0_1_0_fe u;
size_t i;
size_t last_i = SIZE_MAX;
for (i = 0; i < len; i++) {
if (!a[i].infinity) {
/* Use destination's x coordinates as scratch space */
if (last_i == SIZE_MAX) {
r[i].x = a[i].z;
} else {
rustsecp256k1_v0_1_0_fe_mul(&r[i].x, &r[last_i].x, &a[i].z);
}
last_i = i;
}
}
if (last_i == SIZE_MAX) {
return;
}
rustsecp256k1_v0_1_0_fe_inv_var(&u, &r[last_i].x);
i = last_i;
while (i > 0) {
i--;
if (!a[i].infinity) {
rustsecp256k1_v0_1_0_fe_mul(&r[last_i].x, &r[i].x, &u);
rustsecp256k1_v0_1_0_fe_mul(&u, &u, &a[last_i].z);
last_i = i;
}
}
VERIFY_CHECK(!a[last_i].infinity);
r[last_i].x = u;
for (i = 0; i < len; i++) {
r[i].infinity = a[i].infinity;
if (!a[i].infinity) {
rustsecp256k1_v0_1_0_ge_set_gej_zinv(&r[i], &a[i], &r[i].x);
}
}
}
static void rustsecp256k1_v0_1_0_ge_globalz_set_table_gej(size_t len, rustsecp256k1_v0_1_0_ge *r, rustsecp256k1_v0_1_0_fe *globalz, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_fe *zr) {
size_t i = len - 1;
rustsecp256k1_v0_1_0_fe zs;
if (len > 0) {
/* The z of the final point gives us the "global Z" for the table. */
r[i].x = a[i].x;
r[i].y = a[i].y;
/* Ensure all y values are in weak normal form for fast negation of points */
rustsecp256k1_v0_1_0_fe_normalize_weak(&r[i].y);
*globalz = a[i].z;
r[i].infinity = 0;
zs = zr[i];
/* Work our way backwards, using the z-ratios to scale the x/y values. */
while (i > 0) {
if (i != len - 1) {
rustsecp256k1_v0_1_0_fe_mul(&zs, &zs, &zr[i]);
}
i--;
rustsecp256k1_v0_1_0_ge_set_gej_zinv(&r[i], &a[i], &zs);
}
}
}
static void rustsecp256k1_v0_1_0_gej_set_infinity(rustsecp256k1_v0_1_0_gej *r) {
r->infinity = 1;
rustsecp256k1_v0_1_0_fe_clear(&r->x);
rustsecp256k1_v0_1_0_fe_clear(&r->y);
rustsecp256k1_v0_1_0_fe_clear(&r->z);
}
static void rustsecp256k1_v0_1_0_ge_set_infinity(rustsecp256k1_v0_1_0_ge *r) {
r->infinity = 1;
rustsecp256k1_v0_1_0_fe_clear(&r->x);
rustsecp256k1_v0_1_0_fe_clear(&r->y);
}
static void rustsecp256k1_v0_1_0_gej_clear(rustsecp256k1_v0_1_0_gej *r) {
r->infinity = 0;
rustsecp256k1_v0_1_0_fe_clear(&r->x);
rustsecp256k1_v0_1_0_fe_clear(&r->y);
rustsecp256k1_v0_1_0_fe_clear(&r->z);
}
static void rustsecp256k1_v0_1_0_ge_clear(rustsecp256k1_v0_1_0_ge *r) {
r->infinity = 0;
rustsecp256k1_v0_1_0_fe_clear(&r->x);
rustsecp256k1_v0_1_0_fe_clear(&r->y);
}
static int rustsecp256k1_v0_1_0_ge_set_xquad(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_fe *x) {
rustsecp256k1_v0_1_0_fe x2, x3, c;
r->x = *x;
rustsecp256k1_v0_1_0_fe_sqr(&x2, x);
rustsecp256k1_v0_1_0_fe_mul(&x3, x, &x2);
r->infinity = 0;
rustsecp256k1_v0_1_0_fe_set_int(&c, CURVE_B);
rustsecp256k1_v0_1_0_fe_add(&c, &x3);
return rustsecp256k1_v0_1_0_fe_sqrt(&r->y, &c);
}
static int rustsecp256k1_v0_1_0_ge_set_xo_var(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_fe *x, int odd) {
if (!rustsecp256k1_v0_1_0_ge_set_xquad(r, x)) {
return 0;
}
rustsecp256k1_v0_1_0_fe_normalize_var(&r->y);
if (rustsecp256k1_v0_1_0_fe_is_odd(&r->y) != odd) {
rustsecp256k1_v0_1_0_fe_negate(&r->y, &r->y, 1);
}
return 1;
}
static void rustsecp256k1_v0_1_0_gej_set_ge(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_ge *a) {
r->infinity = a->infinity;
r->x = a->x;
r->y = a->y;
rustsecp256k1_v0_1_0_fe_set_int(&r->z, 1);
}
static int rustsecp256k1_v0_1_0_gej_eq_x_var(const rustsecp256k1_v0_1_0_fe *x, const rustsecp256k1_v0_1_0_gej *a) {
rustsecp256k1_v0_1_0_fe r, r2;
VERIFY_CHECK(!a->infinity);
rustsecp256k1_v0_1_0_fe_sqr(&r, &a->z); rustsecp256k1_v0_1_0_fe_mul(&r, &r, x);
r2 = a->x; rustsecp256k1_v0_1_0_fe_normalize_weak(&r2);
return rustsecp256k1_v0_1_0_fe_equal_var(&r, &r2);
}
static void rustsecp256k1_v0_1_0_gej_neg(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a) {
r->infinity = a->infinity;
r->x = a->x;
r->y = a->y;
r->z = a->z;
rustsecp256k1_v0_1_0_fe_normalize_weak(&r->y);
rustsecp256k1_v0_1_0_fe_negate(&r->y, &r->y, 1);
}
static int rustsecp256k1_v0_1_0_gej_is_infinity(const rustsecp256k1_v0_1_0_gej *a) {
return a->infinity;
}
static int rustsecp256k1_v0_1_0_gej_is_valid_var(const rustsecp256k1_v0_1_0_gej *a) {
rustsecp256k1_v0_1_0_fe y2, x3, z2, z6;
if (a->infinity) {
return 0;
}
/** y^2 = x^3 + 7
* (Y/Z^3)^2 = (X/Z^2)^3 + 7
* Y^2 / Z^6 = X^3 / Z^6 + 7
* Y^2 = X^3 + 7*Z^6
*/
rustsecp256k1_v0_1_0_fe_sqr(&y2, &a->y);
rustsecp256k1_v0_1_0_fe_sqr(&x3, &a->x); rustsecp256k1_v0_1_0_fe_mul(&x3, &x3, &a->x);
rustsecp256k1_v0_1_0_fe_sqr(&z2, &a->z);
rustsecp256k1_v0_1_0_fe_sqr(&z6, &z2); rustsecp256k1_v0_1_0_fe_mul(&z6, &z6, &z2);
rustsecp256k1_v0_1_0_fe_mul_int(&z6, CURVE_B);
rustsecp256k1_v0_1_0_fe_add(&x3, &z6);
rustsecp256k1_v0_1_0_fe_normalize_weak(&x3);
return rustsecp256k1_v0_1_0_fe_equal_var(&y2, &x3);
}
static int rustsecp256k1_v0_1_0_ge_is_valid_var(const rustsecp256k1_v0_1_0_ge *a) {
rustsecp256k1_v0_1_0_fe y2, x3, c;
if (a->infinity) {
return 0;
}
/* y^2 = x^3 + 7 */
rustsecp256k1_v0_1_0_fe_sqr(&y2, &a->y);
rustsecp256k1_v0_1_0_fe_sqr(&x3, &a->x); rustsecp256k1_v0_1_0_fe_mul(&x3, &x3, &a->x);
rustsecp256k1_v0_1_0_fe_set_int(&c, CURVE_B);
rustsecp256k1_v0_1_0_fe_add(&x3, &c);
rustsecp256k1_v0_1_0_fe_normalize_weak(&x3);
return rustsecp256k1_v0_1_0_fe_equal_var(&y2, &x3);
}
static void rustsecp256k1_v0_1_0_gej_double_var(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, rustsecp256k1_v0_1_0_fe *rzr) {
/* Operations: 3 mul, 4 sqr, 0 normalize, 12 mul_int/add/negate.
*
* Note that there is an implementation described at
* https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
* which trades a multiply for a square, but in practice this is actually slower,
* mainly because it requires more normalizations.
*/
rustsecp256k1_v0_1_0_fe t1,t2,t3,t4;
/** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity,
* Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have
* y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p.
*
* Having said this, if this function receives a point on a sextic twist, e.g. by
* a fault attack, it is possible for y to be 0. This happens for y^2 = x^3 + 6,
* since -6 does have a cube root mod p. For this point, this function will not set
* the infinity flag even though the point doubles to infinity, and the result
* point will be gibberish (z = 0 but infinity = 0).
*/
r->infinity = a->infinity;
if (r->infinity) {
if (rzr != NULL) {
rustsecp256k1_v0_1_0_fe_set_int(rzr, 1);
}
return;
}
if (rzr != NULL) {
*rzr = a->y;
rustsecp256k1_v0_1_0_fe_normalize_weak(rzr);
rustsecp256k1_v0_1_0_fe_mul_int(rzr, 2);
}
rustsecp256k1_v0_1_0_fe_mul(&r->z, &a->z, &a->y);
rustsecp256k1_v0_1_0_fe_mul_int(&r->z, 2); /* Z' = 2*Y*Z (2) */
rustsecp256k1_v0_1_0_fe_sqr(&t1, &a->x);
rustsecp256k1_v0_1_0_fe_mul_int(&t1, 3); /* T1 = 3*X^2 (3) */
rustsecp256k1_v0_1_0_fe_sqr(&t2, &t1); /* T2 = 9*X^4 (1) */
rustsecp256k1_v0_1_0_fe_sqr(&t3, &a->y);
rustsecp256k1_v0_1_0_fe_mul_int(&t3, 2); /* T3 = 2*Y^2 (2) */
rustsecp256k1_v0_1_0_fe_sqr(&t4, &t3);
rustsecp256k1_v0_1_0_fe_mul_int(&t4, 2); /* T4 = 8*Y^4 (2) */
rustsecp256k1_v0_1_0_fe_mul(&t3, &t3, &a->x); /* T3 = 2*X*Y^2 (1) */
r->x = t3;
rustsecp256k1_v0_1_0_fe_mul_int(&r->x, 4); /* X' = 8*X*Y^2 (4) */
rustsecp256k1_v0_1_0_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */
rustsecp256k1_v0_1_0_fe_add(&r->x, &t2); /* X' = 9*X^4 - 8*X*Y^2 (6) */
rustsecp256k1_v0_1_0_fe_negate(&t2, &t2, 1); /* T2 = -9*X^4 (2) */
rustsecp256k1_v0_1_0_fe_mul_int(&t3, 6); /* T3 = 12*X*Y^2 (6) */
rustsecp256k1_v0_1_0_fe_add(&t3, &t2); /* T3 = 12*X*Y^2 - 9*X^4 (8) */
rustsecp256k1_v0_1_0_fe_mul(&r->y, &t1, &t3); /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */
rustsecp256k1_v0_1_0_fe_negate(&t2, &t4, 2); /* T2 = -8*Y^4 (3) */
rustsecp256k1_v0_1_0_fe_add(&r->y, &t2); /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */
}
static SECP256K1_INLINE void rustsecp256k1_v0_1_0_gej_double_nonzero(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, rustsecp256k1_v0_1_0_fe *rzr) {
VERIFY_CHECK(!rustsecp256k1_v0_1_0_gej_is_infinity(a));
rustsecp256k1_v0_1_0_gej_double_var(r, a, rzr);
}
static void rustsecp256k1_v0_1_0_gej_add_var(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_gej *b, rustsecp256k1_v0_1_0_fe *rzr) {
/* Operations: 12 mul, 4 sqr, 2 normalize, 12 mul_int/add/negate */
rustsecp256k1_v0_1_0_fe z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
if (a->infinity) {
VERIFY_CHECK(rzr == NULL);
*r = *b;
return;
}
if (b->infinity) {
if (rzr != NULL) {
rustsecp256k1_v0_1_0_fe_set_int(rzr, 1);
}
*r = *a;
return;
}
r->infinity = 0;
rustsecp256k1_v0_1_0_fe_sqr(&z22, &b->z);
rustsecp256k1_v0_1_0_fe_sqr(&z12, &a->z);
rustsecp256k1_v0_1_0_fe_mul(&u1, &a->x, &z22);
rustsecp256k1_v0_1_0_fe_mul(&u2, &b->x, &z12);
rustsecp256k1_v0_1_0_fe_mul(&s1, &a->y, &z22); rustsecp256k1_v0_1_0_fe_mul(&s1, &s1, &b->z);
rustsecp256k1_v0_1_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_1_0_fe_mul(&s2, &s2, &a->z);
rustsecp256k1_v0_1_0_fe_negate(&h, &u1, 1); rustsecp256k1_v0_1_0_fe_add(&h, &u2);
rustsecp256k1_v0_1_0_fe_negate(&i, &s1, 1); rustsecp256k1_v0_1_0_fe_add(&i, &s2);
if (rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(&h)) {
if (rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(&i)) {
rustsecp256k1_v0_1_0_gej_double_var(r, a, rzr);
} else {
if (rzr != NULL) {
rustsecp256k1_v0_1_0_fe_set_int(rzr, 0);
}
r->infinity = 1;
}
return;
}
rustsecp256k1_v0_1_0_fe_sqr(&i2, &i);
rustsecp256k1_v0_1_0_fe_sqr(&h2, &h);
rustsecp256k1_v0_1_0_fe_mul(&h3, &h, &h2);
rustsecp256k1_v0_1_0_fe_mul(&h, &h, &b->z);
if (rzr != NULL) {
*rzr = h;
}
rustsecp256k1_v0_1_0_fe_mul(&r->z, &a->z, &h);
rustsecp256k1_v0_1_0_fe_mul(&t, &u1, &h2);
r->x = t; rustsecp256k1_v0_1_0_fe_mul_int(&r->x, 2); rustsecp256k1_v0_1_0_fe_add(&r->x, &h3); rustsecp256k1_v0_1_0_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_1_0_fe_add(&r->x, &i2);
rustsecp256k1_v0_1_0_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_1_0_fe_add(&r->y, &t); rustsecp256k1_v0_1_0_fe_mul(&r->y, &r->y, &i);
rustsecp256k1_v0_1_0_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_1_0_fe_negate(&h3, &h3, 1);
rustsecp256k1_v0_1_0_fe_add(&r->y, &h3);
}
static void rustsecp256k1_v0_1_0_gej_add_ge_var(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_ge *b, rustsecp256k1_v0_1_0_fe *rzr) {
/* 8 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */
rustsecp256k1_v0_1_0_fe z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
if (a->infinity) {
VERIFY_CHECK(rzr == NULL);
rustsecp256k1_v0_1_0_gej_set_ge(r, b);
return;
}
if (b->infinity) {
if (rzr != NULL) {
rustsecp256k1_v0_1_0_fe_set_int(rzr, 1);
}
*r = *a;
return;
}
r->infinity = 0;
rustsecp256k1_v0_1_0_fe_sqr(&z12, &a->z);
u1 = a->x; rustsecp256k1_v0_1_0_fe_normalize_weak(&u1);
rustsecp256k1_v0_1_0_fe_mul(&u2, &b->x, &z12);
s1 = a->y; rustsecp256k1_v0_1_0_fe_normalize_weak(&s1);
rustsecp256k1_v0_1_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_1_0_fe_mul(&s2, &s2, &a->z);
rustsecp256k1_v0_1_0_fe_negate(&h, &u1, 1); rustsecp256k1_v0_1_0_fe_add(&h, &u2);
rustsecp256k1_v0_1_0_fe_negate(&i, &s1, 1); rustsecp256k1_v0_1_0_fe_add(&i, &s2);
if (rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(&h)) {
if (rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(&i)) {
rustsecp256k1_v0_1_0_gej_double_var(r, a, rzr);
} else {
if (rzr != NULL) {
rustsecp256k1_v0_1_0_fe_set_int(rzr, 0);
}
r->infinity = 1;
}
return;
}
rustsecp256k1_v0_1_0_fe_sqr(&i2, &i);
rustsecp256k1_v0_1_0_fe_sqr(&h2, &h);
rustsecp256k1_v0_1_0_fe_mul(&h3, &h, &h2);
if (rzr != NULL) {
*rzr = h;
}
rustsecp256k1_v0_1_0_fe_mul(&r->z, &a->z, &h);
rustsecp256k1_v0_1_0_fe_mul(&t, &u1, &h2);
r->x = t; rustsecp256k1_v0_1_0_fe_mul_int(&r->x, 2); rustsecp256k1_v0_1_0_fe_add(&r->x, &h3); rustsecp256k1_v0_1_0_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_1_0_fe_add(&r->x, &i2);
rustsecp256k1_v0_1_0_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_1_0_fe_add(&r->y, &t); rustsecp256k1_v0_1_0_fe_mul(&r->y, &r->y, &i);
rustsecp256k1_v0_1_0_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_1_0_fe_negate(&h3, &h3, 1);
rustsecp256k1_v0_1_0_fe_add(&r->y, &h3);
}
static void rustsecp256k1_v0_1_0_gej_add_zinv_var(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_ge *b, const rustsecp256k1_v0_1_0_fe *bzinv) {
/* 9 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */
rustsecp256k1_v0_1_0_fe az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
if (b->infinity) {
*r = *a;
return;
}
if (a->infinity) {
rustsecp256k1_v0_1_0_fe bzinv2, bzinv3;
r->infinity = b->infinity;
rustsecp256k1_v0_1_0_fe_sqr(&bzinv2, bzinv);
rustsecp256k1_v0_1_0_fe_mul(&bzinv3, &bzinv2, bzinv);
rustsecp256k1_v0_1_0_fe_mul(&r->x, &b->x, &bzinv2);
rustsecp256k1_v0_1_0_fe_mul(&r->y, &b->y, &bzinv3);
rustsecp256k1_v0_1_0_fe_set_int(&r->z, 1);
return;
}
r->infinity = 0;
/** We need to calculate (rx,ry,rz) = (ax,ay,az) + (bx,by,1/bzinv). Due to
* secp256k1's isomorphism we can multiply the Z coordinates on both sides
* by bzinv, and get: (rx,ry,rz*bzinv) = (ax,ay,az*bzinv) + (bx,by,1).
* This means that (rx,ry,rz) can be calculated as
* (ax,ay,az*bzinv) + (bx,by,1), when not applying the bzinv factor to rz.
* The variable az below holds the modified Z coordinate for a, which is used
* for the computation of rx and ry, but not for rz.
*/
rustsecp256k1_v0_1_0_fe_mul(&az, &a->z, bzinv);
rustsecp256k1_v0_1_0_fe_sqr(&z12, &az);
u1 = a->x; rustsecp256k1_v0_1_0_fe_normalize_weak(&u1);
rustsecp256k1_v0_1_0_fe_mul(&u2, &b->x, &z12);
s1 = a->y; rustsecp256k1_v0_1_0_fe_normalize_weak(&s1);
rustsecp256k1_v0_1_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_1_0_fe_mul(&s2, &s2, &az);
rustsecp256k1_v0_1_0_fe_negate(&h, &u1, 1); rustsecp256k1_v0_1_0_fe_add(&h, &u2);
rustsecp256k1_v0_1_0_fe_negate(&i, &s1, 1); rustsecp256k1_v0_1_0_fe_add(&i, &s2);
if (rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(&h)) {
if (rustsecp256k1_v0_1_0_fe_normalizes_to_zero_var(&i)) {
rustsecp256k1_v0_1_0_gej_double_var(r, a, NULL);
} else {
r->infinity = 1;
}
return;
}
rustsecp256k1_v0_1_0_fe_sqr(&i2, &i);
rustsecp256k1_v0_1_0_fe_sqr(&h2, &h);
rustsecp256k1_v0_1_0_fe_mul(&h3, &h, &h2);
r->z = a->z; rustsecp256k1_v0_1_0_fe_mul(&r->z, &r->z, &h);
rustsecp256k1_v0_1_0_fe_mul(&t, &u1, &h2);
r->x = t; rustsecp256k1_v0_1_0_fe_mul_int(&r->x, 2); rustsecp256k1_v0_1_0_fe_add(&r->x, &h3); rustsecp256k1_v0_1_0_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_1_0_fe_add(&r->x, &i2);
rustsecp256k1_v0_1_0_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_1_0_fe_add(&r->y, &t); rustsecp256k1_v0_1_0_fe_mul(&r->y, &r->y, &i);
rustsecp256k1_v0_1_0_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_1_0_fe_negate(&h3, &h3, 1);
rustsecp256k1_v0_1_0_fe_add(&r->y, &h3);
}
static void rustsecp256k1_v0_1_0_gej_add_ge(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_gej *a, const rustsecp256k1_v0_1_0_ge *b) {
/* Operations: 7 mul, 5 sqr, 4 normalize, 21 mul_int/add/negate/cmov */
static const rustsecp256k1_v0_1_0_fe fe_1 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1);
rustsecp256k1_v0_1_0_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr;
rustsecp256k1_v0_1_0_fe m_alt, rr_alt;
int infinity, degenerate;
VERIFY_CHECK(!b->infinity);
VERIFY_CHECK(a->infinity == 0 || a->infinity == 1);
/** In:
* Eric Brier and Marc Joye, Weierstrass Elliptic Curves and Side-Channel Attacks.
* In D. Naccache and P. Paillier, Eds., Public Key Cryptography, vol. 2274 of Lecture Notes in Computer Science, pages 335-345. Springer-Verlag, 2002.
* we find as solution for a unified addition/doubling formula:
* lambda = ((x1 + x2)^2 - x1 * x2 + a) / (y1 + y2), with a = 0 for secp256k1's curve equation.
* x3 = lambda^2 - (x1 + x2)
* 2*y3 = lambda * (x1 + x2 - 2 * x3) - (y1 + y2).
*
* Substituting x_i = Xi / Zi^2 and yi = Yi / Zi^3, for i=1,2,3, gives:
* U1 = X1*Z2^2, U2 = X2*Z1^2
* S1 = Y1*Z2^3, S2 = Y2*Z1^3
* Z = Z1*Z2
* T = U1+U2
* M = S1+S2
* Q = T*M^2
* R = T^2-U1*U2
* X3 = 4*(R^2-Q)
* Y3 = 4*(R*(3*Q-2*R^2)-M^4)
* Z3 = 2*M*Z
* (Note that the paper uses xi = Xi / Zi and yi = Yi / Zi instead.)
*
* This formula has the benefit of being the same for both addition
* of distinct points and doubling. However, it breaks down in the
* case that either point is infinity, or that y1 = -y2. We handle
* these cases in the following ways:
*
* - If b is infinity we simply bail by means of a VERIFY_CHECK.
*
* - If a is infinity, we detect this, and at the end of the
* computation replace the result (which will be meaningless,
* but we compute to be constant-time) with b.x : b.y : 1.
*
* - If a = -b, we have y1 = -y2, which is a degenerate case.
* But here the answer is infinity, so we simply set the
* infinity flag of the result, overriding the computed values
* without even needing to cmov.
*
* - If y1 = -y2 but x1 != x2, which does occur thanks to certain
* properties of our curve (specifically, 1 has nontrivial cube
* roots in our field, and the curve equation has no x coefficient)
* then the answer is not infinity but also not given by the above
* equation. In this case, we cmov in place an alternate expression
* for lambda. Specifically (y1 - y2)/(x1 - x2). Where both these
* expressions for lambda are defined, they are equal, and can be
* obtained from each other by multiplication by (y1 + y2)/(y1 + y2)
* then substitution of x^3 + 7 for y^2 (using the curve equation).
* For all pairs of nonzero points (a, b) at least one is defined,
* so this covers everything.
*/
rustsecp256k1_v0_1_0_fe_sqr(&zz, &a->z); /* z = Z1^2 */
u1 = a->x; rustsecp256k1_v0_1_0_fe_normalize_weak(&u1); /* u1 = U1 = X1*Z2^2 (1) */
rustsecp256k1_v0_1_0_fe_mul(&u2, &b->x, &zz); /* u2 = U2 = X2*Z1^2 (1) */
s1 = a->y; rustsecp256k1_v0_1_0_fe_normalize_weak(&s1); /* s1 = S1 = Y1*Z2^3 (1) */
rustsecp256k1_v0_1_0_fe_mul(&s2, &b->y, &zz); /* s2 = Y2*Z1^2 (1) */
rustsecp256k1_v0_1_0_fe_mul(&s2, &s2, &a->z); /* s2 = S2 = Y2*Z1^3 (1) */
t = u1; rustsecp256k1_v0_1_0_fe_add(&t, &u2); /* t = T = U1+U2 (2) */
m = s1; rustsecp256k1_v0_1_0_fe_add(&m, &s2); /* m = M = S1+S2 (2) */
rustsecp256k1_v0_1_0_fe_sqr(&rr, &t); /* rr = T^2 (1) */
rustsecp256k1_v0_1_0_fe_negate(&m_alt, &u2, 1); /* Malt = -X2*Z1^2 */
rustsecp256k1_v0_1_0_fe_mul(&tt, &u1, &m_alt); /* tt = -U1*U2 (2) */
rustsecp256k1_v0_1_0_fe_add(&rr, &tt); /* rr = R = T^2-U1*U2 (3) */
/** If lambda = R/M = 0/0 we have a problem (except in the "trivial"
* case that Z = z1z2 = 0, and this is special-cased later on). */
degenerate = rustsecp256k1_v0_1_0_fe_normalizes_to_zero(&m) &
rustsecp256k1_v0_1_0_fe_normalizes_to_zero(&rr);
/* This only occurs when y1 == -y2 and x1^3 == x2^3, but x1 != x2.
* This means either x1 == beta*x2 or beta*x1 == x2, where beta is
* a nontrivial cube root of one. In either case, an alternate
* non-indeterminate expression for lambda is (y1 - y2)/(x1 - x2),
* so we set R/M equal to this. */
rr_alt = s1;
rustsecp256k1_v0_1_0_fe_mul_int(&rr_alt, 2); /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */
rustsecp256k1_v0_1_0_fe_add(&m_alt, &u1); /* Malt = X1*Z2^2 - X2*Z1^2 */
rustsecp256k1_v0_1_0_fe_cmov(&rr_alt, &rr, !degenerate);
rustsecp256k1_v0_1_0_fe_cmov(&m_alt, &m, !degenerate);
/* Now Ralt / Malt = lambda and is guaranteed not to be 0/0.
* From here on out Ralt and Malt represent the numerator
* and denominator of lambda; R and M represent the explicit
* expressions x1^2 + x2^2 + x1x2 and y1 + y2. */
rustsecp256k1_v0_1_0_fe_sqr(&n, &m_alt); /* n = Malt^2 (1) */
rustsecp256k1_v0_1_0_fe_mul(&q, &n, &t); /* q = Q = T*Malt^2 (1) */
/* These two lines use the observation that either M == Malt or M == 0,
* so M^3 * Malt is either Malt^4 (which is computed by squaring), or
* zero (which is "computed" by cmov). So the cost is one squaring
* versus two multiplications. */
rustsecp256k1_v0_1_0_fe_sqr(&n, &n);
rustsecp256k1_v0_1_0_fe_cmov(&n, &m, degenerate); /* n = M^3 * Malt (2) */
rustsecp256k1_v0_1_0_fe_sqr(&t, &rr_alt); /* t = Ralt^2 (1) */
rustsecp256k1_v0_1_0_fe_mul(&r->z, &a->z, &m_alt); /* r->z = Malt*Z (1) */
infinity = rustsecp256k1_v0_1_0_fe_normalizes_to_zero(&r->z) * (1 - a->infinity);
rustsecp256k1_v0_1_0_fe_mul_int(&r->z, 2); /* r->z = Z3 = 2*Malt*Z (2) */
rustsecp256k1_v0_1_0_fe_negate(&q, &q, 1); /* q = -Q (2) */
rustsecp256k1_v0_1_0_fe_add(&t, &q); /* t = Ralt^2-Q (3) */
rustsecp256k1_v0_1_0_fe_normalize_weak(&t);
r->x = t; /* r->x = Ralt^2-Q (1) */
rustsecp256k1_v0_1_0_fe_mul_int(&t, 2); /* t = 2*x3 (2) */
rustsecp256k1_v0_1_0_fe_add(&t, &q); /* t = 2*x3 - Q: (4) */
rustsecp256k1_v0_1_0_fe_mul(&t, &t, &rr_alt); /* t = Ralt*(2*x3 - Q) (1) */
rustsecp256k1_v0_1_0_fe_add(&t, &n); /* t = Ralt*(2*x3 - Q) + M^3*Malt (3) */
rustsecp256k1_v0_1_0_fe_negate(&r->y, &t, 3); /* r->y = Ralt*(Q - 2x3) - M^3*Malt (4) */
rustsecp256k1_v0_1_0_fe_normalize_weak(&r->y);
rustsecp256k1_v0_1_0_fe_mul_int(&r->x, 4); /* r->x = X3 = 4*(Ralt^2-Q) */
rustsecp256k1_v0_1_0_fe_mul_int(&r->y, 4); /* r->y = Y3 = 4*Ralt*(Q - 2x3) - 4*M^3*Malt (4) */
/** In case a->infinity == 1, replace r with (b->x, b->y, 1). */
rustsecp256k1_v0_1_0_fe_cmov(&r->x, &b->x, a->infinity);
rustsecp256k1_v0_1_0_fe_cmov(&r->y, &b->y, a->infinity);
rustsecp256k1_v0_1_0_fe_cmov(&r->z, &fe_1, a->infinity);
r->infinity = infinity;
}
static void rustsecp256k1_v0_1_0_gej_rescale(rustsecp256k1_v0_1_0_gej *r, const rustsecp256k1_v0_1_0_fe *s) {
/* Operations: 4 mul, 1 sqr */
rustsecp256k1_v0_1_0_fe zz;
VERIFY_CHECK(!rustsecp256k1_v0_1_0_fe_is_zero(s));
rustsecp256k1_v0_1_0_fe_sqr(&zz, s);
rustsecp256k1_v0_1_0_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */
rustsecp256k1_v0_1_0_fe_mul(&r->y, &r->y, &zz);
rustsecp256k1_v0_1_0_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */
rustsecp256k1_v0_1_0_fe_mul(&r->z, &r->z, s); /* r->z *= s */
}
static void rustsecp256k1_v0_1_0_ge_to_storage(rustsecp256k1_v0_1_0_ge_storage *r, const rustsecp256k1_v0_1_0_ge *a) {
rustsecp256k1_v0_1_0_fe x, y;
VERIFY_CHECK(!a->infinity);
x = a->x;
rustsecp256k1_v0_1_0_fe_normalize(&x);
y = a->y;
rustsecp256k1_v0_1_0_fe_normalize(&y);
rustsecp256k1_v0_1_0_fe_to_storage(&r->x, &x);
rustsecp256k1_v0_1_0_fe_to_storage(&r->y, &y);
}
static void rustsecp256k1_v0_1_0_ge_from_storage(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_ge_storage *a) {
rustsecp256k1_v0_1_0_fe_from_storage(&r->x, &a->x);
rustsecp256k1_v0_1_0_fe_from_storage(&r->y, &a->y);
r->infinity = 0;
}
static SECP256K1_INLINE void rustsecp256k1_v0_1_0_ge_storage_cmov(rustsecp256k1_v0_1_0_ge_storage *r, const rustsecp256k1_v0_1_0_ge_storage *a, int flag) {
rustsecp256k1_v0_1_0_fe_storage_cmov(&r->x, &a->x, flag);
rustsecp256k1_v0_1_0_fe_storage_cmov(&r->y, &a->y, flag);
}
#ifdef USE_ENDOMORPHISM
static void rustsecp256k1_v0_1_0_ge_mul_lambda(rustsecp256k1_v0_1_0_ge *r, const rustsecp256k1_v0_1_0_ge *a) {
static const rustsecp256k1_v0_1_0_fe beta = SECP256K1_FE_CONST(
0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul,
0x9cf04975ul, 0x12f58995ul, 0xc1396c28ul, 0x719501eeul
);
*r = *a;
rustsecp256k1_v0_1_0_fe_mul(&r->x, &r->x, &beta);
}
#endif
static int rustsecp256k1_v0_1_0_gej_has_quad_y_var(const rustsecp256k1_v0_1_0_gej *a) {
rustsecp256k1_v0_1_0_fe yz;
if (a->infinity) {
return 0;
}
/* We rely on the fact that the Jacobi symbol of 1 / a->z^3 is the same as
* that of a->z. Thus a->y / a->z^3 is a quadratic residue iff a->y * a->z
is */
rustsecp256k1_v0_1_0_fe_mul(&yz, &a->y, &a->z);
return rustsecp256k1_v0_1_0_fe_is_quad_var(&yz);
}
#endif /* SECP256K1_GROUP_IMPL_H */
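
Editor's note (illustrative, not part of the diff): one detail worth calling out in the implementation above is that rustsecp256k1_v0_1_0_ge_set_all_gej_var converts a whole batch of Jacobian points using only a single field inversion, by accumulating prefix products of the z coordinates and unwinding them with that one inverse (Montgomery's batch-inversion trick), skipping over points at infinity. Below is a self-contained sketch of the same trick on plain integers modulo a small prime; none of it is library code, and the prime and inputs are arbitrary.

/* Illustrative sketch of Montgomery's batch-inversion trick used by
 * ge_set_all_gej_var: invert n nonzero values with one modular inversion
 * plus roughly 3*(n-1) multiplications. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define P 10007ULL  /* small prime chosen only for this example */

static uint64_t mulmod(uint64_t a, uint64_t b) { return (a * b) % P; }

/* Single inverse via Fermat's little theorem. */
static uint64_t invmod(uint64_t a) {
    uint64_t r = 1, e = P - 2;
    a %= P;
    while (e) {
        if (e & 1) r = mulmod(r, a);
        a = mulmod(a, a);
        e >>= 1;
    }
    return r;
}

static void batch_inverse(uint64_t *out, const uint64_t *in, size_t n) {
    uint64_t acc = 1, t;
    size_t i;
    /* Forward pass: out[i] = in[0] * ... * in[i] (prefix products). */
    for (i = 0; i < n; i++) {
        acc = mulmod(acc, in[i]);
        out[i] = acc;
    }
    /* One inversion of the total product... */
    acc = invmod(acc);
    /* ...then a backward pass peels off one inverse per element. */
    for (i = n; i-- > 0; ) {
        t = (i == 0) ? acc : mulmod(acc, out[i - 1]);
        acc = mulmod(acc, in[i]);   /* acc becomes inv(prefix[i-1]) */
        out[i] = t;
    }
}

int main(void) {
    uint64_t in[4] = {3, 1234, 9999, 42}, out[4];
    size_t i;
    batch_inverse(out, in, 4);
    for (i = 0; i < 4; i++)
        printf("%llu * %llu mod %llu = %llu\n",
               (unsigned long long)in[i], (unsigned long long)out[i],
               (unsigned long long)P, (unsigned long long)mulmod(in[i], out[i]));
    return 0;
}

The payoff is the standard one: one inversion plus about 3(n-1) multiplications instead of n inversions, which matters because a field inversion is far more expensive than a multiplication.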

View File

@ -0,0 +1,41 @@
/**********************************************************************
* Copyright (c) 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_HASH_H
#define SECP256K1_HASH_H
#include <stdlib.h>
#include <stdint.h>
typedef struct {
uint32_t s[8];
uint32_t buf[16]; /* In big endian */
size_t bytes;
} rustsecp256k1_v0_1_0_sha256;
static void rustsecp256k1_v0_1_0_sha256_initialize(rustsecp256k1_v0_1_0_sha256 *hash);
static void rustsecp256k1_v0_1_0_sha256_write(rustsecp256k1_v0_1_0_sha256 *hash, const unsigned char *data, size_t size);
static void rustsecp256k1_v0_1_0_sha256_finalize(rustsecp256k1_v0_1_0_sha256 *hash, unsigned char *out32);
typedef struct {
rustsecp256k1_v0_1_0_sha256 inner, outer;
} rustsecp256k1_v0_1_0_hmac_sha256;
static void rustsecp256k1_v0_1_0_hmac_sha256_initialize(rustsecp256k1_v0_1_0_hmac_sha256 *hash, const unsigned char *key, size_t size);
static void rustsecp256k1_v0_1_0_hmac_sha256_write(rustsecp256k1_v0_1_0_hmac_sha256 *hash, const unsigned char *data, size_t size);
static void rustsecp256k1_v0_1_0_hmac_sha256_finalize(rustsecp256k1_v0_1_0_hmac_sha256 *hash, unsigned char *out32);
typedef struct {
unsigned char v[32];
unsigned char k[32];
int retry;
} rustsecp256k1_v0_1_0_rfc6979_hmac_sha256;
static void rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen);
static void rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen);
static void rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 *rng);
#endif /* SECP256K1_HASH_H */

View File

@ -33,7 +33,7 @@
#define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
#endif
static void secp256k1_sha256_initialize(secp256k1_sha256 *hash) {
static void rustsecp256k1_v0_1_0_sha256_initialize(rustsecp256k1_v0_1_0_sha256 *hash) {
hash->s[0] = 0x6a09e667ul;
hash->s[1] = 0xbb67ae85ul;
hash->s[2] = 0x3c6ef372ul;
@ -46,7 +46,7 @@ static void secp256k1_sha256_initialize(secp256k1_sha256 *hash) {
}
/** Perform one SHA-256 transformation, processing 16 big endian 32-bit words. */
static void secp256k1_sha256_transform(uint32_t* s, const uint32_t* chunk) {
static void rustsecp256k1_v0_1_0_sha256_transform(uint32_t* s, const uint32_t* chunk) {
uint32_t a = s[0], b = s[1], c = s[2], d = s[3], e = s[4], f = s[5], g = s[6], h = s[7];
uint32_t w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15;
@ -128,7 +128,7 @@ static void secp256k1_sha256_transform(uint32_t* s, const uint32_t* chunk) {
s[7] += h;
}
static void secp256k1_sha256_write(secp256k1_sha256 *hash, const unsigned char *data, size_t len) {
static void rustsecp256k1_v0_1_0_sha256_write(rustsecp256k1_v0_1_0_sha256 *hash, const unsigned char *data, size_t len) {
size_t bufsize = hash->bytes & 0x3F;
hash->bytes += len;
while (bufsize + len >= 64) {
@ -137,7 +137,7 @@ static void secp256k1_sha256_write(secp256k1_sha256 *hash, const unsigned char *
memcpy(((unsigned char*)hash->buf) + bufsize, data, chunk_len);
data += chunk_len;
len -= chunk_len;
secp256k1_sha256_transform(hash->s, hash->buf);
rustsecp256k1_v0_1_0_sha256_transform(hash->s, hash->buf);
bufsize = 0;
}
if (len) {
@ -146,15 +146,15 @@ static void secp256k1_sha256_write(secp256k1_sha256 *hash, const unsigned char *
}
}
static void secp256k1_sha256_finalize(secp256k1_sha256 *hash, unsigned char *out32) {
static void rustsecp256k1_v0_1_0_sha256_finalize(rustsecp256k1_v0_1_0_sha256 *hash, unsigned char *out32) {
static const unsigned char pad[64] = {0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
uint32_t sizedesc[2];
uint32_t out[8];
int i = 0;
sizedesc[0] = BE32(hash->bytes >> 29);
sizedesc[1] = BE32(hash->bytes << 3);
secp256k1_sha256_write(hash, pad, 1 + ((119 - (hash->bytes % 64)) % 64));
secp256k1_sha256_write(hash, (const unsigned char*)sizedesc, 8);
rustsecp256k1_v0_1_0_sha256_write(hash, pad, 1 + ((119 - (hash->bytes % 64)) % 64));
rustsecp256k1_v0_1_0_sha256_write(hash, (const unsigned char*)sizedesc, 8);
for (i = 0; i < 8; i++) {
out[i] = BE32(hash->s[i]);
hash->s[i] = 0;
@ -162,49 +162,49 @@ static void secp256k1_sha256_finalize(secp256k1_sha256 *hash, unsigned char *out
memcpy(out32, (const unsigned char*)out, 32);
}
static void secp256k1_hmac_sha256_initialize(secp256k1_hmac_sha256 *hash, const unsigned char *key, size_t keylen) {
static void rustsecp256k1_v0_1_0_hmac_sha256_initialize(rustsecp256k1_v0_1_0_hmac_sha256 *hash, const unsigned char *key, size_t keylen) {
size_t n;
unsigned char rkey[64];
if (keylen <= sizeof(rkey)) {
memcpy(rkey, key, keylen);
memset(rkey + keylen, 0, sizeof(rkey) - keylen);
} else {
secp256k1_sha256 sha256;
secp256k1_sha256_initialize(&sha256);
secp256k1_sha256_write(&sha256, key, keylen);
secp256k1_sha256_finalize(&sha256, rkey);
rustsecp256k1_v0_1_0_sha256 sha256;
rustsecp256k1_v0_1_0_sha256_initialize(&sha256);
rustsecp256k1_v0_1_0_sha256_write(&sha256, key, keylen);
rustsecp256k1_v0_1_0_sha256_finalize(&sha256, rkey);
memset(rkey + 32, 0, 32);
}
secp256k1_sha256_initialize(&hash->outer);
rustsecp256k1_v0_1_0_sha256_initialize(&hash->outer);
for (n = 0; n < sizeof(rkey); n++) {
rkey[n] ^= 0x5c;
}
secp256k1_sha256_write(&hash->outer, rkey, sizeof(rkey));
rustsecp256k1_v0_1_0_sha256_write(&hash->outer, rkey, sizeof(rkey));
secp256k1_sha256_initialize(&hash->inner);
rustsecp256k1_v0_1_0_sha256_initialize(&hash->inner);
for (n = 0; n < sizeof(rkey); n++) {
rkey[n] ^= 0x5c ^ 0x36;
}
secp256k1_sha256_write(&hash->inner, rkey, sizeof(rkey));
rustsecp256k1_v0_1_0_sha256_write(&hash->inner, rkey, sizeof(rkey));
memset(rkey, 0, sizeof(rkey));
}
static void secp256k1_hmac_sha256_write(secp256k1_hmac_sha256 *hash, const unsigned char *data, size_t size) {
secp256k1_sha256_write(&hash->inner, data, size);
static void rustsecp256k1_v0_1_0_hmac_sha256_write(rustsecp256k1_v0_1_0_hmac_sha256 *hash, const unsigned char *data, size_t size) {
rustsecp256k1_v0_1_0_sha256_write(&hash->inner, data, size);
}
static void secp256k1_hmac_sha256_finalize(secp256k1_hmac_sha256 *hash, unsigned char *out32) {
static void rustsecp256k1_v0_1_0_hmac_sha256_finalize(rustsecp256k1_v0_1_0_hmac_sha256 *hash, unsigned char *out32) {
unsigned char temp[32];
secp256k1_sha256_finalize(&hash->inner, temp);
secp256k1_sha256_write(&hash->outer, temp, 32);
rustsecp256k1_v0_1_0_sha256_finalize(&hash->inner, temp);
rustsecp256k1_v0_1_0_sha256_write(&hash->outer, temp, 32);
memset(temp, 0, 32);
secp256k1_sha256_finalize(&hash->outer, out32);
rustsecp256k1_v0_1_0_sha256_finalize(&hash->outer, out32);
}
static void secp256k1_rfc6979_hmac_sha256_initialize(secp256k1_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen) {
secp256k1_hmac_sha256 hmac;
static void rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen) {
rustsecp256k1_v0_1_0_hmac_sha256 hmac;
static const unsigned char zero[1] = {0x00};
static const unsigned char one[1] = {0x01};
@ -212,47 +212,47 @@ static void secp256k1_rfc6979_hmac_sha256_initialize(secp256k1_rfc6979_hmac_sha2
memset(rng->k, 0x00, 32); /* RFC6979 3.2.c. */
/* RFC6979 3.2.d. */
secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
secp256k1_hmac_sha256_write(&hmac, zero, 1);
secp256k1_hmac_sha256_write(&hmac, key, keylen);
secp256k1_hmac_sha256_finalize(&hmac, rng->k);
secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
secp256k1_hmac_sha256_finalize(&hmac, rng->v);
rustsecp256k1_v0_1_0_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, zero, 1);
rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, key, keylen);
rustsecp256k1_v0_1_0_hmac_sha256_finalize(&hmac, rng->k);
rustsecp256k1_v0_1_0_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_1_0_hmac_sha256_finalize(&hmac, rng->v);
/* RFC6979 3.2.f. */
secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
secp256k1_hmac_sha256_write(&hmac, one, 1);
secp256k1_hmac_sha256_write(&hmac, key, keylen);
secp256k1_hmac_sha256_finalize(&hmac, rng->k);
secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
secp256k1_hmac_sha256_finalize(&hmac, rng->v);
rustsecp256k1_v0_1_0_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, one, 1);
rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, key, keylen);
rustsecp256k1_v0_1_0_hmac_sha256_finalize(&hmac, rng->k);
rustsecp256k1_v0_1_0_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_1_0_hmac_sha256_finalize(&hmac, rng->v);
rng->retry = 0;
}
static void secp256k1_rfc6979_hmac_sha256_generate(secp256k1_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen) {
static void rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen) {
/* RFC6979 3.2.h. */
static const unsigned char zero[1] = {0x00};
if (rng->retry) {
secp256k1_hmac_sha256 hmac;
secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
secp256k1_hmac_sha256_write(&hmac, zero, 1);
secp256k1_hmac_sha256_finalize(&hmac, rng->k);
secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
secp256k1_hmac_sha256_finalize(&hmac, rng->v);
rustsecp256k1_v0_1_0_hmac_sha256 hmac;
rustsecp256k1_v0_1_0_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, zero, 1);
rustsecp256k1_v0_1_0_hmac_sha256_finalize(&hmac, rng->k);
rustsecp256k1_v0_1_0_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_1_0_hmac_sha256_finalize(&hmac, rng->v);
}
while (outlen > 0) {
secp256k1_hmac_sha256 hmac;
rustsecp256k1_v0_1_0_hmac_sha256 hmac;
int now = outlen;
secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
secp256k1_hmac_sha256_finalize(&hmac, rng->v);
rustsecp256k1_v0_1_0_hmac_sha256_initialize(&hmac, rng->k, 32);
rustsecp256k1_v0_1_0_hmac_sha256_write(&hmac, rng->v, 32);
rustsecp256k1_v0_1_0_hmac_sha256_finalize(&hmac, rng->v);
if (now > 32) {
now = 32;
}
@ -264,7 +264,7 @@ static void secp256k1_rfc6979_hmac_sha256_generate(secp256k1_rfc6979_hmac_sha256
rng->retry = 1;
}
static void secp256k1_rfc6979_hmac_sha256_finalize(secp256k1_rfc6979_hmac_sha256 *rng) {
static void rustsecp256k1_v0_1_0_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_1_0_rfc6979_hmac_sha256 *rng) {
memset(rng->k, 0, 32);
memset(rng->v, 0, 32);
rng->retry = 0;
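
Editor's note (illustrative, not part of the diff): a small point about the HMAC key schedule above — the padded key buffer rkey is XORed with 0x5c to key the outer hash and then XORed again with 0x5c ^ 0x36, so the very same buffer becomes the inner (0x36) pad without keeping a second copy. A trivial standalone check of that identity, for illustration only:

/* Tiny check of the key-pad trick in hmac_sha256_initialize above:
 * for a raw key byte k, k ^ 0x5c is the outer pad and
 * (k ^ 0x5c) ^ (0x5c ^ 0x36) == k ^ 0x36 is the inner pad, so one
 * buffer serves both passes. */
#include <stdio.h>

int main(void) {
    unsigned int k;
    for (k = 0; k < 256; k++) {
        unsigned char outer = (unsigned char)(k ^ 0x5c);
        unsigned char inner = (unsigned char)(outer ^ (0x5c ^ 0x36));
        if (inner != (unsigned char)(k ^ 0x36)) {
            printf("mismatch at %u\n", k);
            return 1;
        }
    }
    printf("outer/inner HMAC pads derive from one buffer for all 256 byte values\n");
    return 0;
}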

View File

@ -69,7 +69,7 @@ public class NativeSecp256k1 {
r.lock();
try {
return secp256k1_ecdsa_verify(byteBuff, Secp256k1Context.getContext(), signature.length, pub.length) == 1;
return rustsecp256k1_v0_1_0_ecdsa_verify(byteBuff, Secp256k1Context.getContext(), signature.length, pub.length) == 1;
} finally {
r.unlock();
}
@ -101,7 +101,7 @@ public class NativeSecp256k1 {
r.lock();
try {
retByteArray = secp256k1_ecdsa_sign(byteBuff, Secp256k1Context.getContext());
retByteArray = rustsecp256k1_v0_1_0_ecdsa_sign(byteBuff, Secp256k1Context.getContext());
} finally {
r.unlock();
}
@ -134,7 +134,7 @@ public class NativeSecp256k1 {
r.lock();
try {
return secp256k1_ec_seckey_verify(byteBuff,Secp256k1Context.getContext()) == 1;
return rustsecp256k1_v0_1_0_ec_seckey_verify(byteBuff,Secp256k1Context.getContext()) == 1;
} finally {
r.unlock();
}
@ -166,7 +166,7 @@ public class NativeSecp256k1 {
r.lock();
try {
retByteArray = secp256k1_ec_pubkey_create(byteBuff, Secp256k1Context.getContext());
retByteArray = rustsecp256k1_v0_1_0_ec_pubkey_create(byteBuff, Secp256k1Context.getContext());
} finally {
r.unlock();
}
@ -187,7 +187,7 @@ public class NativeSecp256k1 {
public static synchronized void cleanup() {
w.lock();
try {
secp256k1_destroy_context(Secp256k1Context.getContext());
rustsecp256k1_v0_1_0_destroy_context(Secp256k1Context.getContext());
} finally {
w.unlock();
}
@ -196,7 +196,7 @@ public class NativeSecp256k1 {
public static long cloneContext() {
r.lock();
try {
return secp256k1_ctx_clone(Secp256k1Context.getContext());
return rustsecp256k1_v0_1_0_ctx_clone(Secp256k1Context.getContext());
} finally { r.unlock(); }
}
@ -222,7 +222,7 @@ public class NativeSecp256k1 {
byte[][] retByteArray;
r.lock();
try {
retByteArray = secp256k1_privkey_tweak_mul(byteBuff,Secp256k1Context.getContext());
retByteArray = rustsecp256k1_v0_1_0_privkey_tweak_mul(byteBuff,Secp256k1Context.getContext());
} finally {
r.unlock();
}
@ -261,7 +261,7 @@ public class NativeSecp256k1 {
byte[][] retByteArray;
r.lock();
try {
retByteArray = secp256k1_privkey_tweak_add(byteBuff,Secp256k1Context.getContext());
retByteArray = rustsecp256k1_v0_1_0_privkey_tweak_add(byteBuff,Secp256k1Context.getContext());
} finally {
r.unlock();
}
@ -300,7 +300,7 @@ public class NativeSecp256k1 {
byte[][] retByteArray;
r.lock();
try {
retByteArray = secp256k1_pubkey_tweak_add(byteBuff,Secp256k1Context.getContext(), pubkey.length);
retByteArray = rustsecp256k1_v0_1_0_pubkey_tweak_add(byteBuff,Secp256k1Context.getContext(), pubkey.length);
} finally {
r.unlock();
}
@ -339,7 +339,7 @@ public class NativeSecp256k1 {
byte[][] retByteArray;
r.lock();
try {
retByteArray = secp256k1_pubkey_tweak_mul(byteBuff,Secp256k1Context.getContext(), pubkey.length);
retByteArray = rustsecp256k1_v0_1_0_pubkey_tweak_mul(byteBuff,Secp256k1Context.getContext(), pubkey.length);
} finally {
r.unlock();
}
@ -378,7 +378,7 @@ public class NativeSecp256k1 {
byte[][] retByteArray;
r.lock();
try {
retByteArray = secp256k1_ecdh(byteBuff, Secp256k1Context.getContext(), pubkey.length);
retByteArray = rustsecp256k1_v0_1_0_ecdh(byteBuff, Secp256k1Context.getContext(), pubkey.length);
} finally {
r.unlock();
}
@ -411,36 +411,36 @@ public class NativeSecp256k1 {
w.lock();
try {
return secp256k1_context_randomize(byteBuff, Secp256k1Context.getContext()) == 1;
return rustsecp256k1_v0_1_0_context_randomize(byteBuff, Secp256k1Context.getContext()) == 1;
} finally {
w.unlock();
}
}
private static native long secp256k1_ctx_clone(long context);
private static native long rustsecp256k1_v0_1_0_ctx_clone(long context);
private static native int secp256k1_context_randomize(ByteBuffer byteBuff, long context);
private static native int rustsecp256k1_v0_1_0_context_randomize(ByteBuffer byteBuff, long context);
private static native byte[][] secp256k1_privkey_tweak_add(ByteBuffer byteBuff, long context);
private static native byte[][] rustsecp256k1_v0_1_0_privkey_tweak_add(ByteBuffer byteBuff, long context);
private static native byte[][] secp256k1_privkey_tweak_mul(ByteBuffer byteBuff, long context);
private static native byte[][] rustsecp256k1_v0_1_0_privkey_tweak_mul(ByteBuffer byteBuff, long context);
private static native byte[][] secp256k1_pubkey_tweak_add(ByteBuffer byteBuff, long context, int pubLen);
private static native byte[][] rustsecp256k1_v0_1_0_pubkey_tweak_add(ByteBuffer byteBuff, long context, int pubLen);
private static native byte[][] secp256k1_pubkey_tweak_mul(ByteBuffer byteBuff, long context, int pubLen);
private static native byte[][] rustsecp256k1_v0_1_0_pubkey_tweak_mul(ByteBuffer byteBuff, long context, int pubLen);
private static native void secp256k1_destroy_context(long context);
private static native void rustsecp256k1_v0_1_0_destroy_context(long context);
private static native int secp256k1_ecdsa_verify(ByteBuffer byteBuff, long context, int sigLen, int pubLen);
private static native int rustsecp256k1_v0_1_0_ecdsa_verify(ByteBuffer byteBuff, long context, int sigLen, int pubLen);
private static native byte[][] secp256k1_ecdsa_sign(ByteBuffer byteBuff, long context);
private static native byte[][] rustsecp256k1_v0_1_0_ecdsa_sign(ByteBuffer byteBuff, long context);
private static native int secp256k1_ec_seckey_verify(ByteBuffer byteBuff, long context);
private static native int rustsecp256k1_v0_1_0_ec_seckey_verify(ByteBuffer byteBuff, long context);
private static native byte[][] secp256k1_ec_pubkey_create(ByteBuffer byteBuff, long context);
private static native byte[][] rustsecp256k1_v0_1_0_ec_pubkey_create(ByteBuffer byteBuff, long context);
private static native byte[][] secp256k1_ec_pubkey_parse(ByteBuffer byteBuff, long context, int inputLen);
private static native byte[][] rustsecp256k1_v0_1_0_ec_pubkey_parse(ByteBuffer byteBuff, long context, int inputLen);
private static native byte[][] secp256k1_ecdh(ByteBuffer byteBuff, long context, int inputLen);
private static native byte[][] rustsecp256k1_v0_1_0_ecdh(ByteBuffer byteBuff, long context, int inputLen);
}

View File

@ -29,7 +29,7 @@ public class Secp256k1Context {
long contextRef = -1;
try {
System.loadLibrary("secp256k1");
contextRef = secp256k1_init_context();
contextRef = rustsecp256k1_v0_1_0_init_context();
} catch (UnsatisfiedLinkError e) {
System.out.println("UnsatisfiedLinkError: " + e.toString());
isEnabled = false;
@ -47,5 +47,5 @@ public class Secp256k1Context {
return context;
}
private static native long secp256k1_init_context();
private static native long rustsecp256k1_v0_1_0_init_context();
}

View File

@ -7,12 +7,12 @@
#include "include/secp256k1_recovery.h"
SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ctx_1clone
SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ctx_1clone
(JNIEnv* env, jclass classObject, jlong ctx_l)
{
const secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
const rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l;
jlong ctx_clone_l = (uintptr_t) secp256k1_context_clone(ctx);
jlong ctx_clone_l = (uintptr_t) rustsecp256k1_v0_1_0_context_clone(ctx);
(void)classObject;(void)env;
@ -20,48 +20,48 @@ SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ctx_1clo
}
SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1context_1randomize
SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1context_1randomize
(JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
{
secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l;
const unsigned char* seed = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
(void)classObject;
return secp256k1_context_randomize(ctx, seed);
return rustsecp256k1_v0_1_0_context_randomize(ctx, seed);
}
SECP256K1_API void JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1destroy_1context
SECP256K1_API void JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1destroy_1context
(JNIEnv* env, jclass classObject, jlong ctx_l)
{
secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l;
secp256k1_context_destroy(ctx);
rustsecp256k1_v0_1_0_context_destroy(ctx);
(void)classObject;(void)env;
}
SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1verify
SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ecdsa_1verify
(JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint siglen, jint publen)
{
secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l;
unsigned char* data = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
const unsigned char* sigdata = { (unsigned char*) (data + 32) };
const unsigned char* pubdata = { (unsigned char*) (data + siglen + 32) };
secp256k1_ecdsa_signature sig;
secp256k1_pubkey pubkey;
rustsecp256k1_v0_1_0_ecdsa_signature sig;
rustsecp256k1_v0_1_0_pubkey pubkey;
int ret = secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigdata, siglen);
int ret = rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigdata, siglen);
if( ret ) {
ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pubdata, publen);
ret = rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, pubdata, publen);
if( ret ) {
ret = secp256k1_ecdsa_verify(ctx, &sig, data, &pubkey);
ret = rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, data, &pubkey);
}
}
@ -70,10 +70,10 @@ SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1ve
return ret;
}
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1sign
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ecdsa_1sign
(JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
{
secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l;
unsigned char* data = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
unsigned char* secKey = (unsigned char*) (data + 32);
@ -81,15 +81,15 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1e
jbyteArray sigArray, intsByteArray;
unsigned char intsarray[2];
secp256k1_ecdsa_signature sig[72];
rustsecp256k1_v0_1_0_ecdsa_signature sig[72];
int ret = secp256k1_ecdsa_sign(ctx, sig, data, secKey, NULL, NULL);
int ret = rustsecp256k1_v0_1_0_ecdsa_sign(ctx, sig, data, secKey, NULL, NULL);
unsigned char outputSer[72];
size_t outputLen = 72;
if( ret ) {
int ret2 = secp256k1_ecdsa_signature_serialize_der(ctx,outputSer, &outputLen, sig ); (void)ret2;
int ret2 = rustsecp256k1_v0_1_0_ecdsa_signature_serialize_der(ctx,outputSer, &outputLen, sig ); (void)ret2;
}
intsarray[0] = outputLen;
@ -112,36 +112,36 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1e
return retArray;
}
SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1seckey_1verify
SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ec_1seckey_1verify
(JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
{
secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l;
unsigned char* secKey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
(void)classObject;
return secp256k1_ec_seckey_verify(ctx, secKey);
return rustsecp256k1_v0_1_0_ec_seckey_verify(ctx, secKey);
}
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1pubkey_1create
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ec_1pubkey_1create
(JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
{
secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l;
const unsigned char* secKey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
secp256k1_pubkey pubkey;
rustsecp256k1_v0_1_0_pubkey pubkey;
jobjectArray retArray;
jbyteArray pubkeyArray, intsByteArray;
unsigned char intsarray[2];
int ret = secp256k1_ec_pubkey_create(ctx, &pubkey, secKey);
int ret = rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey, secKey);
unsigned char outputSer[65];
size_t outputLen = 65;
if( ret ) {
int ret2 = secp256k1_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2;
int ret2 = rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2;
}
intsarray[0] = outputLen;
@ -165,10 +165,10 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1e
}
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1add
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1privkey_1tweak_1add
(JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
{
secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l;
unsigned char* privkey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
const unsigned char* tweak = (unsigned char*) (privkey + 32);
@ -178,7 +178,7 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1p
int privkeylen = 32;
int ret = secp256k1_ec_privkey_tweak_add(ctx, privkey, tweak);
int ret = rustsecp256k1_v0_1_0_ec_privkey_tweak_add(ctx, privkey, tweak);
intsarray[0] = privkeylen;
intsarray[1] = ret;
@ -200,10 +200,10 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1p
return retArray;
}
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1mul
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1privkey_1tweak_1mul
(JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
{
secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l;
unsigned char* privkey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
const unsigned char* tweak = (unsigned char*) (privkey + 32);
@ -213,7 +213,7 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1p
int privkeylen = 32;
int ret = secp256k1_ec_privkey_tweak_mul(ctx, privkey, tweak);
int ret = rustsecp256k1_v0_1_0_ec_privkey_tweak_mul(ctx, privkey, tweak);
intsarray[0] = privkeylen;
intsarray[1] = ret;
@ -235,11 +235,11 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1p
return retArray;
}
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1add
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1pubkey_1tweak_1add
(JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen)
{
secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
/* secp256k1_pubkey* pubkey = (secp256k1_pubkey*) (*env)->GetDirectBufferAddress(env, byteBufferObject);*/
rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l;
/* rustsecp256k1_v0_1_0_pubkey* pubkey = (rustsecp256k1_v0_1_0_pubkey*) (*env)->GetDirectBufferAddress(env, byteBufferObject);*/
unsigned char* pkey = (*env)->GetDirectBufferAddress(env, byteBufferObject);
const unsigned char* tweak = (unsigned char*) (pkey + publen);
@ -249,15 +249,15 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1p
unsigned char outputSer[65];
size_t outputLen = 65;
secp256k1_pubkey pubkey;
int ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pkey, publen);
rustsecp256k1_v0_1_0_pubkey pubkey;
int ret = rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, pkey, publen);
if( ret ) {
ret = secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, tweak);
ret = rustsecp256k1_v0_1_0_ec_pubkey_tweak_add(ctx, &pubkey, tweak);
}
if( ret ) {
int ret2 = secp256k1_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2;
int ret2 = rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2;
}
intsarray[0] = outputLen;
@ -280,10 +280,10 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1p
return retArray;
}
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1mul
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1pubkey_1tweak_1mul
(JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen)
{
secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l;
unsigned char* pkey = (*env)->GetDirectBufferAddress(env, byteBufferObject);
const unsigned char* tweak = (unsigned char*) (pkey + publen);
@ -293,15 +293,15 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1p
unsigned char outputSer[65];
size_t outputLen = 65;
secp256k1_pubkey pubkey;
int ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pkey, publen);
rustsecp256k1_v0_1_0_pubkey pubkey;
int ret = rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, pkey, publen);
if ( ret ) {
ret = secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, tweak);
ret = rustsecp256k1_v0_1_0_ec_pubkey_tweak_mul(ctx, &pubkey, tweak);
}
if( ret ) {
int ret2 = secp256k1_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2;
int ret2 = rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2;
}
intsarray[0] = outputLen;
@ -324,7 +324,7 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1p
return retArray;
}
SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1pubkey_1combine
SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ecdsa_1pubkey_1combine
(JNIEnv * env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint numkeys)
{
(void)classObject;(void)env;(void)byteBufferObject;(void)ctx_l;(void)numkeys;
@ -332,24 +332,24 @@ SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1p
return 0;
}
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdh
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ecdh
(JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen)
{
secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l;
const unsigned char* secdata = (*env)->GetDirectBufferAddress(env, byteBufferObject);
const unsigned char* pubdata = (const unsigned char*) (secdata + 32);
jobjectArray retArray;
jbyteArray outArray, intsByteArray;
unsigned char intsarray[1];
secp256k1_pubkey pubkey;
rustsecp256k1_v0_1_0_pubkey pubkey;
unsigned char nonce_res[32];
size_t outputLen = 32;
int ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pubdata, publen);
int ret = rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, pubdata, publen);
if (ret) {
ret = secp256k1_ecdh(
ret = rustsecp256k1_v0_1_0_ecdh(
ctx,
nonce_res,
&pubkey,

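All wrappers in the file above share one calling convention: the Java caller packs every input into a single direct ByteBuffer at fixed offsets (for example a 32-byte message first, then the secret key or signature), passes the context as a jlong that really holds a raw pointer, and receives results as a byte[][] whose second element packs the output length and the library return code. The skeleton below, with hypothetical names and no real signing call, is only a sketch of that shape; it is not the code elided from the hunks above.

#include <stdint.h>
#include <stdlib.h>
#include <jni.h>
#include "include/secp256k1.h"

/* Hypothetical wrapper skeleton illustrating the buffer and return convention. */
JNIEXPORT jobjectArray JNICALL Java_org_example_Demo_sign_1sketch
  (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
{
    rustsecp256k1_v0_1_0_context *ctx = (rustsecp256k1_v0_1_0_context*)(uintptr_t)ctx_l;
    unsigned char* data = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
    const unsigned char* msg32 = data;       /* offset 0: 32-byte message */
    const unsigned char* seckey = data + 32; /* offset 32: 32-byte secret key */
    unsigned char outputSer[72] = {0};
    unsigned char intsarray[2];
    size_t outputLen = sizeof(outputSer);
    jbyteArray sigArray, intsByteArray;
    jobjectArray retArray;
    int ret = 0; /* a real wrapper would call the library here, e.g. rustsecp256k1_v0_1_0_ecdsa_sign */

    (void)classObject; (void)ctx; (void)msg32; (void)seckey;

    sigArray = (*env)->NewByteArray(env, (jsize)outputLen);
    (*env)->SetByteArrayRegion(env, sigArray, 0, (jsize)outputLen, (jbyte*)outputSer);

    intsarray[0] = (unsigned char)outputLen; /* second element of the result: {len, retcode} */
    intsarray[1] = (unsigned char)ret;
    intsByteArray = (*env)->NewByteArray(env, 2);
    (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray);

    retArray = (*env)->NewObjectArray(env, 2, (*env)->FindClass(env, "[B"), NULL);
    (*env)->SetObjectArrayElement(env, retArray, 0, sigArray);
    (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray);
    return retArray;
}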
View File

@ -10,106 +10,106 @@ extern "C" {
#endif
/*
* Class: org_bitcoin_NativeSecp256k1
* Method: secp256k1_ctx_clone
* Method: rustsecp256k1_v0_1_0_ctx_clone
* Signature: (J)J
*/
SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ctx_1clone
SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ctx_1clone
(JNIEnv *, jclass, jlong);
/*
* Class: org_bitcoin_NativeSecp256k1
* Method: secp256k1_context_randomize
* Method: rustsecp256k1_v0_1_0_context_randomize
* Signature: (Ljava/nio/ByteBuffer;J)I
*/
SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1context_1randomize
SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1context_1randomize
(JNIEnv *, jclass, jobject, jlong);
/*
* Class: org_bitcoin_NativeSecp256k1
* Method: secp256k1_privkey_tweak_add
* Method: rustsecp256k1_v0_1_0_privkey_tweak_add
* Signature: (Ljava/nio/ByteBuffer;J)[[B
*/
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1add
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1privkey_1tweak_1add
(JNIEnv *, jclass, jobject, jlong);
/*
* Class: org_bitcoin_NativeSecp256k1
* Method: secp256k1_privkey_tweak_mul
* Method: rustsecp256k1_v0_1_0_privkey_tweak_mul
* Signature: (Ljava/nio/ByteBuffer;J)[[B
*/
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1mul
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1privkey_1tweak_1mul
(JNIEnv *, jclass, jobject, jlong);
/*
* Class: org_bitcoin_NativeSecp256k1
* Method: secp256k1_pubkey_tweak_add
* Method: rustsecp256k1_v0_1_0_pubkey_tweak_add
* Signature: (Ljava/nio/ByteBuffer;JI)[[B
*/
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1add
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1pubkey_1tweak_1add
(JNIEnv *, jclass, jobject, jlong, jint);
/*
* Class: org_bitcoin_NativeSecp256k1
* Method: secp256k1_pubkey_tweak_mul
* Method: rustsecp256k1_v0_1_0_pubkey_tweak_mul
* Signature: (Ljava/nio/ByteBuffer;JI)[[B
*/
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1mul
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1pubkey_1tweak_1mul
(JNIEnv *, jclass, jobject, jlong, jint);
/*
* Class: org_bitcoin_NativeSecp256k1
* Method: secp256k1_destroy_context
* Method: rustsecp256k1_v0_1_0_destroy_context
* Signature: (J)V
*/
SECP256K1_API void JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1destroy_1context
SECP256K1_API void JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1destroy_1context
(JNIEnv *, jclass, jlong);
/*
* Class: org_bitcoin_NativeSecp256k1
* Method: secp256k1_ecdsa_verify
* Method: rustsecp256k1_v0_1_0_ecdsa_verify
* Signature: (Ljava/nio/ByteBuffer;JII)I
*/
SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1verify
SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ecdsa_1verify
(JNIEnv *, jclass, jobject, jlong, jint, jint);
/*
* Class: org_bitcoin_NativeSecp256k1
* Method: secp256k1_ecdsa_sign
* Method: rustsecp256k1_v0_1_0_ecdsa_sign
* Signature: (Ljava/nio/ByteBuffer;J)[[B
*/
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1sign
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ecdsa_1sign
(JNIEnv *, jclass, jobject, jlong);
/*
* Class: org_bitcoin_NativeSecp256k1
* Method: secp256k1_ec_seckey_verify
* Method: rustsecp256k1_v0_1_0_ec_seckey_verify
* Signature: (Ljava/nio/ByteBuffer;J)I
*/
SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1seckey_1verify
SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ec_1seckey_1verify
(JNIEnv *, jclass, jobject, jlong);
/*
* Class: org_bitcoin_NativeSecp256k1
* Method: secp256k1_ec_pubkey_create
* Method: rustsecp256k1_v0_1_0_ec_pubkey_create
* Signature: (Ljava/nio/ByteBuffer;J)[[B
*/
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1pubkey_1create
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ec_1pubkey_1create
(JNIEnv *, jclass, jobject, jlong);
/*
* Class: org_bitcoin_NativeSecp256k1
* Method: secp256k1_ec_pubkey_parse
* Method: rustsecp256k1_v0_1_0_ec_pubkey_parse
* Signature: (Ljava/nio/ByteBuffer;JI)[[B
*/
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1pubkey_1parse
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ec_1pubkey_1parse
(JNIEnv *, jclass, jobject, jlong, jint);
/*
* Class: org_bitcoin_NativeSecp256k1
* Method: secp256k1_ecdh
* Method: rustsecp256k1_v0_1_0_ecdh
* Signature: (Ljava/nio/ByteBuffer;JI)[[B
*/
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdh
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_rustsecp256k1_v0_1_0_1ecdh
(JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen);

View File

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <stdint.h>
#include "org_bitcoin_Secp256k1Context.h"
#include "include/secp256k1.h"
SECP256K1_API jlong JNICALL Java_org_bitcoin_Secp256k1Context_rustsecp256k1_v0_1_0_1init_1context
(JNIEnv* env, jclass classObject)
{
rustsecp256k1_v0_1_0_context *ctx = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
(void)classObject;(void)env;
return (uintptr_t)ctx;
}
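For completeness, the prefixed context API used by this initializer keeps the usual libsecp256k1 lifecycle: create with the desired capability flags, optionally re-randomize with a 32-byte seed (as the randomize wrapper earlier in this diff does), and destroy when finished. A minimal native-side sketch under those assumptions, error handling omitted:

#include "include/secp256k1.h"

/* Sketch: create, optionally randomize, use and destroy a context. */
static int context_lifecycle_sketch(const unsigned char seed32[32])
{
    rustsecp256k1_v0_1_0_context *ctx =
        rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
    int ok = rustsecp256k1_v0_1_0_context_randomize(ctx, seed32); /* 1 on success */
    /* ... sign / verify with ctx ... */
    rustsecp256k1_v0_1_0_context_destroy(ctx);
    return ok;
}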

View File

@ -10,10 +10,10 @@ extern "C" {
#endif
/*
* Class: org_bitcoin_Secp256k1Context
* Method: secp256k1_init_context
* Method: rustsecp256k1_v0_1_0_init_context
* Signature: ()J
*/
SECP256K1_API jlong JNICALL Java_org_bitcoin_Secp256k1Context_secp256k1_1init_1context
SECP256K1_API jlong JNICALL Java_org_bitcoin_Secp256k1Context_rustsecp256k1_v0_1_0_1init_1context
(JNIEnv *, jclass);
#ifdef __cplusplus

View File

@ -1,4 +1,4 @@
include_HEADERS += include/secp256k1_ecdh.h
include_HEADERS += include/rustsecp256k1_v0_1_0_ecdh.h
noinst_HEADERS += src/modules/ecdh/main_impl.h
noinst_HEADERS += src/modules/ecdh/tests_impl.h
if USE_BENCHMARK

View File

@ -0,0 +1,67 @@
/**********************************************************************
* Copyright (c) 2015 Andrew Poelstra *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_MODULE_ECDH_MAIN_H
#define SECP256K1_MODULE_ECDH_MAIN_H
#include "include/secp256k1_ecdh.h"
#include "ecmult_const_impl.h"
static int ecdh_hash_function_sha256(unsigned char *output, const unsigned char *x, const unsigned char *y, void *data) {
unsigned char version = (y[31] & 0x01) | 0x02;
rustsecp256k1_v0_1_0_sha256 sha;
(void)data;
rustsecp256k1_v0_1_0_sha256_initialize(&sha);
rustsecp256k1_v0_1_0_sha256_write(&sha, &version, 1);
rustsecp256k1_v0_1_0_sha256_write(&sha, x, 32);
rustsecp256k1_v0_1_0_sha256_finalize(&sha, output);
return 1;
}
const rustsecp256k1_v0_1_0_ecdh_hash_function rustsecp256k1_v0_1_0_ecdh_hash_function_sha256 = ecdh_hash_function_sha256;
const rustsecp256k1_v0_1_0_ecdh_hash_function rustsecp256k1_v0_1_0_ecdh_hash_function_default = ecdh_hash_function_sha256;
int rustsecp256k1_v0_1_0_ecdh(const rustsecp256k1_v0_1_0_context* ctx, unsigned char *output, const rustsecp256k1_v0_1_0_pubkey *point, const unsigned char *scalar, rustsecp256k1_v0_1_0_ecdh_hash_function hashfp, void *data) {
int ret = 0;
int overflow = 0;
rustsecp256k1_v0_1_0_gej res;
rustsecp256k1_v0_1_0_ge pt;
rustsecp256k1_v0_1_0_scalar s;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(output != NULL);
ARG_CHECK(point != NULL);
ARG_CHECK(scalar != NULL);
if (hashfp == NULL) {
hashfp = rustsecp256k1_v0_1_0_ecdh_hash_function_default;
}
rustsecp256k1_v0_1_0_pubkey_load(ctx, &pt, point);
rustsecp256k1_v0_1_0_scalar_set_b32(&s, scalar, &overflow);
if (overflow || rustsecp256k1_v0_1_0_scalar_is_zero(&s)) {
ret = 0;
} else {
unsigned char x[32];
unsigned char y[32];
rustsecp256k1_v0_1_0_ecmult_const(&res, &pt, &s, 256);
rustsecp256k1_v0_1_0_ge_set_gej(&pt, &res);
/* Compute a hash of the point */
rustsecp256k1_v0_1_0_fe_normalize(&pt.x);
rustsecp256k1_v0_1_0_fe_normalize(&pt.y);
rustsecp256k1_v0_1_0_fe_get_b32(x, &pt.x);
rustsecp256k1_v0_1_0_fe_get_b32(y, &pt.y);
ret = hashfp(output, x, y, data);
}
rustsecp256k1_v0_1_0_scalar_clear(&s);
return ret;
}
#endif /* SECP256K1_MODULE_ECDH_MAIN_H */
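The prefixed ECDH entry point defined above keeps upstream's shape: given a parsed public key and a 32-byte scalar it writes a 32-byte shared secret, by default the SHA-256 of the compressed encoding of the shared point (a version byte derived from y's parity, then x). A minimal caller sketch under that assumption, with hypothetical parameter names:

#include <stddef.h>
#include "include/secp256k1.h"
#include "include/secp256k1_ecdh.h"

/* Sketch: derive a 32-byte shared secret from our secret key and the peer's
 * serialized public key. Returns 1 on success, 0 on failure. */
static int ecdh_sketch(const rustsecp256k1_v0_1_0_context *ctx,
                       unsigned char secret32[32],
                       const unsigned char *peer_pub, size_t peer_pub_len,
                       const unsigned char my_seckey[32])
{
    rustsecp256k1_v0_1_0_pubkey pubkey;
    if (!rustsecp256k1_v0_1_0_ec_pubkey_parse(ctx, &pubkey, peer_pub, peer_pub_len)) {
        return 0;
    }
    /* Passing NULL selects rustsecp256k1_v0_1_0_ecdh_hash_function_default. */
    return rustsecp256k1_v0_1_0_ecdh(ctx, secret32, &pubkey, my_seckey, NULL, NULL);
}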

View File

@ -26,69 +26,69 @@ int ecdh_hash_function_custom(unsigned char *output, const unsigned char *x, con
void test_ecdh_api(void) {
/* Setup context that just counts errors */
secp256k1_context *tctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
secp256k1_pubkey point;
rustsecp256k1_v0_1_0_context *tctx = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN);
rustsecp256k1_v0_1_0_pubkey point;
unsigned char res[32];
unsigned char s_one[32] = { 0 };
int32_t ecount = 0;
s_one[31] = 1;
secp256k1_context_set_error_callback(tctx, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_illegal_callback(tctx, counting_illegal_callback_fn, &ecount);
CHECK(secp256k1_ec_pubkey_create(tctx, &point, s_one) == 1);
rustsecp256k1_v0_1_0_context_set_error_callback(tctx, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_1_0_context_set_illegal_callback(tctx, counting_illegal_callback_fn, &ecount);
CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(tctx, &point, s_one) == 1);
/* Check all NULLs are detected */
CHECK(secp256k1_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1);
CHECK(ecount == 0);
CHECK(secp256k1_ecdh(tctx, NULL, &point, s_one, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_1_0_ecdh(tctx, NULL, &point, s_one, NULL, NULL) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_ecdh(tctx, res, NULL, s_one, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_1_0_ecdh(tctx, res, NULL, s_one, NULL, NULL) == 0);
CHECK(ecount == 2);
CHECK(secp256k1_ecdh(tctx, res, &point, NULL, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_1_0_ecdh(tctx, res, &point, NULL, NULL, NULL) == 0);
CHECK(ecount == 3);
CHECK(secp256k1_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1);
CHECK(ecount == 3);
/* Cleanup */
secp256k1_context_destroy(tctx);
rustsecp256k1_v0_1_0_context_destroy(tctx);
}
void test_ecdh_generator_basepoint(void) {
unsigned char s_one[32] = { 0 };
secp256k1_pubkey point[2];
rustsecp256k1_v0_1_0_pubkey point[2];
int i;
s_one[31] = 1;
/* Check against pubkey creation when the basepoint is the generator */
for (i = 0; i < 100; ++i) {
secp256k1_sha256 sha;
rustsecp256k1_v0_1_0_sha256 sha;
unsigned char s_b32[32];
unsigned char output_ecdh[65];
unsigned char output_ser[32];
unsigned char point_ser[65];
size_t point_ser_len = sizeof(point_ser);
secp256k1_scalar s;
rustsecp256k1_v0_1_0_scalar s;
random_scalar_order(&s);
secp256k1_scalar_get_b32(s_b32, &s);
rustsecp256k1_v0_1_0_scalar_get_b32(s_b32, &s);
CHECK(secp256k1_ec_pubkey_create(ctx, &point[0], s_one) == 1);
CHECK(secp256k1_ec_pubkey_create(ctx, &point[1], s_b32) == 1);
CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &point[0], s_one) == 1);
CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &point[1], s_b32) == 1);
/* compute using ECDH function with custom hash function */
CHECK(secp256k1_ecdh(ctx, output_ecdh, &point[0], s_b32, ecdh_hash_function_custom, NULL) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdh(ctx, output_ecdh, &point[0], s_b32, ecdh_hash_function_custom, NULL) == 1);
/* compute "explicitly" */
CHECK(secp256k1_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_UNCOMPRESSED) == 1);
CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_UNCOMPRESSED) == 1);
/* compare */
CHECK(memcmp(output_ecdh, point_ser, 65) == 0);
/* compute using ECDH function with default hash function */
CHECK(secp256k1_ecdh(ctx, output_ecdh, &point[0], s_b32, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdh(ctx, output_ecdh, &point[0], s_b32, NULL, NULL) == 1);
/* compute "explicitly" */
CHECK(secp256k1_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_COMPRESSED) == 1);
secp256k1_sha256_initialize(&sha);
secp256k1_sha256_write(&sha, point_ser, point_ser_len);
secp256k1_sha256_finalize(&sha, output_ser);
CHECK(rustsecp256k1_v0_1_0_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_COMPRESSED) == 1);
rustsecp256k1_v0_1_0_sha256_initialize(&sha);
rustsecp256k1_v0_1_0_sha256_write(&sha, point_ser, point_ser_len);
rustsecp256k1_v0_1_0_sha256_finalize(&sha, output_ser);
/* compare */
CHECK(memcmp(output_ecdh, output_ser, 32) == 0);
}
@ -104,23 +104,23 @@ void test_bad_scalar(void) {
};
unsigned char s_rand[32] = { 0 };
unsigned char output[32];
secp256k1_scalar rand;
secp256k1_pubkey point;
rustsecp256k1_v0_1_0_scalar rand;
rustsecp256k1_v0_1_0_pubkey point;
/* Create random point */
random_scalar_order(&rand);
secp256k1_scalar_get_b32(s_rand, &rand);
CHECK(secp256k1_ec_pubkey_create(ctx, &point, s_rand) == 1);
rustsecp256k1_v0_1_0_scalar_get_b32(s_rand, &rand);
CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &point, s_rand) == 1);
/* Try to multiply it by bad values */
CHECK(secp256k1_ecdh(ctx, output, &point, s_zero, NULL, NULL) == 0);
CHECK(secp256k1_ecdh(ctx, output, &point, s_overflow, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_1_0_ecdh(ctx, output, &point, s_zero, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_1_0_ecdh(ctx, output, &point, s_overflow, NULL, NULL) == 0);
/* ...and a good one */
s_overflow[31] -= 1;
CHECK(secp256k1_ecdh(ctx, output, &point, s_overflow, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdh(ctx, output, &point, s_overflow, NULL, NULL) == 1);
/* Hash function failure results in ecdh failure */
CHECK(secp256k1_ecdh(ctx, output, &point, s_overflow, ecdh_hash_function_test_fail, NULL) == 0);
CHECK(rustsecp256k1_v0_1_0_ecdh(ctx, output, &point, s_overflow, ecdh_hash_function_test_fail, NULL) == 0);
}
void run_ecdh_tests(void) {

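The hashfp argument exercised by these tests has the shape int fn(unsigned char *output, const unsigned char *x, const unsigned char *y, void *data), where x and y are the 32-byte affine coordinates of the shared point and a zero return makes the ECDH call fail. The bodies of ecdh_hash_function_custom and ecdh_hash_function_test_fail sit outside this hunk; the callback below is purely hypothetical and only illustrates the contract:

#include <string.h>

/* Hypothetical rustsecp256k1_v0_1_0_ecdh_hash_function: return the raw x
 * coordinate as the output. Not one of the callbacks used by the tests above. */
static int ecdh_hash_function_xonly_sketch(unsigned char *output,
                                           const unsigned char *x,
                                           const unsigned char *y,
                                           void *data) {
    (void)y;
    (void)data;
    memcpy(output, x, 32);
    return 1; /* returning 0 would make rustsecp256k1_v0_1_0_ecdh return 0 */
}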
View File

@ -1,4 +1,4 @@
include_HEADERS += include/secp256k1_recovery.h
include_HEADERS += include/rustsecp256k1_v0_1_0_recovery.h
noinst_HEADERS += src/modules/recovery/main_impl.h
noinst_HEADERS += src/modules/recovery/tests_impl.h
if USE_BENCHMARK

View File

@ -0,0 +1,193 @@
/**********************************************************************
* Copyright (c) 2013-2015 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_MODULE_RECOVERY_MAIN_H
#define SECP256K1_MODULE_RECOVERY_MAIN_H
#include "include/secp256k1_recovery.h"
static void rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_load(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_scalar* r, rustsecp256k1_v0_1_0_scalar* s, int* recid, const rustsecp256k1_v0_1_0_ecdsa_recoverable_signature* sig) {
(void)ctx;
if (sizeof(rustsecp256k1_v0_1_0_scalar) == 32) {
/* When the rustsecp256k1_v0_1_0_scalar type is exactly 32 byte, use its
* representation inside rustsecp256k1_v0_1_0_ecdsa_signature, as conversion is very fast.
* Note that rustsecp256k1_v0_1_0_ecdsa_signature_save must use the same representation. */
memcpy(r, &sig->data[0], 32);
memcpy(s, &sig->data[32], 32);
} else {
rustsecp256k1_v0_1_0_scalar_set_b32(r, &sig->data[0], NULL);
rustsecp256k1_v0_1_0_scalar_set_b32(s, &sig->data[32], NULL);
}
*recid = sig->data[64];
}
static void rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_save(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature* sig, const rustsecp256k1_v0_1_0_scalar* r, const rustsecp256k1_v0_1_0_scalar* s, int recid) {
if (sizeof(rustsecp256k1_v0_1_0_scalar) == 32) {
memcpy(&sig->data[0], r, 32);
memcpy(&sig->data[32], s, 32);
} else {
rustsecp256k1_v0_1_0_scalar_get_b32(&sig->data[0], r);
rustsecp256k1_v0_1_0_scalar_get_b32(&sig->data[32], s);
}
sig->data[64] = recid;
}
int rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid) {
rustsecp256k1_v0_1_0_scalar r, s;
int ret = 1;
int overflow = 0;
(void)ctx;
ARG_CHECK(sig != NULL);
ARG_CHECK(input64 != NULL);
ARG_CHECK(recid >= 0 && recid <= 3);
rustsecp256k1_v0_1_0_scalar_set_b32(&r, &input64[0], &overflow);
ret &= !overflow;
rustsecp256k1_v0_1_0_scalar_set_b32(&s, &input64[32], &overflow);
ret &= !overflow;
if (ret) {
rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_save(sig, &r, &s, recid);
} else {
memset(sig, 0, sizeof(*sig));
}
return ret;
}
int rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_serialize_compact(const rustsecp256k1_v0_1_0_context* ctx, unsigned char *output64, int *recid, const rustsecp256k1_v0_1_0_ecdsa_recoverable_signature* sig) {
rustsecp256k1_v0_1_0_scalar r, s;
(void)ctx;
ARG_CHECK(output64 != NULL);
ARG_CHECK(sig != NULL);
ARG_CHECK(recid != NULL);
rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_load(ctx, &r, &s, recid, sig);
rustsecp256k1_v0_1_0_scalar_get_b32(&output64[0], &r);
rustsecp256k1_v0_1_0_scalar_get_b32(&output64[32], &s);
return 1;
}
int rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_ecdsa_signature* sig, const rustsecp256k1_v0_1_0_ecdsa_recoverable_signature* sigin) {
rustsecp256k1_v0_1_0_scalar r, s;
int recid;
(void)ctx;
ARG_CHECK(sig != NULL);
ARG_CHECK(sigin != NULL);
rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, sigin);
rustsecp256k1_v0_1_0_ecdsa_signature_save(sig, &r, &s);
return 1;
}
static int rustsecp256k1_v0_1_0_ecdsa_sig_recover(const rustsecp256k1_v0_1_0_ecmult_context *ctx, const rustsecp256k1_v0_1_0_scalar *sigr, const rustsecp256k1_v0_1_0_scalar* sigs, rustsecp256k1_v0_1_0_ge *pubkey, const rustsecp256k1_v0_1_0_scalar *message, int recid) {
unsigned char brx[32];
rustsecp256k1_v0_1_0_fe fx;
rustsecp256k1_v0_1_0_ge x;
rustsecp256k1_v0_1_0_gej xj;
rustsecp256k1_v0_1_0_scalar rn, u1, u2;
rustsecp256k1_v0_1_0_gej qj;
int r;
if (rustsecp256k1_v0_1_0_scalar_is_zero(sigr) || rustsecp256k1_v0_1_0_scalar_is_zero(sigs)) {
return 0;
}
rustsecp256k1_v0_1_0_scalar_get_b32(brx, sigr);
r = rustsecp256k1_v0_1_0_fe_set_b32(&fx, brx);
(void)r;
VERIFY_CHECK(r); /* brx comes from a scalar, so is less than the order; certainly less than p */
if (recid & 2) {
if (rustsecp256k1_v0_1_0_fe_cmp_var(&fx, &rustsecp256k1_v0_1_0_ecdsa_const_p_minus_order) >= 0) {
return 0;
}
rustsecp256k1_v0_1_0_fe_add(&fx, &rustsecp256k1_v0_1_0_ecdsa_const_order_as_fe);
}
if (!rustsecp256k1_v0_1_0_ge_set_xo_var(&x, &fx, recid & 1)) {
return 0;
}
rustsecp256k1_v0_1_0_gej_set_ge(&xj, &x);
rustsecp256k1_v0_1_0_scalar_inverse_var(&rn, sigr);
rustsecp256k1_v0_1_0_scalar_mul(&u1, &rn, message);
rustsecp256k1_v0_1_0_scalar_negate(&u1, &u1);
rustsecp256k1_v0_1_0_scalar_mul(&u2, &rn, sigs);
rustsecp256k1_v0_1_0_ecmult(ctx, &qj, &xj, &u2, &u1);
rustsecp256k1_v0_1_0_ge_set_gej_var(pubkey, &qj);
return !rustsecp256k1_v0_1_0_gej_is_infinity(&qj);
}
int rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_ecdsa_recoverable_signature *signature, const unsigned char *msg32, const unsigned char *seckey, rustsecp256k1_v0_1_0_nonce_function noncefp, const void* noncedata) {
rustsecp256k1_v0_1_0_scalar r, s;
rustsecp256k1_v0_1_0_scalar sec, non, msg;
int recid;
int ret = 0;
int overflow = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(rustsecp256k1_v0_1_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
ARG_CHECK(msg32 != NULL);
ARG_CHECK(signature != NULL);
ARG_CHECK(seckey != NULL);
if (noncefp == NULL) {
noncefp = rustsecp256k1_v0_1_0_nonce_function_default;
}
rustsecp256k1_v0_1_0_scalar_set_b32(&sec, seckey, &overflow);
/* Fail if the secret key is invalid. */
if (!overflow && !rustsecp256k1_v0_1_0_scalar_is_zero(&sec)) {
unsigned char nonce32[32];
unsigned int count = 0;
rustsecp256k1_v0_1_0_scalar_set_b32(&msg, msg32, NULL);
while (1) {
ret = noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count);
if (!ret) {
break;
}
rustsecp256k1_v0_1_0_scalar_set_b32(&non, nonce32, &overflow);
if (!rustsecp256k1_v0_1_0_scalar_is_zero(&non) && !overflow) {
if (rustsecp256k1_v0_1_0_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, &recid)) {
break;
}
}
count++;
}
memset(nonce32, 0, 32);
rustsecp256k1_v0_1_0_scalar_clear(&msg);
rustsecp256k1_v0_1_0_scalar_clear(&non);
rustsecp256k1_v0_1_0_scalar_clear(&sec);
}
if (ret) {
rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_save(signature, &r, &s, recid);
} else {
memset(signature, 0, sizeof(*signature));
}
return ret;
}
int rustsecp256k1_v0_1_0_ecdsa_recover(const rustsecp256k1_v0_1_0_context* ctx, rustsecp256k1_v0_1_0_pubkey *pubkey, const rustsecp256k1_v0_1_0_ecdsa_recoverable_signature *signature, const unsigned char *msg32) {
rustsecp256k1_v0_1_0_ge q;
rustsecp256k1_v0_1_0_scalar r, s;
rustsecp256k1_v0_1_0_scalar m;
int recid;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(rustsecp256k1_v0_1_0_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(msg32 != NULL);
ARG_CHECK(signature != NULL);
ARG_CHECK(pubkey != NULL);
rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, signature);
VERIFY_CHECK(recid >= 0 && recid < 4); /* should have been caught in parse_compact */
rustsecp256k1_v0_1_0_scalar_set_b32(&m, msg32, NULL);
if (rustsecp256k1_v0_1_0_ecdsa_sig_recover(&ctx->ecmult_ctx, &r, &s, &q, &m, recid)) {
rustsecp256k1_v0_1_0_pubkey_save(pubkey, &q);
return 1;
} else {
memset(pubkey, 0, sizeof(*pubkey));
return 0;
}
}
#endif /* SECP256K1_MODULE_RECOVERY_MAIN_H */
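Two things are worth spelling out about the code above. First, rustsecp256k1_v0_1_0_ecdsa_sig_recover implements standard public-key recovery: R is the curve point whose x coordinate is r (plus the group order n when recid bit 1 is set) and whose y parity equals recid bit 0, and the recovered key is Q = r^(-1) * (s*R - m*G); the u1/u2 values fed to the double multiplication are exactly -m/r and s/r. Second, the public entry points compose into a simple round trip. A caller-side sketch, assuming a context created with SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY and the default nonce function:

#include <string.h>
#include "include/secp256k1.h"
#include "include/secp256k1_recovery.h"

/* Sketch: sign recoverably, round-trip through the 64-byte compact encoding
 * plus recovery id, recover the public key and compare it with the expected one. */
static int recover_roundtrip_sketch(const rustsecp256k1_v0_1_0_context *ctx,
                                    const unsigned char msg32[32],
                                    const unsigned char seckey[32])
{
    rustsecp256k1_v0_1_0_ecdsa_recoverable_signature rsig;
    rustsecp256k1_v0_1_0_pubkey expected, recovered;
    unsigned char sig64[64];
    int recid = 0;

    if (!rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &expected, seckey)) return 0;
    if (!rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(ctx, &rsig, msg32, seckey, NULL, NULL)) return 0;
    if (!rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig64, &recid, &rsig)) return 0;
    if (!rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, recid)) return 0;
    if (!rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &recovered, &rsig, msg32)) return 0;
    return memcmp(&expected, &recovered, sizeof(expected)) == 0;
}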

View File

@ -0,0 +1,393 @@
/**********************************************************************
* Copyright (c) 2013-2015 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_MODULE_RECOVERY_TESTS_H
#define SECP256K1_MODULE_RECOVERY_TESTS_H
static int recovery_test_nonce_function(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) {
(void) msg32;
(void) key32;
(void) algo16;
(void) data;
/* On the first run, return an all-zero nonce to force a second run */
if (counter == 0) {
memset(nonce32, 0, 32);
return 1;
}
/* On the second run, return an overflow to force a third run */
if (counter == 1) {
memset(nonce32, 0xff, 32);
return 1;
}
/* On the next run, return a valid nonce, but flip a coin as to whether or not to fail signing. */
memset(nonce32, 1, 32);
return rustsecp256k1_v0_1_0_rand_bits(1);
}
void test_ecdsa_recovery_api(void) {
/* Setup contexts that just count errors */
rustsecp256k1_v0_1_0_context *none = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_NONE);
rustsecp256k1_v0_1_0_context *sign = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN);
rustsecp256k1_v0_1_0_context *vrfy = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_VERIFY);
rustsecp256k1_v0_1_0_context *both = rustsecp256k1_v0_1_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
rustsecp256k1_v0_1_0_pubkey pubkey;
rustsecp256k1_v0_1_0_pubkey recpubkey;
rustsecp256k1_v0_1_0_ecdsa_signature normal_sig;
rustsecp256k1_v0_1_0_ecdsa_recoverable_signature recsig;
unsigned char privkey[32] = { 1 };
unsigned char message[32] = { 2 };
int32_t ecount = 0;
int recid = 0;
unsigned char sig[74];
unsigned char zero_privkey[32] = { 0 };
unsigned char over_privkey[32] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
rustsecp256k1_v0_1_0_context_set_error_callback(none, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_1_0_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_1_0_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_1_0_context_set_error_callback(both, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_1_0_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_1_0_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_1_0_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
rustsecp256k1_v0_1_0_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount);
/* Construct and verify corresponding public key. */
CHECK(rustsecp256k1_v0_1_0_ec_seckey_verify(ctx, privkey) == 1);
CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
/* Check bad contexts and NULLs for signing */
ecount = 0;
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(none, &recsig, message, privkey, NULL, NULL) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(sign, &recsig, message, privkey, NULL, NULL) == 1);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(vrfy, &recsig, message, privkey, NULL, NULL) == 0);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(both, NULL, message, privkey, NULL, NULL) == 0);
CHECK(ecount == 3);
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(both, &recsig, NULL, privkey, NULL, NULL) == 0);
CHECK(ecount == 4);
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(both, &recsig, message, NULL, NULL, NULL) == 0);
CHECK(ecount == 5);
/* This will fail or succeed randomly, and in either case will not ARG_CHECK failure */
rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, recovery_test_nonce_function, NULL);
CHECK(ecount == 5);
/* These will all fail, but not in ARG_CHECK way */
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(both, &recsig, message, zero_privkey, NULL, NULL) == 0);
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(both, &recsig, message, over_privkey, NULL, NULL) == 0);
/* This one will succeed. */
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
CHECK(ecount == 5);
/* Check signing with a goofy nonce function */
/* Check bad contexts and NULLs for recovery */
ecount = 0;
CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(none, &recpubkey, &recsig, message) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(sign, &recpubkey, &recsig, message) == 0);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(vrfy, &recpubkey, &recsig, message) == 1);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(both, &recpubkey, &recsig, message) == 1);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(both, NULL, &recsig, message) == 0);
CHECK(ecount == 3);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(both, &recpubkey, NULL, message) == 0);
CHECK(ecount == 4);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(both, &recpubkey, &recsig, NULL) == 0);
CHECK(ecount == 5);
/* Check NULLs for conversion */
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(both, &normal_sig, message, privkey, NULL, NULL) == 1);
ecount = 0;
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert(both, NULL, &recsig) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert(both, &normal_sig, NULL) == 0);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert(both, &normal_sig, &recsig) == 1);
/* Check NULLs for de/serialization */
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
ecount = 0;
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_serialize_compact(both, NULL, &recid, &recsig) == 0);
CHECK(ecount == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_serialize_compact(both, sig, NULL, &recsig) == 0);
CHECK(ecount == 2);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, NULL) == 0);
CHECK(ecount == 3);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, &recsig) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(both, NULL, sig, recid) == 0);
CHECK(ecount == 4);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, NULL, recid) == 0);
CHECK(ecount == 5);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, -1) == 0);
CHECK(ecount == 6);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, 5) == 0);
CHECK(ecount == 7);
/* overflow in signature will fail but not affect ecount */
memcpy(sig, over_privkey, 32);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, recid) == 0);
CHECK(ecount == 7);
/* cleanup */
rustsecp256k1_v0_1_0_context_destroy(none);
rustsecp256k1_v0_1_0_context_destroy(sign);
rustsecp256k1_v0_1_0_context_destroy(vrfy);
rustsecp256k1_v0_1_0_context_destroy(both);
}
void test_ecdsa_recovery_end_to_end(void) {
unsigned char extra[32] = {0x00};
unsigned char privkey[32];
unsigned char message[32];
rustsecp256k1_v0_1_0_ecdsa_signature signature[5];
rustsecp256k1_v0_1_0_ecdsa_recoverable_signature rsignature[5];
unsigned char sig[74];
rustsecp256k1_v0_1_0_pubkey pubkey;
rustsecp256k1_v0_1_0_pubkey recpubkey;
int recid = 0;
/* Generate a random key and message. */
{
rustsecp256k1_v0_1_0_scalar msg, key;
random_scalar_order_test(&msg);
random_scalar_order_test(&key);
rustsecp256k1_v0_1_0_scalar_get_b32(privkey, &key);
rustsecp256k1_v0_1_0_scalar_get_b32(message, &msg);
}
/* Construct and verify corresponding public key. */
CHECK(rustsecp256k1_v0_1_0_ec_seckey_verify(ctx, privkey) == 1);
CHECK(rustsecp256k1_v0_1_0_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
/* Serialize/parse compact and verify/recover. */
extra[0] = 0;
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(ctx, &rsignature[0], message, privkey, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(ctx, &rsignature[4], message, privkey, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(ctx, &rsignature[1], message, privkey, NULL, extra) == 1);
extra[31] = 1;
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(ctx, &rsignature[2], message, privkey, NULL, extra) == 1);
extra[31] = 0;
extra[0] = 1;
CHECK(rustsecp256k1_v0_1_0_ecdsa_sign_recoverable(ctx, &rsignature[3], message, privkey, NULL, extra) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
CHECK(memcmp(&signature[4], &signature[0], 64) == 0);
CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
memset(&rsignature[4], 0, sizeof(rsignature[4]));
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
/* Parse compact (with recovery id) and recover. */
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 1);
CHECK(memcmp(&pubkey, &recpubkey, sizeof(pubkey)) == 0);
/* Serialize/destroy/parse signature and verify again. */
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
sig[rustsecp256k1_v0_1_0_rand_bits(6)] += 1 + rustsecp256k1_v0_1_0_rand_int(255);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 0);
/* Recover again */
CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 0 ||
memcmp(&pubkey, &recpubkey, sizeof(pubkey)) != 0);
}
/* Tests several edge cases. */
void test_ecdsa_recovery_edge_cases(void) {
const unsigned char msg32[32] = {
'T', 'h', 'i', 's', ' ', 'i', 's', ' ',
'a', ' ', 'v', 'e', 'r', 'y', ' ', 's',
'e', 'c', 'r', 'e', 't', ' ', 'm', 'e',
's', 's', 'a', 'g', 'e', '.', '.', '.'
};
const unsigned char sig64[64] = {
/* Generated by signing the above message with nonce 'This is the nonce we will use...'
* and secret key 0 (which is not valid), resulting in recid 0. */
0x67, 0xCB, 0x28, 0x5F, 0x9C, 0xD1, 0x94, 0xE8,
0x40, 0xD6, 0x29, 0x39, 0x7A, 0xF5, 0x56, 0x96,
0x62, 0xFD, 0xE4, 0x46, 0x49, 0x99, 0x59, 0x63,
0x17, 0x9A, 0x7D, 0xD1, 0x7B, 0xD2, 0x35, 0x32,
0x4B, 0x1B, 0x7D, 0xF3, 0x4C, 0xE1, 0xF6, 0x8E,
0x69, 0x4F, 0xF6, 0xF1, 0x1A, 0xC7, 0x51, 0xDD,
0x7D, 0xD7, 0x3E, 0x38, 0x7E, 0xE4, 0xFC, 0x86,
0x6E, 0x1B, 0xE8, 0xEC, 0xC7, 0xDD, 0x95, 0x57
};
rustsecp256k1_v0_1_0_pubkey pubkey;
/* signature (r,s) = (4,4), which can be recovered with all 4 recids. */
const unsigned char sigb64[64] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
};
rustsecp256k1_v0_1_0_pubkey pubkeyb;
rustsecp256k1_v0_1_0_ecdsa_recoverable_signature rsig;
rustsecp256k1_v0_1_0_ecdsa_signature sig;
int recid;
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 0));
CHECK(!rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 1));
CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 2));
CHECK(!rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 3));
CHECK(!rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
for (recid = 0; recid < 4; recid++) {
int i;
int recid2;
/* (4,4) encoded in DER. */
unsigned char sigbder[8] = {0x30, 0x06, 0x02, 0x01, 0x04, 0x02, 0x01, 0x04};
unsigned char sigcder_zr[7] = {0x30, 0x05, 0x02, 0x00, 0x02, 0x01, 0x01};
unsigned char sigcder_zs[7] = {0x30, 0x05, 0x02, 0x01, 0x01, 0x02, 0x00};
unsigned char sigbderalt1[39] = {
0x30, 0x25, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x04, 0x02, 0x01, 0x04,
};
unsigned char sigbderalt2[39] = {
0x30, 0x25, 0x02, 0x01, 0x04, 0x02, 0x20, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
};
unsigned char sigbderalt3[40] = {
0x30, 0x26, 0x02, 0x21, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x04, 0x02, 0x01, 0x04,
};
unsigned char sigbderalt4[40] = {
0x30, 0x26, 0x02, 0x01, 0x04, 0x02, 0x21, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
};
/* (order + r,4) encoded in DER. */
unsigned char sigbderlong[40] = {
0x30, 0x26, 0x02, 0x21, 0x00, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xBA, 0xAE, 0xDC,
0xE6, 0xAF, 0x48, 0xA0, 0x3B, 0xBF, 0xD2, 0x5E,
0x8C, 0xD0, 0x36, 0x41, 0x45, 0x02, 0x01, 0x04
};
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 1);
for (recid2 = 0; recid2 < 4; recid2++) {
rustsecp256k1_v0_1_0_pubkey pubkey2b;
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid2) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &pubkey2b, &rsig, msg32) == 1);
/* Verifying with (order + r,4) should always fail. */
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbderlong, sizeof(sigbderlong)) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
}
/* DER parsing tests. */
/* Zero length r/s. */
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigcder_zr, sizeof(sigcder_zr)) == 0);
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigcder_zs, sizeof(sigcder_zs)) == 0);
/* Leading zeros. */
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt1, sizeof(sigbderalt1)) == 0);
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt2, sizeof(sigbderalt2)) == 0);
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 0);
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 0);
sigbderalt3[4] = 1;
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
sigbderalt4[7] = 1;
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
/* Damage signature. */
sigbder[7]++;
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
sigbder[7]--;
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, 6) == 0);
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder) - 1) == 0);
for(i = 0; i < 8; i++) {
int c;
unsigned char orig = sigbder[i];
/*Try every single-byte change.*/
for (c = 0; c < 256; c++) {
if (c == orig ) {
continue;
}
sigbder[i] = c;
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 0 || rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
}
sigbder[i] = orig;
}
}
/* Test r/s equal to zero */
{
/* (1,1) encoded in DER. */
unsigned char sigcder[8] = {0x30, 0x06, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01};
unsigned char sigc64[64] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
};
rustsecp256k1_v0_1_0_pubkey pubkeyc;
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &pubkeyc, &rsig, msg32) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 1);
sigcder[4] = 0;
sigc64[31] = 0;
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0);
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0);
sigcder[4] = 1;
sigcder[7] = 0;
sigc64[31] = 1;
sigc64[63] = 0;
CHECK(rustsecp256k1_v0_1_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0);
CHECK(rustsecp256k1_v0_1_0_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
CHECK(rustsecp256k1_v0_1_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0);
}
}
void run_recovery_tests(void) {
int i;
for (i = 0; i < count; i++) {
test_ecdsa_recovery_api();
}
for (i = 0; i < 64*count; i++) {
test_ecdsa_recovery_end_to_end();
}
test_ecdsa_recovery_edge_cases();
}
#endif /* SECP256K1_MODULE_RECOVERY_TESTS_H */
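recovery_test_nonce_function above exercises the retry loop in rustsecp256k1_v0_1_0_ecdsa_sign_recoverable: the callback is invoked with an increasing attempt counter until it either returns 0 (which aborts signing) or produces a nonce that is nonzero, below the group order and yields a valid signature; this module passes NULL for algo16. The callback below is hypothetical and only illustrates the contract: it defers to the library's default (RFC 6979 based) nonce function but caps the number of attempts.

/* Hypothetical nonce callback: delegate to the default nonce function but
 * give up after three attempts instead of retrying indefinitely. */
static int limited_nonce_function_sketch(unsigned char *nonce32,
                                         const unsigned char *msg32,
                                         const unsigned char *key32,
                                         const unsigned char *algo16,
                                         void *data,
                                         unsigned int counter) {
    if (counter >= 3) {
        return 0; /* tells the signing loop to stop retrying */
    }
    return rustsecp256k1_v0_1_0_nonce_function_default(nonce32, msg32, key32, algo16, data, counter);
}

Such a callback would be passed as the noncefp argument of rustsecp256k1_v0_1_0_ecdsa_sign_recoverable in place of NULL.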

View File

@ -0,0 +1,74 @@
/**********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_NUM_H
#define SECP256K1_NUM_H
#ifndef USE_NUM_NONE
#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
#endif
#if defined(USE_NUM_GMP)
#include "num_gmp.h"
#else
#error "Please select num implementation"
#endif
/** Copy a number. */
static void rustsecp256k1_v0_1_0_num_copy(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a);
/** Convert a number's absolute value to a binary big-endian string.
* The output buffer must be large enough to hold it. */
static void rustsecp256k1_v0_1_0_num_get_bin(unsigned char *r, unsigned int rlen, const rustsecp256k1_v0_1_0_num *a);
/** Set a number to the value of a binary big-endian string. */
static void rustsecp256k1_v0_1_0_num_set_bin(rustsecp256k1_v0_1_0_num *r, const unsigned char *a, unsigned int alen);
/** Compute a modular inverse. The input must be less than the modulus. */
static void rustsecp256k1_v0_1_0_num_mod_inverse(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *m);
/** Compute the jacobi symbol (a|b). b must be positive and odd. */
static int rustsecp256k1_v0_1_0_num_jacobi(const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b);
/** Compare the absolute value of two numbers. */
static int rustsecp256k1_v0_1_0_num_cmp(const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b);
/** Test whether two numbers are equal (including sign). */
static int rustsecp256k1_v0_1_0_num_eq(const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b);
/** Add two (signed) numbers. */
static void rustsecp256k1_v0_1_0_num_add(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b);
/** Subtract two (signed) numbers. */
static void rustsecp256k1_v0_1_0_num_sub(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b);
/** Multiply two (signed) numbers. */
static void rustsecp256k1_v0_1_0_num_mul(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b);
/** Replace a number by its remainder modulo m. M's sign is ignored. The result is a number between 0 and m-1,
even if r was negative. */
static void rustsecp256k1_v0_1_0_num_mod(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *m);
/** Right-shift the passed number by the given number of bits. */
static void rustsecp256k1_v0_1_0_num_shift(rustsecp256k1_v0_1_0_num *r, int bits);
/** Check whether a number is zero. */
static int rustsecp256k1_v0_1_0_num_is_zero(const rustsecp256k1_v0_1_0_num *a);
/** Check whether a number is one. */
static int rustsecp256k1_v0_1_0_num_is_one(const rustsecp256k1_v0_1_0_num *a);
/** Check whether a number is strictly negative. */
static int rustsecp256k1_v0_1_0_num_is_neg(const rustsecp256k1_v0_1_0_num *a);
/** Change a number's sign. */
static void rustsecp256k1_v0_1_0_num_negate(rustsecp256k1_v0_1_0_num *r);
#endif
#endif /* SECP256K1_NUM_H */
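Everything in this header is guarded by #ifndef USE_NUM_NONE, so builds configured without a bignum backend compile none of it; when the GMP backend is selected the functions operate on big-endian byte strings via set_bin/get_bin. A small internal-style sketch of how the declared API fits together (hypothetical helper; as noted above, the input to the inverse must already be less than the modulus):

/* Sketch (GMP backend only): compute a^(-1) mod m for big-endian byte
 * strings of equal length len (len <= 64, per num_set_bin's checks below). */
static void num_mod_inverse_sketch(unsigned char *out, unsigned int len,
                                   const unsigned char *a_bin,
                                   const unsigned char *m_bin) {
    rustsecp256k1_v0_1_0_num a, m, r;
    rustsecp256k1_v0_1_0_num_set_bin(&a, a_bin, len);
    rustsecp256k1_v0_1_0_num_set_bin(&m, m_bin, len);
    rustsecp256k1_v0_1_0_num_mod_inverse(&r, &a, &m);
    rustsecp256k1_v0_1_0_num_get_bin(out, len, &r);
}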

View File

@ -15,6 +15,6 @@ typedef struct {
mp_limb_t data[2*NUM_LIMBS];
int neg;
int limbs;
} secp256k1_num;
} rustsecp256k1_v0_1_0_num;
#endif /* SECP256K1_NUM_REPR_H */

View File

@ -15,18 +15,18 @@
#include "num.h"
#ifdef VERIFY
static void secp256k1_num_sanity(const secp256k1_num *a) {
static void rustsecp256k1_v0_1_0_num_sanity(const rustsecp256k1_v0_1_0_num *a) {
VERIFY_CHECK(a->limbs == 1 || (a->limbs > 1 && a->data[a->limbs-1] != 0));
}
#else
#define secp256k1_num_sanity(a) do { } while(0)
#define rustsecp256k1_v0_1_0_num_sanity(a) do { } while(0)
#endif
static void secp256k1_num_copy(secp256k1_num *r, const secp256k1_num *a) {
static void rustsecp256k1_v0_1_0_num_copy(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a) {
*r = *a;
}
static void secp256k1_num_get_bin(unsigned char *r, unsigned int rlen, const secp256k1_num *a) {
static void rustsecp256k1_v0_1_0_num_get_bin(unsigned char *r, unsigned int rlen, const rustsecp256k1_v0_1_0_num *a) {
unsigned char tmp[65];
int len = 0;
int shift = 0;
@ -42,7 +42,7 @@ static void secp256k1_num_get_bin(unsigned char *r, unsigned int rlen, const sec
memset(tmp, 0, sizeof(tmp));
}
static void secp256k1_num_set_bin(secp256k1_num *r, const unsigned char *a, unsigned int alen) {
static void rustsecp256k1_v0_1_0_num_set_bin(rustsecp256k1_v0_1_0_num *r, const unsigned char *a, unsigned int alen) {
int len;
VERIFY_CHECK(alen > 0);
VERIFY_CHECK(alen <= 64);
@ -59,7 +59,7 @@ static void secp256k1_num_set_bin(secp256k1_num *r, const unsigned char *a, unsi
}
}
static void secp256k1_num_add_abs(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
static void rustsecp256k1_v0_1_0_num_add_abs(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b) {
mp_limb_t c = mpn_add(r->data, a->data, a->limbs, b->data, b->limbs);
r->limbs = a->limbs;
if (c != 0) {
@ -68,7 +68,7 @@ static void secp256k1_num_add_abs(secp256k1_num *r, const secp256k1_num *a, cons
}
}
static void secp256k1_num_sub_abs(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
static void rustsecp256k1_v0_1_0_num_sub_abs(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b) {
mp_limb_t c = mpn_sub(r->data, a->data, a->limbs, b->data, b->limbs);
(void)c;
VERIFY_CHECK(c == 0);
@ -78,9 +78,9 @@ static void secp256k1_num_sub_abs(secp256k1_num *r, const secp256k1_num *a, cons
}
}
static void secp256k1_num_mod(secp256k1_num *r, const secp256k1_num *m) {
secp256k1_num_sanity(r);
secp256k1_num_sanity(m);
static void rustsecp256k1_v0_1_0_num_mod(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *m) {
rustsecp256k1_v0_1_0_num_sanity(r);
rustsecp256k1_v0_1_0_num_sanity(m);
if (r->limbs >= m->limbs) {
mp_limb_t t[2*NUM_LIMBS];
@ -93,20 +93,20 @@ static void secp256k1_num_mod(secp256k1_num *r, const secp256k1_num *m) {
}
if (r->neg && (r->limbs > 1 || r->data[0] != 0)) {
secp256k1_num_sub_abs(r, m, r);
rustsecp256k1_v0_1_0_num_sub_abs(r, m, r);
r->neg = 0;
}
}
static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *m) {
static void rustsecp256k1_v0_1_0_num_mod_inverse(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *m) {
int i;
mp_limb_t g[NUM_LIMBS+1];
mp_limb_t u[NUM_LIMBS+1];
mp_limb_t v[NUM_LIMBS+1];
mp_size_t sn;
mp_size_t gn;
secp256k1_num_sanity(a);
secp256k1_num_sanity(m);
rustsecp256k1_v0_1_0_num_sanity(a);
rustsecp256k1_v0_1_0_num_sanity(m);
/** mpn_gcdext computes: (G,S) = gcdext(U,V), where
* * G = gcd(U,V)
@ -144,11 +144,11 @@ static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a,
memset(v, 0, sizeof(v));
}
static int secp256k1_num_jacobi(const secp256k1_num *a, const secp256k1_num *b) {
static int rustsecp256k1_v0_1_0_num_jacobi(const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b) {
int ret;
mpz_t ga, gb;
secp256k1_num_sanity(a);
secp256k1_num_sanity(b);
rustsecp256k1_v0_1_0_num_sanity(a);
rustsecp256k1_v0_1_0_num_sanity(b);
VERIFY_CHECK(!b->neg && (b->limbs > 0) && (b->data[0] & 1));
mpz_inits(ga, gb, NULL);
@ -166,19 +166,19 @@ static int secp256k1_num_jacobi(const secp256k1_num *a, const secp256k1_num *b)
return ret;
}
static int secp256k1_num_is_one(const secp256k1_num *a) {
static int rustsecp256k1_v0_1_0_num_is_one(const rustsecp256k1_v0_1_0_num *a) {
return (a->limbs == 1 && a->data[0] == 1);
}
static int secp256k1_num_is_zero(const secp256k1_num *a) {
static int rustsecp256k1_v0_1_0_num_is_zero(const rustsecp256k1_v0_1_0_num *a) {
return (a->limbs == 1 && a->data[0] == 0);
}
static int secp256k1_num_is_neg(const secp256k1_num *a) {
static int rustsecp256k1_v0_1_0_num_is_neg(const rustsecp256k1_v0_1_0_num *a) {
return (a->limbs > 1 || a->data[0] != 0) && a->neg;
}
static int secp256k1_num_cmp(const secp256k1_num *a, const secp256k1_num *b) {
static int rustsecp256k1_v0_1_0_num_cmp(const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b) {
if (a->limbs > b->limbs) {
return 1;
}
@ -188,54 +188,54 @@ static int secp256k1_num_cmp(const secp256k1_num *a, const secp256k1_num *b) {
return mpn_cmp(a->data, b->data, a->limbs);
}
static int secp256k1_num_eq(const secp256k1_num *a, const secp256k1_num *b) {
static int rustsecp256k1_v0_1_0_num_eq(const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b) {
if (a->limbs > b->limbs) {
return 0;
}
if (a->limbs < b->limbs) {
return 0;
}
if ((a->neg && !secp256k1_num_is_zero(a)) != (b->neg && !secp256k1_num_is_zero(b))) {
if ((a->neg && !rustsecp256k1_v0_1_0_num_is_zero(a)) != (b->neg && !rustsecp256k1_v0_1_0_num_is_zero(b))) {
return 0;
}
return mpn_cmp(a->data, b->data, a->limbs) == 0;
}
static void secp256k1_num_subadd(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b, int bneg) {
static void rustsecp256k1_v0_1_0_num_subadd(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b, int bneg) {
if (!(b->neg ^ bneg ^ a->neg)) { /* a and b have the same sign */
r->neg = a->neg;
if (a->limbs >= b->limbs) {
secp256k1_num_add_abs(r, a, b);
rustsecp256k1_v0_1_0_num_add_abs(r, a, b);
} else {
secp256k1_num_add_abs(r, b, a);
rustsecp256k1_v0_1_0_num_add_abs(r, b, a);
}
} else {
if (secp256k1_num_cmp(a, b) > 0) {
if (rustsecp256k1_v0_1_0_num_cmp(a, b) > 0) {
r->neg = a->neg;
secp256k1_num_sub_abs(r, a, b);
rustsecp256k1_v0_1_0_num_sub_abs(r, a, b);
} else {
r->neg = b->neg ^ bneg;
secp256k1_num_sub_abs(r, b, a);
rustsecp256k1_v0_1_0_num_sub_abs(r, b, a);
}
}
}
static void secp256k1_num_add(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
secp256k1_num_sanity(a);
secp256k1_num_sanity(b);
secp256k1_num_subadd(r, a, b, 0);
static void rustsecp256k1_v0_1_0_num_add(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b) {
rustsecp256k1_v0_1_0_num_sanity(a);
rustsecp256k1_v0_1_0_num_sanity(b);
rustsecp256k1_v0_1_0_num_subadd(r, a, b, 0);
}
static void secp256k1_num_sub(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
secp256k1_num_sanity(a);
secp256k1_num_sanity(b);
secp256k1_num_subadd(r, a, b, 1);
static void rustsecp256k1_v0_1_0_num_sub(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b) {
rustsecp256k1_v0_1_0_num_sanity(a);
rustsecp256k1_v0_1_0_num_sanity(b);
rustsecp256k1_v0_1_0_num_subadd(r, a, b, 1);
}
static void secp256k1_num_mul(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
static void rustsecp256k1_v0_1_0_num_mul(rustsecp256k1_v0_1_0_num *r, const rustsecp256k1_v0_1_0_num *a, const rustsecp256k1_v0_1_0_num *b) {
mp_limb_t tmp[2*NUM_LIMBS+1];
secp256k1_num_sanity(a);
secp256k1_num_sanity(b);
rustsecp256k1_v0_1_0_num_sanity(a);
rustsecp256k1_v0_1_0_num_sanity(b);
VERIFY_CHECK(a->limbs + b->limbs <= 2*NUM_LIMBS+1);
if ((a->limbs==1 && a->data[0]==0) || (b->limbs==1 && b->data[0]==0)) {
@ -259,7 +259,7 @@ static void secp256k1_num_mul(secp256k1_num *r, const secp256k1_num *a, const se
memset(tmp, 0, sizeof(tmp));
}
static void secp256k1_num_shift(secp256k1_num *r, int bits) {
static void rustsecp256k1_v0_1_0_num_shift(rustsecp256k1_v0_1_0_num *r, int bits) {
if (bits % GMP_NUMB_BITS) {
/* Shift within limbs. */
mpn_rshift(r->data, r->data, r->limbs, bits % GMP_NUMB_BITS);
@ -281,7 +281,7 @@ static void secp256k1_num_shift(secp256k1_num *r, int bits) {
}
}
static void secp256k1_num_negate(secp256k1_num *r) {
static void rustsecp256k1_v0_1_0_num_negate(rustsecp256k1_v0_1_0_num *r) {
r->neg ^= 1;
}
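
The sign handling in rustsecp256k1_v0_1_0_num_subadd above can be hard to follow through the GMP limb plumbing. Below is a self-contained toy sketch of the same rule, using a single unsigned magnitude instead of limb arrays; the toy_num type and all names are invented for illustration and do not appear in the library:

#include <stdio.h>

/* Toy sign-magnitude number: mag plays the role of the limb array, neg is 0 or 1. */
typedef struct { unsigned int mag; int neg; } toy_num;

/* Same decision tree as rustsecp256k1_v0_1_0_num_subadd: bneg flips b's sign,
 * equal effective signs add magnitudes, opposite signs subtract the smaller one. */
static toy_num toy_subadd(toy_num a, toy_num b, int bneg) {
    toy_num r;
    int b_eff_neg = b.neg ^ bneg;
    if (a.neg == b_eff_neg) {
        r.mag = a.mag + b.mag;   /* same sign: add magnitudes, keep a's sign */
        r.neg = a.neg;
    } else if (a.mag > b.mag) {
        r.mag = a.mag - b.mag;   /* |a| dominates: result takes a's sign */
        r.neg = a.neg;
    } else {
        r.mag = b.mag - a.mag;   /* |b| dominates (or equal): result takes b's effective sign */
        r.neg = b_eff_neg;
    }
    return r;
}

int main(void) {
    toy_num a = { 7, 0 };
    toy_num b = { 10, 0 };
    toy_num d = toy_subadd(a, b, 1);            /* 7 - 10 */
    printf("%s%u\n", d.neg ? "-" : "", d.mag);  /* prints -3 */
    return 0;
}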

Some files were not shown because too many files have changed in this diff.