/***********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
#ifndef SECP256K1_FIELD_IMPL_H
#define SECP256K1_FIELD_IMPL_H
#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
#endif
#include "util.h"
#include "num.h"
#if defined(SECP256K1_WIDEMUL_INT128)
#include "field_5x52_impl.h"
#elif defined(SECP256K1_WIDEMUL_INT64)
#include "field_10x26_impl.h"
#else
#error "Please select wide multiplication implementation"
#endif
/** Test whether a and b represent the same field element.
 *  Computes b + (-a) and checks whether the sum normalizes to zero. */
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_fe_equal(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b) {
    rustsecp256k1_v0_4_0_fe diff;
    rustsecp256k1_v0_4_0_fe_negate(&diff, a, 1);
    rustsecp256k1_v0_4_0_fe_add(&diff, b);
    return rustsecp256k1_v0_4_0_fe_normalizes_to_zero(&diff);
}
/** Variable-time variant of fe_equal: test whether a and b represent the
 *  same field element, using the variable-time normalization check. */
SECP256K1_INLINE static int rustsecp256k1_v0_4_0_fe_equal_var(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b) {
    rustsecp256k1_v0_4_0_fe na;
    rustsecp256k1_v0_4_0_fe_negate(&na, a, 1);
    rustsecp256k1_v0_4_0_fe_add(&na, b);
    return rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&na);
}
/** Compute a square root of a modulo the field prime p, writing it to r.
 *  Returns 1 if a is a quadratic residue (a root was found), 0 otherwise.
 *  r must not alias a. Even on failure, r holds a**((p+1)/4). */
static int rustsecp256k1_v0_4_0_fe_sqrt(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) {
    /** Given that p is congruent to 3 mod 4, we can compute the square root of
     *  a mod p as the (p+1)/4'th power of a.
     *
     *  As (p+1)/4 is an even number, it will have the same result for a and for
     *  (-a). Only one of these two numbers actually has a square root however,
     *  so we test at the end by squaring and comparing to the input.
     *  Also because (p+1)/4 is an even number, the computed square root is
     *  itself always a square (a ** ((p+1)/4) is the square of a ** ((p+1)/8)).
     */
    rustsecp256k1_v0_4_0_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
    int j;

    VERIFY_CHECK(r != a);

    /** The binary representation of (p + 1)/4 has 3 blocks of 1s, with lengths in
     *  { 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
     *  1, [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
     */

    rustsecp256k1_v0_4_0_fe_sqr(&x2, a);
    rustsecp256k1_v0_4_0_fe_mul(&x2, &x2, a);

    rustsecp256k1_v0_4_0_fe_sqr(&x3, &x2);
    rustsecp256k1_v0_4_0_fe_mul(&x3, &x3, a);

    x6 = x3;
    for (j=0; j<3; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&x6, &x6);
    }
    rustsecp256k1_v0_4_0_fe_mul(&x6, &x6, &x3);

    x9 = x6;
    for (j=0; j<3; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&x9, &x9);
    }
    rustsecp256k1_v0_4_0_fe_mul(&x9, &x9, &x3);

    x11 = x9;
    for (j=0; j<2; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&x11, &x11);
    }
    rustsecp256k1_v0_4_0_fe_mul(&x11, &x11, &x2);

    x22 = x11;
    for (j=0; j<11; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&x22, &x22);
    }
    rustsecp256k1_v0_4_0_fe_mul(&x22, &x22, &x11);

    x44 = x22;
    for (j=0; j<22; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&x44, &x44);
    }
    rustsecp256k1_v0_4_0_fe_mul(&x44, &x44, &x22);

    x88 = x44;
    for (j=0; j<44; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&x88, &x88);
    }
    rustsecp256k1_v0_4_0_fe_mul(&x88, &x88, &x44);

    x176 = x88;
    for (j=0; j<88; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&x176, &x176);
    }
    rustsecp256k1_v0_4_0_fe_mul(&x176, &x176, &x88);

    x220 = x176;
    for (j=0; j<44; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&x220, &x220);
    }
    rustsecp256k1_v0_4_0_fe_mul(&x220, &x220, &x44);

    x223 = x220;
    for (j=0; j<3; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&x223, &x223);
    }
    rustsecp256k1_v0_4_0_fe_mul(&x223, &x223, &x3);

    /* The final result is then assembled using a sliding window over the blocks. */

    t1 = x223;
    for (j=0; j<23; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1);
    }
    rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, &x22);
    for (j=0; j<6; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1);
    }
    rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, &x2);
    rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1);
    rustsecp256k1_v0_4_0_fe_sqr(r, &t1);

    /* Check that a square root was actually calculated */

    rustsecp256k1_v0_4_0_fe_sqr(&t1, r);
    return rustsecp256k1_v0_4_0_fe_equal(&t1, a);
}
/** Compute the modular inverse of a (mod the field prime p) into r, as
 *  a**(p-2) via Fermat's little theorem, using a fixed addition chain.
 *  Runs in time independent of the value of a (no data-dependent branches). */
static void rustsecp256k1_v0_4_0_fe_inv(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) {
    rustsecp256k1_v0_4_0_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
    int j;

    /** The binary representation of (p - 2) has 5 blocks of 1s, with lengths in
     *  { 1, 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
     *  [1], [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
     */

    rustsecp256k1_v0_4_0_fe_sqr(&x2, a);
    rustsecp256k1_v0_4_0_fe_mul(&x2, &x2, a);

    rustsecp256k1_v0_4_0_fe_sqr(&x3, &x2);
    rustsecp256k1_v0_4_0_fe_mul(&x3, &x3, a);

    x6 = x3;
    for (j=0; j<3; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&x6, &x6);
    }
    rustsecp256k1_v0_4_0_fe_mul(&x6, &x6, &x3);

    x9 = x6;
    for (j=0; j<3; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&x9, &x9);
    }
    rustsecp256k1_v0_4_0_fe_mul(&x9, &x9, &x3);

    x11 = x9;
    for (j=0; j<2; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&x11, &x11);
    }
    rustsecp256k1_v0_4_0_fe_mul(&x11, &x11, &x2);

    x22 = x11;
    for (j=0; j<11; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&x22, &x22);
    }
    rustsecp256k1_v0_4_0_fe_mul(&x22, &x22, &x11);

    x44 = x22;
    for (j=0; j<22; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&x44, &x44);
    }
    rustsecp256k1_v0_4_0_fe_mul(&x44, &x44, &x22);

    x88 = x44;
    for (j=0; j<44; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&x88, &x88);
    }
    rustsecp256k1_v0_4_0_fe_mul(&x88, &x88, &x44);

    x176 = x88;
    for (j=0; j<88; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&x176, &x176);
    }
    rustsecp256k1_v0_4_0_fe_mul(&x176, &x176, &x88);

    x220 = x176;
    for (j=0; j<44; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&x220, &x220);
    }
    rustsecp256k1_v0_4_0_fe_mul(&x220, &x220, &x44);

    x223 = x220;
    for (j=0; j<3; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&x223, &x223);
    }
    rustsecp256k1_v0_4_0_fe_mul(&x223, &x223, &x3);

    /* The final result is then assembled using a sliding window over the blocks. */

    t1 = x223;
    for (j=0; j<23; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1);
    }
    rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, &x22);
    for (j=0; j<5; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1);
    }
    rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, a);
    for (j=0; j<3; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1);
    }
    rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, &x2);
    for (j=0; j<2; j++) {
        rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1);
    }
    rustsecp256k1_v0_4_0_fe_mul(r, a, &t1);
}
/** Variable-time modular inverse of a into r.
 *  Depending on build configuration, either delegates to the constant-time
 *  addition-chain inverse, or uses the num (bignum) extended-gcd inverse and
 *  cross-checks the result with non-GMP field arithmetic. */
static void rustsecp256k1_v0_4_0_fe_inv_var(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) {
#if defined(USE_FIELD_INV_BUILTIN)
    rustsecp256k1_v0_4_0_fe_inv(r, a);
#elif defined(USE_FIELD_INV_NUM)
    rustsecp256k1_v0_4_0_num n, m;
    static const rustsecp256k1_v0_4_0_fe negone = SECP256K1_FE_CONST(
        0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL,
        0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xFFFFFC2EUL
    );
    /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
    static const unsigned char prime[32] = {
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
    };
    unsigned char b[32];
    int res;
    rustsecp256k1_v0_4_0_fe c = *a;
    rustsecp256k1_v0_4_0_fe_normalize_var(&c);
    rustsecp256k1_v0_4_0_fe_get_b32(b, &c);
    rustsecp256k1_v0_4_0_num_set_bin(&n, b, 32);
    rustsecp256k1_v0_4_0_num_set_bin(&m, prime, 32);
    rustsecp256k1_v0_4_0_num_mod_inverse(&n, &n, &m);
    rustsecp256k1_v0_4_0_num_get_bin(b, 32, &n);
    res = rustsecp256k1_v0_4_0_fe_set_b32(r, b);
    (void)res;
    VERIFY_CHECK(res);
    /* Verify the result is the (unique) valid inverse using non-GMP code. */
    rustsecp256k1_v0_4_0_fe_mul(&c, &c, r);
    rustsecp256k1_v0_4_0_fe_add(&c, &negone);
    CHECK(rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&c));
#else
#error "Please select field inverse implementation"
#endif
}
/** Batch-invert len field elements from a into r using Montgomery's trick:
 *  one modular inverse plus 3*(len-1) multiplications instead of len inverses.
 *  r and a may alias exactly, but must not partially overlap. */
static void rustsecp256k1_v0_4_0_fe_inv_all_var(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, size_t len) {
    rustsecp256k1_v0_4_0_fe u;
    size_t i;
    if (len < 1) {
        return;
    }

    /* Disallow partial overlap between the input and output ranges. */
    VERIFY_CHECK((r + len <= a) || (a + len <= r));

    /* Forward pass: r[i] = a[0] * a[1] * ... * a[i]. */
    r[0] = a[0];

    i = 0;
    while (++i < len) {
        rustsecp256k1_v0_4_0_fe_mul(&r[i], &r[i - 1], &a[i]);
    }

    /* Invert the product of all inputs once. */
    rustsecp256k1_v0_4_0_fe_inv_var(&u, &r[--i]);

    /* Backward pass: peel off one factor at a time.
     * Invariant: u = (a[0] * ... * a[i])^-1 at the top of each iteration. */
    while (i > 0) {
        size_t j = i--;
        rustsecp256k1_v0_4_0_fe_mul(&r[j], &r[i], &u);
        rustsecp256k1_v0_4_0_fe_mul(&u, &u, &a[j]);
    }

    r[0] = u;
}
/** Return whether a is a quadratic residue mod the field prime (variable time).
 *  With num support, uses the Jacobi symbol; otherwise falls back to actually
 *  attempting a square root. */
static int rustsecp256k1_v0_4_0_fe_is_quad_var(const rustsecp256k1_v0_4_0_fe *a) {
#ifndef USE_NUM_NONE
    unsigned char b[32];
    rustsecp256k1_v0_4_0_num n;
    rustsecp256k1_v0_4_0_num m;
    /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
    static const unsigned char prime[32] = {
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
    };

    rustsecp256k1_v0_4_0_fe c = *a;
    rustsecp256k1_v0_4_0_fe_normalize_var(&c);
    rustsecp256k1_v0_4_0_fe_get_b32(b, &c);
    rustsecp256k1_v0_4_0_num_set_bin(&n, b, 32);
    rustsecp256k1_v0_4_0_num_set_bin(&m, prime, 32);
    /* Jacobi symbol is 1 for residues, -1 for non-residues, 0 for zero;
     * ">= 0" thus treats zero as a residue. */
    return rustsecp256k1_v0_4_0_num_jacobi(&n, &m) >= 0;
#else
    rustsecp256k1_v0_4_0_fe r;
    return rustsecp256k1_v0_4_0_fe_sqrt(&r, a);
#endif
}
/* Constant field element with value 1. */
static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_fe_one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1);
#endif /* SECP256K1_FIELD_IMPL_H */