/***********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                              *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
 ***********************************************************************/

#ifndef SECP256K1_FIELD_IMPL_H
#define SECP256K1_FIELD_IMPL_H

#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
#endif

#include "util.h"
#include "num.h"

#if defined(SECP256K1_WIDEMUL_INT128)
#include "field_5x52_impl.h"
#elif defined(SECP256K1_WIDEMUL_INT64)
#include "field_10x26_impl.h"
#else
#error "Please select wide multiplication implementation"
#endif

SECP256K1_INLINE static int rustsecp256k1_v0_4_0_fe_equal(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b) {
    rustsecp256k1_v0_4_0_fe na;
    rustsecp256k1_v0_4_0_fe_negate(&na, a, 1);
    rustsecp256k1_v0_4_0_fe_add(&na, b);
    return rustsecp256k1_v0_4_0_fe_normalizes_to_zero(&na);
}

SECP256K1_INLINE static int rustsecp256k1_v0_4_0_fe_equal_var(const rustsecp256k1_v0_4_0_fe *a, const rustsecp256k1_v0_4_0_fe *b) {
    rustsecp256k1_v0_4_0_fe na;
    rustsecp256k1_v0_4_0_fe_negate(&na, a, 1);
    rustsecp256k1_v0_4_0_fe_add(&na, b);
    return rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&na);
}

static int rustsecp256k1_v0_4_0_fe_sqrt(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) {
    /** Given that p is congruent to 3 mod 4, we can compute the square root of
     *  a mod p as the (p+1)/4'th power of a.
     *
     *  As (p+1)/4 is an even number, it will have the same result for a and for
     *  (-a). Only one of these two numbers actually has a square root however,
     *  so we test at the end by squaring and comparing to the input.
     *  Also because (p+1)/4 is an even number, the computed square root is
     *  itself always a square (a ** ((p+1)/4) is the square of a ** ((p+1)/8)).
     */
    rustsecp256k1_v0_4_0_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
    int j;

    VERIFY_CHECK(r != a);

    /** The binary representation of (p + 1)/4 has 3 blocks of 1s, with lengths in
     *  { 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
     *  1, [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
     */

    rustsecp256k1_v0_4_0_fe_sqr(&x2, a);
    rustsecp256k1_v0_4_0_fe_mul(&x2, &x2, a);

    rustsecp256k1_v0_4_0_fe_sqr(&x3, &x2);
    rustsecp256k1_v0_4_0_fe_mul(&x3, &x3, a);

    x6 = x3;
    for (j=0; j<3; j++) { rustsecp256k1_v0_4_0_fe_sqr(&x6, &x6); }
    rustsecp256k1_v0_4_0_fe_mul(&x6, &x6, &x3);

    x9 = x6;
    for (j=0; j<3; j++) { rustsecp256k1_v0_4_0_fe_sqr(&x9, &x9); }
    rustsecp256k1_v0_4_0_fe_mul(&x9, &x9, &x3);

    x11 = x9;
    for (j=0; j<2; j++) { rustsecp256k1_v0_4_0_fe_sqr(&x11, &x11); }
    rustsecp256k1_v0_4_0_fe_mul(&x11, &x11, &x2);

    x22 = x11;
    for (j=0; j<11; j++) { rustsecp256k1_v0_4_0_fe_sqr(&x22, &x22); }
    rustsecp256k1_v0_4_0_fe_mul(&x22, &x22, &x11);

    x44 = x22;
    for (j=0; j<22; j++) { rustsecp256k1_v0_4_0_fe_sqr(&x44, &x44); }
    rustsecp256k1_v0_4_0_fe_mul(&x44, &x44, &x22);

    x88 = x44;
    for (j=0; j<44; j++) { rustsecp256k1_v0_4_0_fe_sqr(&x88, &x88); }
    rustsecp256k1_v0_4_0_fe_mul(&x88, &x88, &x44);

    x176 = x88;
    for (j=0; j<88; j++) { rustsecp256k1_v0_4_0_fe_sqr(&x176, &x176); }
    rustsecp256k1_v0_4_0_fe_mul(&x176, &x176, &x88);

    x220 = x176;
    for (j=0; j<44; j++) { rustsecp256k1_v0_4_0_fe_sqr(&x220, &x220); }
    rustsecp256k1_v0_4_0_fe_mul(&x220, &x220, &x44);

    x223 = x220;
    for (j=0; j<3; j++) { rustsecp256k1_v0_4_0_fe_sqr(&x223, &x223); }
    rustsecp256k1_v0_4_0_fe_mul(&x223, &x223, &x3);

    /* The final result is then assembled using a sliding window over the blocks. */
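    /* For reference, a worked check of the exponent assembled below, derived
     * from the squaring and multiplication counts that follow:
     *   (((2^223 - 1)*2^23 + (2^22 - 1))*2^6 + (2^2 - 1))*2^2
     *     = 2^254 - 2^30 - 244
     *     = (p + 1)/4.
     */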
    t1 = x223;
    for (j=0; j<23; j++) { rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1); }
    rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, &x22);
    for (j=0; j<6; j++) { rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1); }
    rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, &x2);
    rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1);
    rustsecp256k1_v0_4_0_fe_sqr(r, &t1);

    /* Check that a square root was actually calculated */

    rustsecp256k1_v0_4_0_fe_sqr(&t1, r);
    return rustsecp256k1_v0_4_0_fe_equal(&t1, a);
}

static void rustsecp256k1_v0_4_0_fe_inv(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) {
    rustsecp256k1_v0_4_0_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
    int j;

    /** The binary representation of (p - 2) has 5 blocks of 1s, with lengths in
     *  { 1, 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
     *  [1], [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
     */

    rustsecp256k1_v0_4_0_fe_sqr(&x2, a);
    rustsecp256k1_v0_4_0_fe_mul(&x2, &x2, a);

    rustsecp256k1_v0_4_0_fe_sqr(&x3, &x2);
    rustsecp256k1_v0_4_0_fe_mul(&x3, &x3, a);

    x6 = x3;
    for (j=0; j<3; j++) { rustsecp256k1_v0_4_0_fe_sqr(&x6, &x6); }
    rustsecp256k1_v0_4_0_fe_mul(&x6, &x6, &x3);

    x9 = x6;
    for (j=0; j<3; j++) { rustsecp256k1_v0_4_0_fe_sqr(&x9, &x9); }
    rustsecp256k1_v0_4_0_fe_mul(&x9, &x9, &x3);

    x11 = x9;
    for (j=0; j<2; j++) { rustsecp256k1_v0_4_0_fe_sqr(&x11, &x11); }
    rustsecp256k1_v0_4_0_fe_mul(&x11, &x11, &x2);

    x22 = x11;
    for (j=0; j<11; j++) { rustsecp256k1_v0_4_0_fe_sqr(&x22, &x22); }
    rustsecp256k1_v0_4_0_fe_mul(&x22, &x22, &x11);

    x44 = x22;
    for (j=0; j<22; j++) { rustsecp256k1_v0_4_0_fe_sqr(&x44, &x44); }
    rustsecp256k1_v0_4_0_fe_mul(&x44, &x44, &x22);

    x88 = x44;
    for (j=0; j<44; j++) { rustsecp256k1_v0_4_0_fe_sqr(&x88, &x88); }
    rustsecp256k1_v0_4_0_fe_mul(&x88, &x88, &x44);

    x176 = x88;
    for (j=0; j<88; j++) { rustsecp256k1_v0_4_0_fe_sqr(&x176, &x176); }
    rustsecp256k1_v0_4_0_fe_mul(&x176, &x176, &x88);

    x220 = x176;
    for (j=0; j<44; j++) { rustsecp256k1_v0_4_0_fe_sqr(&x220, &x220); }
    rustsecp256k1_v0_4_0_fe_mul(&x220, &x220, &x44);

    x223 = x220;
    for (j=0; j<3; j++) { rustsecp256k1_v0_4_0_fe_sqr(&x223, &x223); }
    rustsecp256k1_v0_4_0_fe_mul(&x223, &x223, &x3);

    /* The final result is then assembled using a sliding window over the blocks. */

    t1 = x223;
    for (j=0; j<23; j++) { rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1); }
    rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, &x22);
    for (j=0; j<5; j++) { rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1); }
    rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, a);
    for (j=0; j<3; j++) { rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1); }
    rustsecp256k1_v0_4_0_fe_mul(&t1, &t1, &x2);
    for (j=0; j<2; j++) { rustsecp256k1_v0_4_0_fe_sqr(&t1, &t1); }
    rustsecp256k1_v0_4_0_fe_mul(r, a, &t1);
}

static void rustsecp256k1_v0_4_0_fe_inv_var(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a) {
#if defined(USE_FIELD_INV_BUILTIN)
    rustsecp256k1_v0_4_0_fe_inv(r, a);
#elif defined(USE_FIELD_INV_NUM)
    rustsecp256k1_v0_4_0_num n, m;
    static const rustsecp256k1_v0_4_0_fe negone = SECP256K1_FE_CONST(
        0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL,
        0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xFFFFFC2EUL
    );
    /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
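    /* For reference, the bytes below are the big-endian encoding of
     * p = 2^256 - 2^32 - 977, the modulus against which the num-based inverse
     * below reduces (negone above is p - 1). */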
    static const unsigned char prime[32] = {
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
    };
    unsigned char b[32];
    int res;
    rustsecp256k1_v0_4_0_fe c = *a;
    rustsecp256k1_v0_4_0_fe_normalize_var(&c);
    rustsecp256k1_v0_4_0_fe_get_b32(b, &c);
    rustsecp256k1_v0_4_0_num_set_bin(&n, b, 32);
    rustsecp256k1_v0_4_0_num_set_bin(&m, prime, 32);
    rustsecp256k1_v0_4_0_num_mod_inverse(&n, &n, &m);
    rustsecp256k1_v0_4_0_num_get_bin(b, 32, &n);
    res = rustsecp256k1_v0_4_0_fe_set_b32(r, b);
    (void)res;
    VERIFY_CHECK(res);
    /* Verify the result is the (unique) valid inverse using non-GMP code. */
    rustsecp256k1_v0_4_0_fe_mul(&c, &c, r);
    rustsecp256k1_v0_4_0_fe_add(&c, &negone);
    CHECK(rustsecp256k1_v0_4_0_fe_normalizes_to_zero_var(&c));
#else
#error "Please select field inverse implementation"
#endif
}

static void rustsecp256k1_v0_4_0_fe_inv_all_var(rustsecp256k1_v0_4_0_fe *r, const rustsecp256k1_v0_4_0_fe *a, size_t len) {
    rustsecp256k1_v0_4_0_fe u;
    size_t i;
    if (len < 1) {
        return;
    }

    VERIFY_CHECK((r + len <= a) || (a + len <= r));

    r[0] = a[0];

    i = 0;
    while (++i < len) {
        rustsecp256k1_v0_4_0_fe_mul(&r[i], &r[i - 1], &a[i]);
    }

    rustsecp256k1_v0_4_0_fe_inv_var(&u, &r[--i]);

    while (i > 0) {
        size_t j = i--;
        rustsecp256k1_v0_4_0_fe_mul(&r[j], &r[i], &u);
        rustsecp256k1_v0_4_0_fe_mul(&u, &u, &a[j]);
    }

    r[0] = u;
}

static int rustsecp256k1_v0_4_0_fe_is_quad_var(const rustsecp256k1_v0_4_0_fe *a) {
#ifndef USE_NUM_NONE
    unsigned char b[32];
    rustsecp256k1_v0_4_0_num n;
    rustsecp256k1_v0_4_0_num m;
    /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
    static const unsigned char prime[32] = {
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
    };

    rustsecp256k1_v0_4_0_fe c = *a;
    rustsecp256k1_v0_4_0_fe_normalize_var(&c);
    rustsecp256k1_v0_4_0_fe_get_b32(b, &c);
    rustsecp256k1_v0_4_0_num_set_bin(&n, b, 32);
    rustsecp256k1_v0_4_0_num_set_bin(&m, prime, 32);
    return rustsecp256k1_v0_4_0_num_jacobi(&n, &m) >= 0;
#else
    rustsecp256k1_v0_4_0_fe r;
    return rustsecp256k1_v0_4_0_fe_sqrt(&r, a);
#endif
}

static const rustsecp256k1_v0_4_0_fe rustsecp256k1_v0_4_0_fe_one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1);

#endif /* SECP256K1_FIELD_IMPL_H */