rust-secp256k1-unsafe-fast/secp256k1-sys/depend/secp256k1/src/int128_struct_impl.h

193 lines
7.8 KiB
C
Raw Normal View History

Upgrade the vendored libsecp256k1 code `libsecp256k1` v0.2.0 was just released. Update the vendored code using `./vendor-libsecp.sh depend 0_8_0 21ffe4b` ``` git show 21ffe4b commit 21ffe4b22a9683cf24ae0763359e401d1284cc7a (tag: v0.2.0) Merge: 8c949f5 e025ccd Author: Pieter Wuille <pieter@wuille.net> Date: Mon Dec 12 17:00:52 2022 -0500 Merge bitcoin-core/secp256k1#1055: Prepare initial release e025ccdf7473702a76bb13d763dc096548ffefba release: prepare for initial release 0.2.0 (Jonas Nick) 6d1784a2e2c1c5a8d89ffb08a7f76fa15e84fff5 build: add missing files to EXTRA_DIST (Jonas Nick) 13bf1b6b324f2ed1c1fb4c8d17a4febd3556839e changelog: make order of change types match keepachangelog.com (Jonas Nick) b1f992a552785395d2e60b10862626fd11f66f84 doc: improve release process (Jonas Nick) ad39e2dc417f85c1577a6a6a9c519f5c60453def build: change package version to 0.1.0-dev (Jonas Nick) 90618e9263ebc2a0d73d487d6d94fd3af96b973c doc: move CHANGELOG from doc/ to root directory (Jonas Nick) Pull request description: Based on #964 ACKs for top commit: sipa: ACK e025ccdf7473702a76bb13d763dc096548ffefba Tree-SHA512: b9ab71d7362537d383a32b5e321ef44069f00e3e92340375bcd662267bc5a60c2bad60222998e6602cfac24ad65efb23d772eac37c86065036b90ef090b54c49 ``` Requires a new version of `secp256k1-sys`, use v0.8.0 - Update the `secp256k1-sys` manifest (including links field) - Update symbols to use 0_8_0 - Add a changelog entry - depend on the new version in `secp256k1` Which in turn requires a new version of `secp256k1`, use v0.26.0
2022-12-20 21:11:14 +00:00
#ifndef SECP256K1_INT128_STRUCT_IMPL_H
#define SECP256K1_INT128_STRUCT_IMPL_H
#include "int128.h"
#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64)) /* MSVC */
# include <intrin.h>
# if defined(_M_ARM64) || defined(SECP256K1_MSVC_MULH_TEST_OVERRIDE)
/* On ARM64 MSVC, use __(u)mulh for the upper half of 64x64 multiplications.
(Define SECP256K1_MSVC_MULH_TEST_OVERRIDE to test this code path on X64,
which supports both __(u)mulh and _umul128.) */
# if defined(SECP256K1_MSVC_MULH_TEST_OVERRIDE)
# pragma message(__FILE__ ": SECP256K1_MSVC_MULH_TEST_OVERRIDE is defined, forcing use of __(u)mulh.")
# endif
/* 64x64->128 unsigned multiply: returns the low 64 bits of a*b and stores the
 * high 64 bits in *hi. Uses the MSVC ARM64 __umulh intrinsic for the upper
 * half (ARM64 MSVC has no _umul128). */
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_0_umul128(uint64_t a, uint64_t b, uint64_t* hi) {
    *hi = __umulh(a, b);
    /* Low half: unsigned multiplication wraps mod 2^64, which is exactly
     * the low 64 bits of the full product. */
    return a * b;
}
/* 64x64->128 signed multiply: returns the low 64 bits of a*b and stores the
 * high 64 bits in *hi. Uses the MSVC ARM64 __mulh intrinsic for the upper half.
 * The low half is computed in unsigned arithmetic (wrap-around mod 2^64 is
 * well defined for uint64_t, whereas signed overflow would be UB) and then
 * converted back explicitly; the explicit cast documents the intentional
 * unsigned->signed narrowing instead of relying on an implicit conversion
 * in the return statement. */
static SECP256K1_INLINE int64_t rustsecp256k1_v0_8_0_mul128(int64_t a, int64_t b, int64_t* hi) {
    *hi = __mulh(a, b);
    return (int64_t)((uint64_t)a * (uint64_t)b);
}
# else
/* On x86_64 MSVC, use native _(u)mul128 for 64x64->128 multiplications. */
# define rustsecp256k1_v0_8_0_umul128 _umul128
# define rustsecp256k1_v0_8_0_mul128 _mul128
# endif
#else
/* On other systems, emulate 64x64->128 multiplications using 32x32->64 multiplications. */
/* 64x64->128 unsigned multiply, emulated with four 32x32->64 partial
 * products (schoolbook method). Returns the low 64 bits of a*b and stores
 * the high 64 bits in *hi. */
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_0_umul128(uint64_t a, uint64_t b, uint64_t* hi) {
    /* Partial products of the 32-bit halves: pXY = aX * bY. */
    uint64_t p00 = (uint64_t)(uint32_t)a * (uint32_t)b;
    uint64_t p01 = (uint32_t)a * (b >> 32);
    uint64_t p10 = (a >> 32) * (uint32_t)b;
    uint64_t p11 = (a >> 32) * (b >> 32);
    /* Sum of all contributions to bits 32..95; cannot overflow 64 bits. */
    uint64_t mid = (p00 >> 32) + (uint32_t)p01 + (uint32_t)p10;
    *hi = p11 + (p01 >> 32) + (p10 >> 32) + (mid >> 32);
    return (mid << 32) + (uint32_t)p00;
}
/* 64x64->128 signed multiply, emulated with 32x32->64 partial products.
 * Returns the low 64 bits of a*b and stores the high 64 bits in *hi.
 * The cross and high partial products use signed arithmetic so the signs of
 * a and b propagate into the high limb (none of them can overflow int64_t);
 * the low product is done unsigned so its wrap mod 2^64 is well defined. */
static SECP256K1_INLINE int64_t rustsecp256k1_v0_8_0_mul128(int64_t a, int64_t b, int64_t* hi) {
    uint64_t p00 = (uint64_t)(uint32_t)a * (uint32_t)b;
    int64_t p01 = (uint32_t)a * (b >> 32);
    int64_t p10 = (a >> 32) * (uint32_t)b;
    int64_t p11 = (a >> 32) * (b >> 32);
    /* Sum of all contributions to bits 32..95 of the product. */
    uint64_t mid = (p00 >> 32) + (uint32_t)p01 + (uint32_t)p10;
    *hi = p11 + (p01 >> 32) + (p10 >> 32) + (mid >> 32);
    return (mid << 32) + (uint32_t)p00;
}
#endif
/* Initialize r from its two 64-bit limbs: r = hi * 2^64 + lo. */
static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_load(rustsecp256k1_v0_8_0_uint128 *r, uint64_t hi, uint64_t lo) {
    r->lo = lo;
    r->hi = hi;
}
/* r = a * b: the full 128-bit product of two unsigned 64-bit values. */
static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_mul(rustsecp256k1_v0_8_0_uint128 *r, uint64_t a, uint64_t b) {
    r->lo = rustsecp256k1_v0_8_0_umul128(a, b, &r->hi);
}
/* r += a * b. Any carry out of bit 127 is discarded (wraps mod 2^128). */
static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_accum_mul(rustsecp256k1_v0_8_0_uint128 *r, uint64_t a, uint64_t b) {
    uint64_t lo, hi;
    lo = rustsecp256k1_v0_8_0_umul128(a, b, &hi);
    r->lo += lo;
    /* (r->lo < lo) is 1 iff the low-limb addition wrapped: carry into the high limb. */
    r->hi += hi + (r->lo < lo);
}
/* r += a. Any carry out of bit 127 is discarded (wraps mod 2^128). */
static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_accum_u64(rustsecp256k1_v0_8_0_uint128 *r, uint64_t a) {
    uint64_t sum = r->lo + a;
    /* The low-limb addition wrapped iff sum < a: propagate the carry. */
    r->hi += sum < a;
    r->lo = sum;
}
/* Unsigned (logical) right shift: r >>= n, shifting zeros in from the top.
 * Non-constant time in n.
 */
static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_rshift(rustsecp256k1_v0_8_0_uint128 *r, unsigned int n) {
    VERIFY_CHECK(n < 128);
    if (n == 0) return;
    if (n < 64) {
        /* Bits shifted out of the high limb become the top bits of the low limb. */
        r->lo = (r->hi << (64 - n)) | (r->lo >> n);
        r->hi >>= n;
    } else {
        r->lo = r->hi >> (n - 64);
        r->hi = 0;
    }
}
/* Return the low 64 bits of a, i.e. a mod 2^64. */
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_0_u128_to_u64(const rustsecp256k1_v0_8_0_uint128 *a) {
    return a->lo;
}
/* Return the high 64 bits of a, i.e. floor(a / 2^64). */
static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_0_u128_hi_u64(const rustsecp256k1_v0_8_0_uint128 *a) {
    return a->hi;
}
/* Zero-extend a 64-bit value into r: r = a. */
static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_from_u64(rustsecp256k1_v0_8_0_uint128 *r, uint64_t a) {
    r->lo = a;
    r->hi = 0;
}
/* Return 1 iff the value fits in n bits, i.e. r < 2^n; 0 otherwise.
 * Non-constant time in n. */
static SECP256K1_INLINE int rustsecp256k1_v0_8_0_u128_check_bits(const rustsecp256k1_v0_8_0_uint128 *r, unsigned int n) {
    VERIFY_CHECK(n < 128);
    if (n >= 64) {
        return (r->hi >> (n - 64)) == 0;
    }
    return r->hi == 0 && (r->lo >> n) == 0;
}
/* Initialize r from a signed high limb and an unsigned low limb:
 * r = hi * 2^64 + lo in two's complement (hi carries the sign). */
static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_load(rustsecp256k1_v0_8_0_int128 *r, int64_t hi, uint64_t lo) {
    r->lo = lo;
    r->hi = hi;
}
/* r = a * b: the full signed 128-bit product of two 64-bit values,
 * stored in two's complement across the two unsigned limbs. */
static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_mul(rustsecp256k1_v0_8_0_int128 *r, int64_t a, int64_t b) {
    int64_t hi;
    r->lo = (uint64_t)rustsecp256k1_v0_8_0_mul128(a, b, &hi);
    r->hi = (uint64_t)hi;
}
/* r += a * b: accumulate the full signed 128-bit product of a and b into r.
 * The limb arithmetic wraps mod 2^128; the VERIFY_CHECKs below assert that
 * no signed 128-bit overflow/underflow actually occurred. */
static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_accum_mul(rustsecp256k1_v0_8_0_int128 *r, int64_t a, int64_t b) {
    int64_t hi;
    uint64_t lo = (uint64_t)rustsecp256k1_v0_8_0_mul128(a, b, &hi);
    r->lo += lo;
    /* Carry out of the low limb: the addition wrapped iff r->lo is now below lo. */
    hi += r->lo < lo;
    /* Verify no overflow.
     * If r represents a positive value (the sign bit is not set) and the value we are adding is a positive value (the sign bit is not set),
     * then we require that the resulting value also be positive (the sign bit is not set).
     * Note that (X <= Y) means (X implies Y) when X and Y are boolean values (i.e. 0 or 1).
     */
    VERIFY_CHECK((r->hi <= 0x7fffffffffffffffu && (uint64_t)hi <= 0x7fffffffffffffffu) <= (r->hi + (uint64_t)hi <= 0x7fffffffffffffffu));
    /* Verify no underflow.
     * If r represents a negative value (the sign bit is set) and the value we are adding is a negative value (the sign bit is set),
     * then we require that the resulting value also be negative (the sign bit is set).
     */
    VERIFY_CHECK((r->hi > 0x7fffffffffffffffu && (uint64_t)hi > 0x7fffffffffffffffu) <= (r->hi + (uint64_t)hi > 0x7fffffffffffffffu));
    r->hi += hi;
}
/* r -= a * b: subtract the full signed 128-bit product of a and b from r.
 * The limb arithmetic wraps mod 2^128; the VERIFY_CHECKs below assert that
 * no signed 128-bit overflow/underflow actually occurred. */
static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_dissip_mul(rustsecp256k1_v0_8_0_int128 *r, int64_t a, int64_t b) {
    int64_t hi;
    uint64_t lo = (uint64_t)rustsecp256k1_v0_8_0_mul128(a, b, &hi);
    /* Borrow out of the low limb, computed BEFORE r->lo is updated below:
     * the subtraction r->lo - lo will wrap iff r->lo < lo. */
    hi += r->lo < lo;
    /* Verify no overflow.
     * If r represents a positive value (the sign bit is not set) and the value we are subtracting is a negative value (the sign bit is set),
     * then we require that the resulting value also be positive (the sign bit is not set).
     * Note that (X <= Y) means (X implies Y) when X and Y are boolean values (i.e. 0 or 1).
     */
    VERIFY_CHECK((r->hi <= 0x7fffffffffffffffu && (uint64_t)hi > 0x7fffffffffffffffu) <= (r->hi - (uint64_t)hi <= 0x7fffffffffffffffu));
    /* Verify no underflow.
     * If r represents a negative value (the sign bit is set) and the value we are subtracting is a positive value (the sign bit is not set),
     * then we require that the resulting value also be negative (the sign bit is set).
     */
    VERIFY_CHECK((r->hi > 0x7fffffffffffffffu && (uint64_t)hi <= 0x7fffffffffffffffu) <= (r->hi - (uint64_t)hi > 0x7fffffffffffffffu));
    r->hi -= hi;
    r->lo -= lo;
}
/* r = a*d - b*c: the signed 128-bit determinant of the 2x2 matrix
 * [[a, b], [c, d]] of 64-bit entries. */
static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_det(rustsecp256k1_v0_8_0_int128 *r, int64_t a, int64_t b, int64_t c, int64_t d) {
    rustsecp256k1_v0_8_0_i128_mul(r, a, d);
    rustsecp256k1_v0_8_0_i128_dissip_mul(r, b, c);
}
/* Signed (arithmetic) right shift: r >>= n, replicating the sign bit at the top.
 * Non-constant time in n.
 */
static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_rshift(rustsecp256k1_v0_8_0_int128 *r, unsigned int n) {
    VERIFY_CHECK(n < 128);
    if (n == 0) return;
    if (n < 64) {
        /* Bits shifted out of the high limb become the top bits of the low limb. */
        r->lo = (r->hi << (64 - n)) | (r->lo >> n);
        r->hi = (uint64_t)((int64_t)r->hi >> n);
    } else {
        /* The entire low limb comes from the (arithmetically shifted) high limb;
         * the high limb collapses to the replicated sign bit. */
        r->lo = (uint64_t)((int64_t)r->hi >> (n - 64));
        r->hi = (uint64_t)((int64_t)r->hi >> 63);
    }
}
/* Return the low 64 bits of a reinterpreted as a signed value
 * (two's complement truncation of a to 64 bits). */
static SECP256K1_INLINE int64_t rustsecp256k1_v0_8_0_i128_to_i64(const rustsecp256k1_v0_8_0_int128 *a) {
    return (int64_t)a->lo;
}
/* Sign-extend a 64-bit value into r: r = a. */
static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_from_i64(rustsecp256k1_v0_8_0_int128 *r, int64_t a) {
    r->lo = (uint64_t)a;
    /* Arithmetic shift replicates the sign bit across the whole high limb. */
    r->hi = (uint64_t)(a >> 63);
}
/* Return 1 iff a == b. Variable time (may short-circuit on the first limb). */
static SECP256K1_INLINE int rustsecp256k1_v0_8_0_i128_eq_var(const rustsecp256k1_v0_8_0_int128 *a, const rustsecp256k1_v0_8_0_int128 *b) {
    return a->lo == b->lo && a->hi == b->hi;
}
/* Return 1 iff r == 2^n exactly; 0 otherwise. n must be below 127 so that
 * 2^n is representable as a positive signed 128-bit value.
 * Non-constant time in n. */
static SECP256K1_INLINE int rustsecp256k1_v0_8_0_i128_check_pow2(const rustsecp256k1_v0_8_0_int128 *r, unsigned int n) {
    VERIFY_CHECK(n < 127);
    if (n >= 64) {
        return r->lo == 0 && r->hi == (uint64_t)1 << (n - 64);
    }
    return r->hi == 0 && r->lo == (uint64_t)1 << n;
}
#endif