rust-bitcoin-unsafe-fast/bitcoin/src/internal_macros.rs

// SPDX-License-Identifier: CC0-1.0
//! Internal macros.
//!
//! Macros meant to be used inside the Rust Bitcoin library.
//!
macro_rules! impl_consensus_encoding {
($thing:ident, $($field:ident),+) => (
impl $crate::consensus::Encodable for $thing {
#[inline]
fn consensus_encode<W: $crate::io::Write + ?Sized>(
&self,
w: &mut W,
) -> core::result::Result<usize, $crate::io::Error> {
let mut len = 0;
$(len += self.$field.consensus_encode(w)?;)+
Ok(len)
}
}
impl $crate::consensus::Decodable for $thing {
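// `consensus_decode_from_finite_reader` may assume the reader is already length-limited
// (e.g. an in-memory slice), so it does not apply the defensive `take(MAX_VEC_SIZE)` cap
// that the plain `consensus_decode` below adds before decoding the fields.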
#[inline]
fn consensus_decode_from_finite_reader<R: $crate::io::BufRead + ?Sized>(
r: &mut R,
) -> core::result::Result<$thing, $crate::consensus::encode::Error> {
Ok($thing {
$($field: $crate::consensus::Decodable::consensus_decode_from_finite_reader(r)?),+
})
}
#[inline]
fn consensus_decode<R: $crate::io::BufRead + ?Sized>(
r: &mut R,
) -> core::result::Result<$thing, $crate::consensus::encode::Error> {
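// Cap the reader at MAX_VEC_SIZE bytes so that decoding from an untrusted stream cannot
// read (or allocate for) an unbounded amount of data.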
let mut r = r.take($crate::consensus::encode::MAX_VEC_SIZE as u64);
Ok($thing {
$($field: $crate::consensus::Decodable::consensus_decode(&mut r)?),+
})
}
}
);
}
pub(crate) use impl_consensus_encoding;
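// A minimal illustrative use of the macro above (the `Demo` struct and its field names are
// hypothetical and exist only for this sketch): each listed field must itself implement
// Encodable/Decodable, and fields are encoded in the order given.
#[cfg(test)]
mod impl_consensus_encoding_example {
    #[derive(Debug, PartialEq)]
    struct Demo {
        version: i32,
        lock_time: u32,
    }
    impl_consensus_encoding!(Demo, version, lock_time);

    #[test]
    fn demo_round_trips() {
        let demo = Demo { version: 2, lock_time: 0 };
        let bytes = crate::consensus::encode::serialize(&demo);
        assert_eq!(bytes.len(), 8); // two little-endian 4-byte integers
        let back: Demo = crate::consensus::encode::deserialize(&bytes).expect("valid encoding");
        assert_eq!(back, demo);
    }
}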
/// Implements several traits for byte-based newtypes.
/// Implements:
/// - core::fmt::LowerHex
/// - core::fmt::UpperHex
/// - core::fmt::Display
/// - core::fmt::Debug
/// - core::str::FromStr
/// - serde::{Serialize, Deserialize} (when the `serde` feature is enabled)
macro_rules! impl_bytes_newtype {
($t:ident, $len:literal) => {
impl $t {
/// Returns a reference to the underlying bytes.
#[inline]
pub fn as_bytes(&self) -> &[u8; $len] { &self.0 }
/// Returns the underlying bytes.
#[inline]
pub fn to_bytes(self) -> [u8; $len] {
// We rely on `Copy` being implemented for $t so conversion
// methods use the correct Rust naming conventions.
fn check_copy<T: Copy>() {}
check_copy::<$t>();
self.0
}
/// Creates `Self` from a hex string.
pub fn from_hex(s: &str) -> Result<Self, hex::HexToArrayError> {
Ok($t($crate::hex::FromHex::from_hex(s)?))
}
}
impl core::fmt::LowerHex for $t {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
use $crate::hex::{display, Case};
display::fmt_hex_exact!(f, $len, &self.0, Case::Lower)
}
}
impl core::fmt::UpperHex for $t {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
use $crate::hex::{display, Case};
display::fmt_hex_exact!(f, $len, &self.0, Case::Upper)
}
}
impl core::fmt::Display for $t {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
core::fmt::LowerHex::fmt(self, f)
}
}
impl core::fmt::Debug for $t {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
core::fmt::LowerHex::fmt(self, f)
}
}
impl core::str::FromStr for $t {
type Err = $crate::hex::HexToArrayError;
fn from_str(s: &str) -> core::result::Result<Self, Self::Err> { Self::from_hex(s) }
}
#[cfg(feature = "serde")]
impl $crate::serde::Serialize for $t {
fn serialize<S: $crate::serde::Serializer>(&self, s: S) -> core::result::Result<S::Ok, S::Error> {
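// Human-readable formats (e.g. JSON) get the lowercase-hex string form via `Display`;
// binary formats get the raw bytes.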
if s.is_human_readable() {
s.collect_str(self)
} else {
s.serialize_bytes(&self[..])
}
}
}
#[cfg(feature = "serde")]
impl<'de> $crate::serde::Deserialize<'de> for $t {
fn deserialize<D: $crate::serde::Deserializer<'de>>(d: D) -> core::result::Result<$t, D::Error> {
if d.is_human_readable() {
struct HexVisitor;
impl<'de> $crate::serde::de::Visitor<'de> for HexVisitor {
type Value = $t;
fn expecting(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
f.write_str("an ASCII hex string")
}
fn visit_bytes<E>(self, v: &[u8]) -> core::result::Result<Self::Value, E>
where
E: $crate::serde::de::Error,
{
use $crate::serde::de::Unexpected;
if let Ok(hex) = core::str::from_utf8(v) {
core::str::FromStr::from_str(hex).map_err(E::custom)
} else {
return Err(E::invalid_value(Unexpected::Bytes(v), &self));
}
}
fn visit_str<E>(self, hex: &str) -> core::result::Result<Self::Value, E>
where
E: $crate::serde::de::Error,
{
core::str::FromStr::from_str(hex).map_err(E::custom)
}
}
d.deserialize_str(HexVisitor)
} else {
struct BytesVisitor;
impl<'de> $crate::serde::de::Visitor<'de> for BytesVisitor {
type Value = $t;
fn expecting(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
f.write_str("a bytestring")
}
fn visit_bytes<E>(self, v: &[u8]) -> core::result::Result<Self::Value, E>
where
E: $crate::serde::de::Error,
{
if v.len() != $len {
Err(E::invalid_length(v.len(), &stringify!($len)))
} else {
let mut ret = [0; $len];
ret.copy_from_slice(v);
Ok($t(ret))
}
}
}
d.deserialize_bytes(BytesVisitor)
}
}
}
};
}
pub(crate) use impl_bytes_newtype;
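// Illustrative usage (a sketch only; `DemoBytes` is hypothetical): the macro targets `Copy`
// newtypes over a fixed-size byte array and gives them hex parsing/formatting plus the serde
// impls above. Note that the serde serializer indexes the type with `&self[..]`, so callers
// pair this macro with an array-newtype impl (or equivalent) that provides slice indexing.
//
//     #[derive(Copy, Clone)]
//     struct DemoBytes([u8; 4]);
//     impl_bytes_newtype!(DemoBytes, 4);
//
//     // Display/FromStr round-trip through lowercase hex, e.g. "01020304".parse::<DemoBytes>().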
#[rustfmt::skip]
macro_rules! impl_hashencode {
($hashtype:ident) => {
impl $crate::consensus::Encodable for $hashtype {
fn consensus_encode<W: $crate::io::Write + ?Sized>(&self, w: &mut W) -> core::result::Result<usize, $crate::io::Error> {
self.0.consensus_encode(w)
}
}
impl $crate::consensus::Decodable for $hashtype {
fn consensus_decode<R: $crate::io::BufRead + ?Sized>(r: &mut R) -> core::result::Result<Self, $crate::consensus::encode::Error> {
use $crate::hashes::Hash;
Ok(Self::from_byte_array(<<$hashtype as $crate::hashes::Hash>::Bytes>::consensus_decode(r)?))
}
}
};
}
pub(crate) use impl_hashencode;
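// Illustrative usage (a sketch only; `DemoHash` is hypothetical): the macro is invoked on
// hash newtypes so that their consensus encoding is simply the underlying byte array, e.g.
//
//     hashes::hash_newtype! { struct DemoHash(hashes::sha256d::Hash); }
//     impl_hashencode!(DemoHash);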
#[rustfmt::skip]
macro_rules! impl_asref_push_bytes {
($($hashtype:ident),*) => {
$(
impl AsRef<$crate::blockdata::script::PushBytes> for $hashtype {
fn as_ref(&self) -> &$crate::blockdata::script::PushBytes {
use $crate::hashes::Hash;
self.as_byte_array().into()
}
}
impl From<$hashtype> for $crate::blockdata::script::PushBytesBuf {
fn from(hash: $hashtype) -> Self {
use $crate::hashes::Hash;
hash.as_byte_array().into()
}
}
)*
};
}
pub(crate) use impl_asref_push_bytes;
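// Illustrative usage (a sketch only; the type names are placeholders): invoked with a
// comma-separated list of hash newtypes, e.g.
//
//     impl_asref_push_bytes!(PubkeyHash, ScriptHash);
//
// so that those hashes can be passed directly wherever a `PushBytes` script element is
// expected (e.g. when building scripts).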