rust-bitcoin-unsafe-fast/bitcoin/src/internal_macros.rs


// Written in 2014 by Andrew Poelstra <apoelstra@wpsoftware.net>
// SPDX-License-Identifier: CC0-1.0
//! Internal macros.
//!
//! Macros meant to be used inside the Rust Bitcoin library.
//!
macro_rules! impl_consensus_encoding {
($thing:ident, $($field:ident),+) => (
impl $crate::consensus::Encodable for $thing {
#[inline]
fn consensus_encode<W: $crate::io::Write + ?Sized>(
&self,
w: &mut W,
) -> Result<usize, $crate::io::Error> {
let mut len = 0;
$(len += self.$field.consensus_encode(w)?;)+
Ok(len)
}
}
impl $crate::consensus::Decodable for $thing {
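// Decoding from a reader already known to be finite (e.g. an in-memory
// slice) can skip the `Take` guard that `consensus_decode` below applies,
// and simply decode each field with the finite-reader variant.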
#[inline]
fn consensus_decode_from_finite_reader<R: $crate::io::Read + ?Sized>(
r: &mut R,
) -> Result<$thing, $crate::consensus::encode::Error> {
Ok($thing {
$($field: $crate::consensus::Decodable::consensus_decode_from_finite_reader(r)?),+
})
}
#[inline]
fn consensus_decode<R: $crate::io::Read + ?Sized>(
r: &mut R,
) -> Result<$thing, $crate::consensus::encode::Error> {
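// Cap how much a single decode may read at `MAX_VEC_SIZE` so that a
// corrupted or malicious length prefix cannot cause unbounded reads or
// allocations when decoding from an untrusted, possibly infinite reader.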
use crate::io::Read as _;
let mut r = r.take($crate::consensus::encode::MAX_VEC_SIZE as u64);
Ok($thing {
$($field: $crate::consensus::Decodable::consensus_decode(r.by_ref())?),+
})
}
}
);
}
pub(crate) use impl_consensus_encoding;
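// Illustrative sketch, not part of the original file: a hypothetical struct run
// through `impl_consensus_encoding!` to show what the macro generates. `Demo`
// and its fields are made up; any field types implementing
// `consensus::{Encodable, Decodable}` work, and fields are encoded in the order
// they are listed. The round trip assumes the crate's
// `consensus::encode::{serialize, deserialize}` helpers, as in upstream
// rust-bitcoin.
#[cfg(test)]
mod impl_consensus_encoding_example {
    struct Demo {
        version: u32,
        flags: u16,
    }
    impl_consensus_encoding!(Demo, version, flags);

    #[test]
    fn demo_round_trips() {
        let demo = Demo { version: 2, flags: 0xBEEF };
        // Integers are consensus-encoded little-endian, field by field.
        let bytes = crate::consensus::encode::serialize(&demo);
        assert_eq!(bytes, vec![2u8, 0, 0, 0, 0xEF, 0xBE]);
        let back: Demo = crate::consensus::encode::deserialize(&bytes).unwrap();
        assert_eq!((back.version, back.flags), (demo.version, demo.flags));
    }
}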
// We use the `test_macros` module to keep things organised; everything is re-exported for ease of use.
#[cfg(test)]
pub(crate) use test_macros::*;
#[cfg(test)]
mod test_macros {
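// Illustrative usage (hypothetical value): in a test, `hex!("0badcafe")`
// expands to `<Vec<u8> as FromHex>::from_hex("0badcafe").unwrap()`, yielding
// `vec![0x0b, 0xad, 0xca, 0xfe]`; invalid hex panics, which is acceptable in
// test-only code.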
macro_rules! hex (($hex:expr) => (<Vec<u8> as hashes::hex::FromHex>::from_hex($hex).unwrap()));
pub(crate) use hex;
}
/// Implements several traits for byte-based newtypes:
/// - core::fmt::LowerHex
/// - core::fmt::UpperHex
/// - core::fmt::Display
/// - core::fmt::Debug
/// - core::str::FromStr
/// - hashes::hex::FromHex
/// - serde::{Serialize, Deserialize} (when the "serde" feature is enabled)
macro_rules! impl_bytes_newtype {
($t:ident, $len:literal) => {
impl $t {
/// Returns a reference to the underlying bytes.
#[inline]
pub fn as_bytes(&self) -> &[u8; $len] { &self.0 }
/// Returns the underlying bytes.
#[inline]
pub fn to_bytes(self) -> [u8; $len] {
// `to_bytes` takes `self` by value, which is only the conventional name
// under Rust's API naming guidelines if `$t` is `Copy`; this compile-time
// check enforces that assumption.
fn check_copy<T: Copy>() {}
check_copy::<$t>();
self.0
}
}
impl core::fmt::LowerHex for $t {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
use bitcoin_internals::hex::{display, Case};
display::fmt_hex_exact!(f, $len, &self.0, Case::Lower)
}
}
impl core::fmt::UpperHex for $t {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
use bitcoin_internals::hex::{display, Case};
display::fmt_hex_exact!(f, $len, &self.0, Case::Upper)
}
}
impl core::fmt::Display for $t {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
core::fmt::LowerHex::fmt(self, f)
}
}
impl core::fmt::Debug for $t {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
core::fmt::LowerHex::fmt(self, f)
}
}
impl $crate::hashes::hex::FromHex for $t {
fn from_byte_iter<I>(iter: I) -> Result<Self, $crate::hashes::hex::Error>
where
I: core::iter::Iterator<Item = Result<u8, $crate::hashes::hex::Error>>
+ core::iter::ExactSizeIterator
+ core::iter::DoubleEndedIterator,
{
if iter.len() == $len {
let mut ret = [0; $len];
for (n, byte) in iter.enumerate() {
ret[n] = byte?;
}
Ok($t(ret))
} else {
Err($crate::hashes::hex::Error::InvalidLength(2 * $len, 2 * iter.len()))
}
}
}
impl core::str::FromStr for $t {
type Err = $crate::hashes::hex::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
$crate::hashes::hex::FromHex::from_hex(s)
}
}
#[cfg(feature = "serde")]
#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
impl $crate::serde::Serialize for $t {
fn serialize<S: $crate::serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
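// Human-readable formats (e.g. JSON) get the hex string via `Display`;
// binary formats (e.g. bincode) get the raw bytes.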
if s.is_human_readable() {
s.collect_str(self)
} else {
s.serialize_bytes(&self[..])
}
}
}
#[cfg(feature = "serde")]
#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
impl<'de> $crate::serde::Deserialize<'de> for $t {
fn deserialize<D: $crate::serde::Deserializer<'de>>(d: D) -> Result<$t, D::Error> {
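// Mirrors `Serialize`: accept a hex string for human-readable formats
// and raw bytes otherwise.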
if d.is_human_readable() {
struct HexVisitor;
impl<'de> $crate::serde::de::Visitor<'de> for HexVisitor {
type Value = $t;
fn expecting(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
f.write_str("an ASCII hex string")
}
fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
where
E: $crate::serde::de::Error,
{
use $crate::serde::de::Unexpected;
if let Ok(hex) = core::str::from_utf8(v) {
$crate::hashes::hex::FromHex::from_hex(hex).map_err(E::custom)
} else {
return Err(E::invalid_value(Unexpected::Bytes(v), &self));
}
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: $crate::serde::de::Error,
{
$crate::hashes::hex::FromHex::from_hex(v).map_err(E::custom)
}
}
d.deserialize_str(HexVisitor)
} else {
struct BytesVisitor;
impl<'de> $crate::serde::de::Visitor<'de> for BytesVisitor {
type Value = $t;
fn expecting(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
f.write_str("a bytestring")
}
fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
where
E: $crate::serde::de::Error,
{
if v.len() != $len {
Err(E::invalid_length(v.len(), &stringify!($len)))
} else {
let mut ret = [0; $len];
ret.copy_from_slice(v);
Ok($t(ret))
}
}
}
d.deserialize_bytes(BytesVisitor)
}
}
}
};
}
pub(crate) use impl_bytes_newtype;
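// Illustrative sketch, not part of the original file: a hypothetical 4-byte
// newtype run through `impl_bytes_newtype!`. The macro requires the type to be
// `Copy`, and the serde impls (when the "serde" feature is on) also index the
// type with `[..]`, hence the `Index<RangeFull>` impl below. `DemoBytes` and
// the values are made up; the test assumes `std` (for `format!`) is available
// when testing, as is usual for this crate.
#[cfg(test)]
mod impl_bytes_newtype_example {
    #[derive(Copy, Clone, PartialEq, Eq)]
    struct DemoBytes([u8; 4]);
    impl_bytes_newtype!(DemoBytes, 4);

    impl core::ops::Index<core::ops::RangeFull> for DemoBytes {
        type Output = [u8];
        fn index(&self, _: core::ops::RangeFull) -> &[u8] { &self.0 }
    }

    #[test]
    fn hex_formatting_and_parsing_round_trip() {
        let demo = DemoBytes([0xde, 0xad, 0xbe, 0xef]);
        // The generated accessors expose the underlying array.
        assert_eq!(demo.to_bytes(), *demo.as_bytes());
        // `Display` delegates to `LowerHex`; `UpperHex` uppercases the digits.
        assert_eq!(format!("{}", demo), "deadbeef");
        assert_eq!(format!("{:X}", demo), "DEADBEEF");
        // `FromStr` goes through `hashes::hex::FromHex`.
        let parsed: DemoBytes = "deadbeef".parse().unwrap();
        assert_eq!(parsed, demo);
    }
}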