rust-bitcoin-unsafe-fast/src/util/psbt/macros.rs

// Rust Bitcoin Library
// Written by
// The Rust Bitcoin developers
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication
// along with this software.
// If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
//
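// Test helper: decodes a hex string into a `PartiallySignedTransaction`.
// The inner `from_hex(..).unwrap()` panics if the string is not valid hex;
// PSBT decoding errors are returned in the `Result`.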
#[allow(unused_macros)]
macro_rules! hex_psbt {
    ($s:expr) => {
        $crate::consensus::deserialize::<$crate::util::psbt::PartiallySignedTransaction>(
            &<$crate::prelude::Vec<u8> as $crate::hashes::hex::FromHex>::from_hex($s).unwrap()
        )
    };
}
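// Sets `$slf.$thing` to `$other.$thing` when the former is `None` and the latter is
// `Some`, i.e. fills in a missing field when combining two PSBT maps.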
macro_rules! combine {
    ($thing:ident, $slf:ident, $other:ident) => {
        if let (&None, Some($thing)) = (&$slf.$thing, $other.$thing) {
            $slf.$thing = Some($thing);
        }
    };
}
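// Implements both `Serialize` and `Deserialize` for a type in terms of its consensus
// encoding (see the two macros below).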
macro_rules! impl_psbt_de_serialize {
    ($thing:ty) => {
        impl_psbt_serialize!($thing);
        impl_psbt_deserialize!($thing);
    };
}
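// Implements `Deserialize` for a type by delegating to `consensus::deserialize`.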
macro_rules! impl_psbt_deserialize {
    ($thing:ty) => {
        impl $crate::util::psbt::serialize::Deserialize for $thing {
            fn deserialize(bytes: &[u8]) -> Result<Self, $crate::consensus::encode::Error> {
                $crate::consensus::deserialize(&bytes[..])
            }
        }
    };
}
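// Implements `Serialize` for a type by delegating to `consensus::serialize`.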
macro_rules! impl_psbt_serialize {
    ($thing:ty) => {
        impl $crate::util::psbt::serialize::Serialize for $thing {
            fn serialize(&self) -> $crate::prelude::Vec<u8> {
                $crate::consensus::serialize(self)
            }
        }
    };
}
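// Implements `consensus::Encodable` for a PSBT map type by writing out its key-value
// pairs via `consensus_encode_map`.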
macro_rules! impl_psbtmap_consensus_encoding {
    ($thing:ty) => {
        impl $crate::consensus::Encodable for $thing {
            fn consensus_encode<W: $crate::io::Write>(
                &self,
                w: &mut W,
            ) -> Result<usize, $crate::io::Error> {
                self.consensus_encode_map(w)
            }
        }
    };
}
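// Implements `consensus::Decodable` for a PSBT map type: starts from `Default` and
// inserts decoded key-value pairs until a `NoMorePairs` error signals the end of the map.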
macro_rules! impl_psbtmap_consensus_decoding {
    ($thing:ty) => {
        impl $crate::consensus::Decodable for $thing {
            fn consensus_decode<R: $crate::io::Read>(
                r: &mut R,
            ) -> Result<Self, $crate::consensus::encode::Error> {
                let mut rv: Self = ::core::default::Default::default();
                loop {
                    match $crate::consensus::Decodable::consensus_decode(r) {
                        Ok(pair) => rv.insert_pair(pair)?,
                        Err($crate::consensus::encode::Error::Psbt($crate::util::psbt::Error::NoMorePairs)) => return Ok(rv),
                        Err(e) => return Err(e),
                    }
                }
            }
        }
    };
}
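// Implements both `Decodable` and `Encodable` for a PSBT map type (see the two macros above).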
macro_rules! impl_psbtmap_consensus_enc_dec_oding {
    ($thing:ty) => {
        impl_psbtmap_consensus_decoding!($thing);
        impl_psbtmap_consensus_encoding!($thing);
    };
}
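// Inserts a raw key-value pair into a PSBT map field, rejecting duplicate and invalid keys.
// The first arm handles unkeyed (empty-key) fields stored as `Option<T>`; the second arm
// handles keyed fields stored in a `BTreeMap`.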
#[rustfmt::skip]
macro_rules! impl_psbt_insert_pair {
    ($slf:ident.$unkeyed_name:ident <= <$raw_key:ident: _>|<$raw_value:ident: $unkeyed_value_type:ty>) => {
        if $raw_key.key.is_empty() {
            if $slf.$unkeyed_name.is_none() {
                let val: $unkeyed_value_type = $crate::util::psbt::serialize::Deserialize::deserialize(&$raw_value)?;
                $slf.$unkeyed_name = Some(val)
            } else {
                return Err($crate::util::psbt::Error::DuplicateKey($raw_key).into());
            }
        } else {
            return Err($crate::util::psbt::Error::InvalidKey($raw_key).into());
        }
    };
    ($slf:ident.$keyed_name:ident <= <$raw_key:ident: $keyed_key_type:ty>|<$raw_value:ident: $keyed_value_type:ty>) => {
        if !$raw_key.key.is_empty() {
            let key_val: $keyed_key_type = $crate::util::psbt::serialize::Deserialize::deserialize(&$raw_key.key)?;
            match $slf.$keyed_name.entry(key_val) {
                $crate::prelude::btree_map::Entry::Vacant(empty_key) => {
                    let val: $keyed_value_type = $crate::util::psbt::serialize::Deserialize::deserialize(&$raw_value)?;
                    empty_key.insert(val);
                }
                $crate::prelude::btree_map::Entry::Occupied(_) => return Err($crate::util::psbt::Error::DuplicateKey($raw_key).into()),
            }
        } else {
            return Err($crate::util::psbt::Error::InvalidKey($raw_key).into());
        }
    };
}
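// Pushes a field's raw key-value pair(s) onto `$rv`: `push` for an optional unkeyed field,
// `push_map` for every entry of a keyed `BTreeMap` field.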
#[rustfmt::skip]
macro_rules! impl_psbt_get_pair {
    ($rv:ident.push($slf:ident.$unkeyed_name:ident, $unkeyed_typeval:ident)) => {
        if let Some(ref $unkeyed_name) = $slf.$unkeyed_name {
            $rv.push($crate::util::psbt::raw::Pair {
                key: $crate::util::psbt::raw::Key {
                    type_value: $unkeyed_typeval,
                    key: vec![],
                },
                value: $crate::util::psbt::serialize::Serialize::serialize($unkeyed_name),
            });
        }
    };
    ($rv:ident.push_map($slf:ident.$keyed_name:ident, $keyed_typeval:ident)) => {
        for (key, val) in &$slf.$keyed_name {
            $rv.push($crate::util::psbt::raw::Pair {
                key: $crate::util::psbt::raw::Key {
                    type_value: $keyed_typeval,
                    key: $crate::util::psbt::serialize::Serialize::serialize(key),
                },
                value: $crate::util::psbt::serialize::Serialize::serialize(val),
            });
        }
    };
}
// Macros implementing PSBT de/serialization for hash types.
macro_rules! impl_psbt_hash_de_serialize {
    ($hash_type:ty) => {
        impl_psbt_hash_serialize!($hash_type);
        impl_psbt_hash_deserialize!($hash_type);
    };
}
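// Implements `Deserialize` for a hash type via `from_slice`, mapping errors into PSBT errors.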
macro_rules! impl_psbt_hash_deserialize {
    ($hash_type:ty) => {
        impl $crate::util::psbt::serialize::Deserialize for $hash_type {
            fn deserialize(bytes: &[u8]) -> Result<Self, $crate::consensus::encode::Error> {
                <$hash_type>::from_slice(&bytes[..]).map_err(|e| {
                    $crate::util::psbt::Error::from(e).into()
                })
            }
        }
    };
}
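// Implements `Serialize` for a hash type by returning its raw bytes.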
macro_rules! impl_psbt_hash_serialize {
    ($hash_type:ty) => {
        impl $crate::util::psbt::serialize::Serialize for $hash_type {
            fn serialize(&self) -> $crate::prelude::Vec<u8> {
                self.into_inner().to_vec()
            }
        }
    };
}