// SPDX-License-Identifier: CC0-1.0
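
// Test helper: builds a `PartiallySignedTransaction` from a hex string by decoding
// the hex (panicking on invalid hex via `unwrap`) and then consensus-deserializing
// the resulting bytes, so the caller still gets a `Result` from deserialization.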
#[allow(unused_macros)]
macro_rules! hex_psbt {
    ($s:expr) => { $crate::consensus::deserialize::<$crate::psbt::PartiallySignedTransaction>(&<$crate::prelude::Vec<u8> as $crate::hashes::hex::FromHex>::from_hex($s).unwrap()) };
}
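
// If `$slf.$thing` is `None` and `$other.$thing` is `Some`, move the value from
// `$other` into `$slf`; otherwise leave `$slf` untouched.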
macro_rules! combine {
    ($thing:ident, $slf:ident, $other:ident) => {
        if let (&None, Some($thing)) = (&$slf.$thing, $other.$thing) {
            $slf.$thing = Some($thing);
        }
    };
}
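
// Implements both the PSBT `Serialize` and `Deserialize` traits for a type by
// delegating to its consensus encoding. Illustrative call site (actual invocations
// live elsewhere in the psbt module): `impl_psbt_de_serialize!(Transaction);`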
macro_rules! impl_psbt_de_serialize {
    ($thing:ty) => {
        impl_psbt_serialize!($thing);
        impl_psbt_deserialize!($thing);
    };
}
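
// Implements `psbt::serialize::Deserialize` by forwarding to `consensus::deserialize`.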
macro_rules! impl_psbt_deserialize {
    ($thing:ty) => {
        impl $crate::psbt::serialize::Deserialize for $thing {
            fn deserialize(bytes: &[u8]) -> Result<Self, $crate::consensus::encode::Error> {
                $crate::consensus::deserialize(&bytes[..])
            }
        }
    };
}
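
// Implements `psbt::serialize::Serialize` by forwarding to `consensus::serialize`.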
macro_rules! impl_psbt_serialize {
    ($thing:ty) => {
        impl $crate::psbt::serialize::Serialize for $thing {
            fn serialize(&self) -> $crate::prelude::Vec<u8> {
                $crate::consensus::serialize(self)
            }
        }
    };
}
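
// Implements `consensus::Encodable` for a PSBT map type by forwarding to the
// type's `consensus_encode_map` method.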
macro_rules! impl_psbtmap_consensus_encoding {
    ($thing:ty) => {
        impl $crate::consensus::Encodable for $thing {
            fn consensus_encode<W: $crate::io::Write + ?Sized>(
                &self,
                w: &mut W,
            ) -> Result<usize, $crate::io::Error> {
                self.consensus_encode_map(w)
            }
        }
    };
}
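
// Implements `consensus::Decodable` for a PSBT map type: starts from
// `Default::default()` and keeps decoding key-value pairs into the map until a
// `psbt::Error::NoMorePairs` error marks the end of the map.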
macro_rules! impl_psbtmap_consensus_decoding {
    ($thing:ty) => {
        impl $crate::consensus::Decodable for $thing {
            fn consensus_decode<R: $crate::io::Read + ?Sized>(
                r: &mut R,
            ) -> Result<Self, $crate::consensus::encode::Error> {
                let mut rv: Self = core::default::Default::default();

                loop {
                    match $crate::consensus::Decodable::consensus_decode(r) {
                        Ok(pair) => rv.insert_pair(pair)?,
                        Err($crate::consensus::encode::Error::Psbt($crate::psbt::Error::NoMorePairs)) => return Ok(rv),
                        Err(e) => return Err(e),
                    }
                }
            }
        }
    };
}
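
// Implements both `Encodable` and `Decodable` for a PSBT map type by expanding
// the two macros above.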
macro_rules! impl_psbtmap_consensus_enc_dec_oding {
    ($thing:ty) => {
        impl_psbtmap_consensus_decoding!($thing);
        impl_psbtmap_consensus_encoding!($thing);
    };
}
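
// Inserts a raw key-value pair into a PSBT map field.
//
// The first arm handles unkeyed fields: the raw key must be empty and the field
// must not already be set, otherwise `InvalidKey`/`DuplicateKey` is returned.
// The second arm handles keyed fields stored in a `BTreeMap`: the raw key must be
// non-empty and not already present in the map.
//
// Illustrative use with a hypothetical field name:
// `impl_psbt_insert_pair!(rv.some_field <= <raw_key: _>|<raw_value: Script>)`.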
#[rustfmt::skip]
macro_rules! impl_psbt_insert_pair {
    ($slf:ident.$unkeyed_name:ident <= <$raw_key:ident: _>|<$raw_value:ident: $unkeyed_value_type:ty>) => {
        if $raw_key.key.is_empty() {
            if $slf.$unkeyed_name.is_none() {
                let val: $unkeyed_value_type = $crate::psbt::serialize::Deserialize::deserialize(&$raw_value)?;
                $slf.$unkeyed_name = Some(val)
            } else {
                return Err($crate::psbt::Error::DuplicateKey($raw_key).into());
            }
        } else {
            return Err($crate::psbt::Error::InvalidKey($raw_key).into());
        }
    };
    ($slf:ident.$keyed_name:ident <= <$raw_key:ident: $keyed_key_type:ty>|<$raw_value:ident: $keyed_value_type:ty>) => {
        if !$raw_key.key.is_empty() {
            let key_val: $keyed_key_type = $crate::psbt::serialize::Deserialize::deserialize(&$raw_key.key)?;
            match $slf.$keyed_name.entry(key_val) {
                $crate::prelude::btree_map::Entry::Vacant(empty_key) => {
                    let val: $keyed_value_type = $crate::psbt::serialize::Deserialize::deserialize(&$raw_value)?;
                    empty_key.insert(val);
                }
                $crate::prelude::btree_map::Entry::Occupied(_) => return Err($crate::psbt::Error::DuplicateKey($raw_key).into()),
            }
        } else {
            return Err($crate::psbt::Error::InvalidKey($raw_key).into());
        }
    };
}
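
// Serializes a PSBT map field back into raw key-value `Pair`s.
//
// The `push` arm emits a single pair for an optional unkeyed field (empty raw
// key); the `push_map` arm emits one pair per entry of a keyed `BTreeMap` field,
// serializing each map key into the raw key.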
#[rustfmt::skip]
macro_rules! impl_psbt_get_pair {
    ($rv:ident.push($slf:ident.$unkeyed_name:ident, $unkeyed_typeval:ident)) => {
        if let Some(ref $unkeyed_name) = $slf.$unkeyed_name {
            $rv.push($crate::psbt::raw::Pair {
                key: $crate::psbt::raw::Key {
                    type_value: $unkeyed_typeval,
                    key: vec![],
                },
                value: $crate::psbt::serialize::Serialize::serialize($unkeyed_name),
            });
        }
    };
    ($rv:ident.push_map($slf:ident.$keyed_name:ident, $keyed_typeval:ident)) => {
        for (key, val) in &$slf.$keyed_name {
            $rv.push($crate::psbt::raw::Pair {
                key: $crate::psbt::raw::Key {
                    type_value: $keyed_typeval,
                    key: $crate::psbt::serialize::Serialize::serialize(key),
                },
                value: $crate::psbt::serialize::Serialize::serialize(val),
            });
        }
    };
}

// Macros for PSBT (de)serialization of hash types.
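
// Implements both hash (de)serialization traits for a hash type.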
macro_rules! impl_psbt_hash_de_serialize {
    ($hash_type:ty) => {
        impl_psbt_hash_serialize!($hash_type);
        impl_psbt_hash_deserialize!($hash_type);
    };
}
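
// Implements `psbt::serialize::Deserialize` for a hash type from its raw bytes
// via `from_slice`, mapping hash errors through `psbt::Error`.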
macro_rules! impl_psbt_hash_deserialize {
    ($hash_type:ty) => {
        impl $crate::psbt::serialize::Deserialize for $hash_type {
            fn deserialize(bytes: &[u8]) -> Result<Self, $crate::consensus::encode::Error> {
                <$hash_type>::from_slice(&bytes[..]).map_err(|e| {
                    $crate::psbt::Error::from(e).into()
                })
            }
        }
    };
}
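
// Implements `psbt::serialize::Serialize` for a hash type as its raw inner bytes
// (`into_inner().to_vec()`).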
macro_rules! impl_psbt_hash_serialize {
    ($hash_type:ty) => {
        impl $crate::psbt::serialize::Serialize for $hash_type {
            fn serialize(&self) -> $crate::prelude::Vec<u8> {
                self.into_inner().to_vec()
            }
        }
    };
}