// SPDX-License-Identifier: CC0-1.0

//! Raw PSBT key-value pairs.
//!
//! Raw PSBT key-value pairs as defined at
//! <https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki>.
//!

use crate::prelude::*;

use core::fmt;
use core::convert::TryFrom;

use crate::io;
use crate::consensus::encode::{self, ReadExt, WriteExt, Decodable, Encodable, VarInt, serialize, deserialize, MAX_VEC_SIZE};
use crate::hashes::hex;
use crate::util::psbt::Error;

/// A PSBT key in its raw byte form.
#[derive(Debug, PartialEq, Hash, Eq, Clone, Ord, PartialOrd)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(crate = "actual_serde"))]
pub struct Key {
    /// The type of this PSBT key.
    pub type_value: u8,
    /// The key itself in raw byte form.
    #[cfg_attr(feature = "serde", serde(with = "crate::serde_utils::hex_bytes"))]
    pub key: Vec<u8>,
}

/// A PSBT key-value pair in its raw byte form.
#[derive(Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(crate = "actual_serde"))]
pub struct Pair {
    /// The key of this key-value pair.
    pub key: Key,
    /// The value of this key-value pair in raw byte form.
    #[cfg_attr(feature = "serde", serde(with = "crate::serde_utils::hex_bytes"))]
    pub value: Vec<u8>,
}

/// Default type used for proprietary key subtyping.
pub type ProprietaryType = u8;

/// Proprietary keys (i.e. keys starting with the 0xFC byte) with their internal
/// structure according to BIP 174.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(crate = "actual_serde"))]
pub struct ProprietaryKey<Subtype = ProprietaryType> where Subtype: Copy + From<u8> + Into<u8> {
    /// Proprietary type prefix used for grouping together keys under some
    /// application, avoiding namespace collisions.
    #[cfg_attr(feature = "serde", serde(with = "crate::serde_utils::hex_bytes"))]
    pub prefix: Vec<u8>,
    /// Custom proprietary subtype.
    pub subtype: Subtype,
    /// Additional key bytes (like serialized public key data etc).
    #[cfg_attr(feature = "serde", serde(with = "crate::serde_utils::hex_bytes"))]
    pub key: Vec<u8>,
}

impl fmt::Display for Key {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "type: {:#x}, key: ", self.type_value)?;
        hex::format_hex(&self.key[..], f)
    }
}
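
// Wire format note (BIP 174): each key-value pair is serialized as
// `<keylen> <keytype> <keydata> <valuelen> <valuedata>`, where `keylen` is a
// compact-size integer counting the type (modelled here as a single `u8`) plus
// the key data. That is why the `Encodable` impl below writes
// `VarInt(self.key.len() + 1)` and the `Decodable` impl subtracts one to
// recover the key length; a zero `keylen` is the separator that terminates a
// map and surfaces as `Error::NoMorePairs`. For example, the global
// unsigned-transaction key (`type_value: 0x00`, empty key data) serializes to
// the two bytes `01 00`.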

impl Decodable for Key {
    fn consensus_decode<R: io::Read + ?Sized>(r: &mut R) -> Result<Self, encode::Error> {
        let VarInt(byte_size): VarInt = Decodable::consensus_decode(r)?;

        if byte_size == 0 {
            return Err(Error::NoMorePairs.into());
        }

        let key_byte_size: u64 = byte_size - 1;

        if key_byte_size > MAX_VEC_SIZE as u64 {
            return Err(encode::Error::OversizedVectorAllocation {
                requested: key_byte_size as usize,
                max: MAX_VEC_SIZE,
            })
        }
        let type_value: u8 = Decodable::consensus_decode(r)?;

        let mut key = Vec::with_capacity(key_byte_size as usize);
        for _ in 0..key_byte_size {
            key.push(Decodable::consensus_decode(r)?);
        }

        Ok(Key { type_value, key })
    }
}

impl Encodable for Key {
    fn consensus_encode<W: io::Write + ?Sized>(&self, w: &mut W) -> Result<usize, io::Error> {
        let mut len = 0;
        len += VarInt((self.key.len() + 1) as u64).consensus_encode(w)?;
        len += self.type_value.consensus_encode(w)?;

        for key in &self.key {
            len += key.consensus_encode(w)?
        }

        Ok(len)
    }
}

impl Encodable for Pair {
    fn consensus_encode<W: io::Write + ?Sized>(&self, w: &mut W) -> Result<usize, io::Error> {
        let len = self.key.consensus_encode(w)?;
        Ok(len + self.value.consensus_encode(w)?)
    }
}

impl Decodable for Pair {
    fn consensus_decode<R: io::Read + ?Sized>(r: &mut R) -> Result<Self, encode::Error> {
        Ok(Pair {
            key: Decodable::consensus_decode(r)?,
            value: Decodable::consensus_decode(r)?,
        })
    }
}

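// Inside the raw key data of a proprietary entry, this implementation lays the
// fields out as: the `prefix` as a length-prefixed byte vector, one `subtype`
// byte, and all remaining bytes as `key`. `to_key` further below wraps that
// serialization in a raw `Key` with `type_value` 0xFC, the type BIP 174
// reserves for proprietary use.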
impl<Subtype> Encodable for ProprietaryKey<Subtype> where Subtype: Copy + From<u8> + Into<u8> {
    fn consensus_encode<W: io::Write + ?Sized>(&self, w: &mut W) -> Result<usize, io::Error> {
        let mut len = self.prefix.consensus_encode(w)? + 1;
        w.emit_u8(self.subtype.into())?;
        w.write_all(&self.key)?;
        len += self.key.len();
        Ok(len)
    }
}

impl<Subtype> Decodable for ProprietaryKey<Subtype> where Subtype: Copy + From<u8> + Into<u8> {
    fn consensus_decode<R: io::Read + ?Sized>(r: &mut R) -> Result<Self, encode::Error> {
        let prefix = Vec::<u8>::consensus_decode(r)?;
        let subtype = Subtype::from(r.read_u8()?);
        let key = read_to_end(r)?;

        Ok(ProprietaryKey { prefix, subtype, key })
    }
}

impl<Subtype> ProprietaryKey<Subtype> where Subtype: Copy + From<u8> + Into<u8> {
    /// Constructs a full [Key] corresponding to this proprietary key type.
    pub fn to_key(&self) -> Key {
        Key {
            type_value: 0xFC,
            key: serialize(self)
        }
    }
}

impl<Subtype> TryFrom<Key> for ProprietaryKey<Subtype>
where
    Subtype: Copy + From<u8> + Into<u8> {
    type Error = Error;

    /// Constructs a [`ProprietaryKey`] from a [`Key`].
    ///
    /// # Errors
    /// Returns [`Error::InvalidProprietaryKey`] if `key` does not start with the `0xFC` byte.
    fn try_from(key: Key) -> Result<Self, Self::Error> {
        if key.type_value != 0xFC {
            return Err(Error::InvalidProprietaryKey)
        }

        Ok(deserialize(&key.key)?)
    }
}

// core2 doesn't have read_to_end
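// Reads `d` to EOF through a small stack buffer, retrying on
// `ErrorKind::Interrupted` so transient signals don't abort decoding.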
pub(crate) fn read_to_end<D: io::Read>(mut d: D) -> Result<Vec<u8>, io::Error> {
    let mut result = vec![];
    let mut buf = [0u8; 64];
    loop {
        match d.read(&mut buf) {
            Ok(0) => break,
            Ok(n) => result.extend_from_slice(&buf[0..n]),
            Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {},
            Err(e) => return Err(e),
        };
    }
    Ok(result)
}
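
// A minimal round-trip sketch of the encoders above; the test names and sample
// bytes are illustrative only and simply exercise the serialization code in
// this module.
#[cfg(test)]
mod tests {
    use core::convert::TryFrom;

    use super::*;

    #[test]
    fn pair_round_trip() {
        // A made-up pair: type 0x02 with two key bytes and three value bytes.
        let pair = Pair {
            key: Key { type_value: 0x02u8, key: vec![0xab, 0xcd] },
            value: vec![0x01, 0x02, 0x03],
        };
        let bytes = serialize(&pair);
        // keylen (1 + 2) || type || keydata || valuelen || valuedata
        assert_eq!(bytes, vec![0x03, 0x02, 0xab, 0xcd, 0x03, 0x01, 0x02, 0x03]);
        assert_eq!(deserialize::<Pair>(&bytes).unwrap(), pair);
    }

    #[test]
    fn proprietary_key_round_trip() {
        // A hypothetical proprietary key using the default `u8` subtype.
        let pk = ProprietaryKey { prefix: b"test".to_vec(), subtype: 0x01u8, key: vec![0x02] };
        let key = pk.to_key();
        assert_eq!(key.type_value, 0xFC);
        assert_eq!(ProprietaryKey::<u8>::try_from(key).unwrap(), pk);
    }
}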