// Rust Bitcoin Library
// Written by
//   The Rust Bitcoin developers
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication
// along with this software.
// If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
//

//! Raw PSBT key-value pairs.
//!
//! Raw PSBT key-value pairs as defined at
//! <https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki>.
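//!
//! # Examples
//!
//! A minimal sketch of building and serializing a raw key-value pair. Illustrative
//! only; it assumes this module is reachable as `bitcoin::util::psbt::raw` and uses
//! `consensus::encode::serialize` from this crate.
//!
//! ```
//! use bitcoin::consensus::encode::serialize;
//! use bitcoin::util::psbt::raw::{Key, Pair};
//!
//! let key = Key { type_value: 0x00, key: vec![0x01, 0x02] };
//! let pair = Pair { key, value: vec![0xff] };
//! // Wire format: <keylen><keytype><keydata> followed by <valuelen><valuedata>.
//! assert_eq!(serialize(&pair), vec![0x03, 0x00, 0x01, 0x02, 0x01, 0xff]);
//! ```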

use crate::prelude::*;

use core::fmt;

use crate::io;
use crate::consensus::encode::{self, ReadExt, WriteExt, Decodable, Encodable, VarInt, serialize, deserialize, MAX_VEC_SIZE};
use crate::hashes::hex;
use crate::util::psbt::Error;
use crate::util::read_to_end;

/// A PSBT key in its raw byte form.
#[derive(Debug, PartialEq, Hash, Eq, Clone, Ord, PartialOrd)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(crate = "actual_serde"))]
pub struct Key {
    /// The type of this PSBT key.
    pub type_value: u8,
    /// The key itself in raw byte form.
    #[cfg_attr(feature = "serde", serde(with = "crate::serde_utils::hex_bytes"))]
    pub key: Vec<u8>,
}

/// A PSBT key-value pair in its raw byte form.
#[derive(Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(crate = "actual_serde"))]
pub struct Pair {
    /// The key of this key-value pair.
    pub key: Key,
    /// The value of this key-value pair in raw byte form.
    #[cfg_attr(feature = "serde", serde(with = "crate::serde_utils::hex_bytes"))]
    pub value: Vec<u8>,
}

/// Default type used for proprietary key subtyping.
pub type ProprietaryType = u8;

/// Proprietary keys (i.e. keys starting with the 0xFC byte) with their internal
/// structure according to BIP 174.
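///
/// # Examples
///
/// A sketch of the wire layout (illustrative byte values; assumes this module's
/// public path is `bitcoin::util::psbt::raw`):
///
/// ```
/// use bitcoin::consensus::encode::serialize;
/// use bitcoin::util::psbt::raw::ProprietaryKey;
///
/// let pk: ProprietaryKey = ProprietaryKey { prefix: vec![0xAA], subtype: 0x42, key: vec![0xDE, 0xAD] };
/// // Encoded as <prefix len><prefix bytes><subtype><key data>.
/// assert_eq!(serialize(&pk), vec![0x01, 0xAA, 0x42, 0xDE, 0xAD]);
/// ```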
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(crate = "actual_serde"))]
pub struct ProprietaryKey<Subtype=ProprietaryType> where Subtype: Copy + From<u8> + Into<u8> {
    /// Proprietary type prefix used for grouping together keys under some
    /// application and avoiding namespace collisions.
    #[cfg_attr(feature = "serde", serde(with = "crate::serde_utils::hex_bytes"))]
    pub prefix: Vec<u8>,
    /// Custom proprietary subtype.
    pub subtype: Subtype,
    /// Additional key bytes (e.g. serialized public key data).
    #[cfg_attr(feature = "serde", serde(with = "crate::serde_utils::hex_bytes"))]
    pub key: Vec<u8>,
}

impl fmt::Display for Key {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "type: {:#x}, key: ", self.type_value)?;
        hex::format_hex(&self.key[..], f)
    }
}

impl Decodable for Key {
    fn consensus_decode<R: io::Read>(r: &mut R) -> Result<Self, encode::Error> {
        let VarInt(byte_size): VarInt = Decodable::consensus_decode(r)?;

        // A zero-length key marks the end of the key-value map.
        if byte_size == 0 {
            return Err(Error::NoMorePairs.into());
        }

        // The encoded length covers the type byte as well as the key data.
        let key_byte_size: u64 = byte_size - 1;

        if key_byte_size > MAX_VEC_SIZE as u64 {
            return Err(encode::Error::OversizedVectorAllocation {
                requested: key_byte_size as usize,
                max: MAX_VEC_SIZE,
            })
        }

        let type_value: u8 = Decodable::consensus_decode(r)?;

        let mut key = Vec::with_capacity(key_byte_size as usize);
        for _ in 0..key_byte_size {
            key.push(Decodable::consensus_decode(r)?);
        }

        Ok(Key { type_value, key })
    }
}

impl Encodable for Key {
    fn consensus_encode<W: io::Write>(&self, w: &mut W) -> Result<usize, io::Error> {
        let mut len = 0;
        // The length prefix counts the type byte plus the raw key bytes.
        len += VarInt((self.key.len() + 1) as u64).consensus_encode(w)?;

        len += self.type_value.consensus_encode(w)?;

        for key in &self.key {
            len += key.consensus_encode(w)?
        }

        Ok(len)
    }
}

impl Encodable for Pair {
    fn consensus_encode<W: io::Write>(&self, w: &mut W) -> Result<usize, io::Error> {
        let len = self.key.consensus_encode(w)?;
        Ok(len + self.value.consensus_encode(w)?)
    }
}

impl Decodable for Pair {
    fn consensus_decode<R: io::Read>(r: &mut R) -> Result<Self, encode::Error> {
        Ok(Pair {
            key: Decodable::consensus_decode(r)?,
            value: Decodable::consensus_decode(r)?,
        })
    }
}

impl<Subtype> Encodable for ProprietaryKey<Subtype> where Subtype: Copy + From<u8> + Into<u8> {
    fn consensus_encode<W: io::Write>(&self, w: &mut W) -> Result<usize, io::Error> {
        // The extra byte accounts for the subtype written below.
        let mut len = self.prefix.consensus_encode(w)? + 1;
        w.emit_u8(self.subtype.into())?;
        w.write_all(&self.key)?;
        len += self.key.len();
        Ok(len)
    }
}

impl<Subtype> Decodable for ProprietaryKey<Subtype> where Subtype: Copy + From<u8> + Into<u8> {
    fn consensus_decode<R: io::Read>(r: &mut R) -> Result<Self, encode::Error> {
        let prefix = Vec::<u8>::consensus_decode(r)?;
        let subtype = Subtype::from(r.read_u8()?);
        let key = read_to_end(r)?;

        Ok(ProprietaryKey { prefix, subtype, key })
    }
}

impl<Subtype> ProprietaryKey<Subtype> where Subtype: Copy + From<u8> + Into<u8> {
    /// Constructs a [ProprietaryKey] from a [Key]; returns
    /// [Error::InvalidProprietaryKey] if `key` does not start with the 0xFC byte.
    pub fn from_key(key: Key) -> Result<Self, Error> {
        if key.type_value != 0xFC {
            return Err(Error::InvalidProprietaryKey)
        }

        Ok(deserialize(&key.key)?)
    }

    /// Constructs a full [Key] corresponding to this proprietary key type.
    pub fn to_key(&self) -> Key {
        Key {
            type_value: 0xFC,
            key: serialize(self)
        }
    }
}
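
// The tests below are a minimal sketch of encode/decode round trips for the raw
// types defined in this module. The byte values and test names are illustrative
// and are not taken from any BIP-174 test vector.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn pair_encode_decode_roundtrip() {
        let pair = Pair {
            key: Key { type_value: 0x01, key: vec![0xaa, 0xbb] },
            value: vec![0x00, 0x11, 0x22],
        };
        let encoded = serialize(&pair);
        // <keylen=3><keytype=0x01><keydata=aabb> then <valuelen=3><valuedata=001122>.
        assert_eq!(encoded, vec![0x03, 0x01, 0xaa, 0xbb, 0x03, 0x00, 0x11, 0x22]);
        let decoded: Pair = deserialize(&encoded).expect("valid pair encoding");
        assert_eq!(decoded, pair);
    }

    // Converting a ProprietaryKey to a raw Key and back should be lossless.
    #[test]
    fn proprietary_key_to_key_roundtrip() {
        let pk = ProprietaryKey {
            prefix: vec![0xaa, 0xbb],
            subtype: 0x42u8,
            key: vec![0x01, 0x02],
        };
        let key = pk.to_key();
        assert_eq!(key.type_value, 0xFC);
        let decoded = ProprietaryKey::<u8>::from_key(key).expect("valid proprietary key");
        assert_eq!(decoded, pk);
    }
}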