Use fewer vertical lines

In this library we specifically do not use rustfmt and tend to favour
terse statements that do not use extra lines unnecessarily. In order to
help new devs understand the style, modify code that seems to use an
unnecessary number of lines.

None of these changes should reduce the readability of the code.
This commit is contained in:
Tobin Harding 2022-01-24 11:31:39 +11:00
parent a5c06e0a96
commit 71cf00a314
15 changed files with 59 additions and 195 deletions

View File

@ -1355,38 +1355,19 @@ mod test {
let slop_v_nonmin: Result<Vec<Instruction>, Error> = nonminimal.instructions().collect();
let slop_v_nonmin_alt: Result<Vec<Instruction>, Error> = nonminimal_alt.instructions().collect();
assert_eq!(
v_zero.unwrap(),
vec![
Instruction::PushBytes(&[]),
]
);
assert_eq!(
v_zeropush.unwrap(),
vec![
Instruction::PushBytes(&[0]),
]
);
assert_eq!(v_zero.unwrap(), vec![Instruction::PushBytes(&[])]);
assert_eq!(v_zeropush.unwrap(), vec![Instruction::PushBytes(&[0])]);
assert_eq!(
v_min.clone().unwrap(),
vec![
Instruction::PushBytes(&[105]),
Instruction::Op(opcodes::OP_NOP3),
]
vec![Instruction::PushBytes(&[105]), Instruction::Op(opcodes::OP_NOP3)]
);
assert_eq!(
v_nonmin.err().unwrap(),
Error::NonMinimalPush
);
assert_eq!(v_nonmin.err().unwrap(), Error::NonMinimalPush);
assert_eq!(
v_nonmin_alt.clone().unwrap(),
vec![
Instruction::PushBytes(&[105, 0]),
Instruction::Op(opcodes::OP_NOP3),
]
vec![Instruction::PushBytes(&[105, 0]), Instruction::Op(opcodes::OP_NOP3)]
);
assert_eq!(v_min.clone().unwrap(), slop_v_min.unwrap());

View File

@ -59,10 +59,7 @@ impl OutPoint {
/// Creates a new [`OutPoint`].
#[inline]
pub fn new(txid: Txid, vout: u32) -> OutPoint {
OutPoint {
txid,
vout,
}
OutPoint { txid, vout }
}
/// Creates a "null" `OutPoint`.
@ -639,9 +636,7 @@ impl Decodable for Transaction {
}
}
// We don't support anything else
x => {
Err(encode::Error::UnsupportedSegwitFlag(x))
}
x => Err(encode::Error::UnsupportedSegwitFlag(x)),
}
// non-segwit
} else {

View File

@ -202,10 +202,8 @@ impl Witness {
self.last = self.content.len();
let element_len_varint = VarInt(new_element.len() as u64);
let current_content_len = self.content.len();
self.content.resize(
current_content_len + element_len_varint.len() + new_element.len(),
0,
);
self.content
.resize(current_content_len + element_len_varint.len() + new_element.len(), 0);
let end_varint = current_content_len + element_len_varint.len();
element_len_varint
.consensus_encode(&mut self.content[current_content_len..end_varint])
@ -359,14 +357,9 @@ mod test {
for (i, wit_el) in tx.input[0].witness.iter().enumerate() {
assert_eq!(expected_wit[i], wit_el.to_hex());
}
assert_eq!(
expected_wit[1],
tx.input[0].witness.last().unwrap().to_hex()
);
assert_eq!(
expected_wit[0],
tx.input[0].witness.second_to_last().unwrap().to_hex()
);
assert_eq!(expected_wit[1], tx.input[0].witness.last().unwrap().to_hex());
assert_eq!(expected_wit[0], tx.input[0].witness.second_to_last().unwrap().to_hex());
let tx_bytes_back = serialize(&tx);
assert_eq!(tx_bytes_back, tx_bytes);
}

View File

@ -164,9 +164,7 @@ pub fn deserialize<T: Decodable>(data: &[u8]) -> Result<T, Error> {
/// Deserialize an object from a vector, but will not report an error if said deserialization
/// doesn't consume the entire vector.
pub fn deserialize_partial<T: Decodable>(
data: &[u8],
) -> Result<(T, usize), Error> {
pub fn deserialize_partial<T: Decodable>(data: &[u8]) -> Result<(T, usize), Error> {
let mut decoder = Cursor::new(data);
let rv = Decodable::consensus_decode(&mut decoder)?;
let consumed = decoder.position() as usize;

View File

@ -58,10 +58,7 @@ impl Address {
if addr[0..3] == ONION {
return Err(io::Error::from(io::ErrorKind::AddrNotAvailable));
}
let ipv6 = Ipv6Addr::new(
addr[0],addr[1],addr[2],addr[3],
addr[4],addr[5],addr[6],addr[7]
);
let ipv6 = Ipv6Addr::new(addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], addr[6], addr[7]);
if let Some(ipv4) = ipv6.to_ipv4() {
Ok(SocketAddr::V4(SocketAddrV4::new(ipv4, self.port)))
} else {
@ -190,10 +187,7 @@ impl Decodable for AddrV2 {
if addr[0..6] == [0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF] {
return Err(encode::Error::ParseFailed("IPV4 wrapped address sent with IPv6 network id"));
}
AddrV2::Ipv6(Ipv6Addr::new(
addr[0],addr[1],addr[2],addr[3],
addr[4],addr[5],addr[6],addr[7]
))
AddrV2::Ipv6(Ipv6Addr::new(addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], addr[6], addr[7]))
},
3 => {
if len != 10 {
@ -226,10 +220,7 @@ impl Decodable for AddrV2 {
return Err(encode::Error::ParseFailed("Invalid CJDNS address"));
}
let addr = addr_to_be(addr);
AddrV2::Cjdns(Ipv6Addr::new(
addr[0],addr[1],addr[2],addr[3],
addr[4],addr[5],addr[6],addr[7]
))
AddrV2::Cjdns(Ipv6Addr::new(addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], addr[6], addr[7]))
},
_ => {
// len already checked above to be <= 512

View File

@ -293,39 +293,16 @@ mod tests {
#[test]
fn serialize_test() {
assert_eq!(
serialize(&Network::Bitcoin.magic()),
&[0xf9, 0xbe, 0xb4, 0xd9]
);
assert_eq!(
serialize(&Network::Testnet.magic()),
&[0x0b, 0x11, 0x09, 0x07]
);
assert_eq!(
serialize(&Network::Signet.magic()),
&[0x0a, 0x03, 0xcf, 0x40]
);
assert_eq!(
serialize(&Network::Regtest.magic()),
&[0xfa, 0xbf, 0xb5, 0xda]
);
assert_eq!(serialize(&Network::Bitcoin.magic()), &[0xf9, 0xbe, 0xb4, 0xd9]);
assert_eq!(serialize(&Network::Testnet.magic()), &[0x0b, 0x11, 0x09, 0x07]);
assert_eq!(serialize(&Network::Signet.magic()), &[0x0a, 0x03, 0xcf, 0x40]);
assert_eq!(serialize(&Network::Regtest.magic()), &[0xfa, 0xbf, 0xb5, 0xda]);
assert_eq!(deserialize(&[0xf9, 0xbe, 0xb4, 0xd9]).ok(), Some(Network::Bitcoin.magic()));
assert_eq!(deserialize(&[0x0b, 0x11, 0x09, 0x07]).ok(), Some(Network::Testnet.magic()));
assert_eq!(deserialize(&[0x0a, 0x03, 0xcf, 0x40]).ok(), Some(Network::Signet.magic()));
assert_eq!(deserialize(&[0xfa, 0xbf, 0xb5, 0xda]).ok(), Some(Network::Regtest.magic()));
assert_eq!(
deserialize(&[0xf9, 0xbe, 0xb4, 0xd9]).ok(),
Some(Network::Bitcoin.magic())
);
assert_eq!(
deserialize(&[0x0b, 0x11, 0x09, 0x07]).ok(),
Some(Network::Testnet.magic())
);
assert_eq!(
deserialize(&[0x0a, 0x03, 0xcf, 0x40]).ok(),
Some(Network::Signet.magic())
);
assert_eq!(
deserialize(&[0xfa, 0xbf, 0xb5, 0xda]).ok(),
Some(Network::Regtest.magic())
);
}
#[test]

View File

@ -57,8 +57,7 @@ impl Encodable for Inventory {
fn consensus_encode<S: io::Write>(&self, mut s: S) -> Result<usize, io::Error> {
macro_rules! encode_inv {
($code:expr, $item:expr) => {
u32::consensus_encode(&$code, &mut s)? +
$item.consensus_encode(&mut s)?
u32::consensus_encode(&$code, &mut s)? + $item.consensus_encode(&mut s)?
}
}
Ok(match *self {

View File

@ -94,17 +94,10 @@ impl fmt::Display for Error {
Error::InvalidWitnessVersion(v) => write!(f, "invalid witness script version: {}", v),
Error::UnparsableWitnessVersion(_) => write!(f, "incorrect format of a witness version byte"),
Error::MalformedWitnessVersion => f.write_str("bitcoin script opcode does not match any known witness version, the script is malformed"),
Error::InvalidWitnessProgramLength(l) => write!(f,
"the witness program must be between 2 and 40 bytes in length: length={}", l,
),
Error::InvalidSegwitV0ProgramLength(l) => write!(f,
"a v0 witness program must be either of length 20 or 32 bytes: length={}", l,
),
Error::UncompressedPubkey => write!(f,
"an uncompressed pubkey was used where it is not allowed",
),
Error::ExcessiveScriptSize => write!(f,
"Script size exceed 520 bytes")
Error::InvalidWitnessProgramLength(l) => write!(f, "the witness program must be between 2 and 40 bytes in length: length={}", l),
Error::InvalidSegwitV0ProgramLength(l) => write!(f, "a v0 witness program must be either of length 20 or 32 bytes: length={}", l),
Error::UncompressedPubkey => write!(f, "an uncompressed pubkey was used where it is not allowed"),
Error::ExcessiveScriptSize => write!(f, "Script size exceed 520 bytes"),
}
}
}
@ -395,14 +388,11 @@ impl Payload {
/// Generates a script pubkey spending to this [Payload].
pub fn script_pubkey(&self) -> script::Script {
match *self {
Payload::PubkeyHash(ref hash) =>
script::Script::new_p2pkh(hash),
Payload::ScriptHash(ref hash) =>
script::Script::new_p2sh(hash),
Payload::WitnessProgram {
version,
program: ref prog,
} => script::Script::new_witness_program(version, prog)
Payload::PubkeyHash(ref hash) => script::Script::new_p2pkh(hash),
Payload::ScriptHash(ref hash) => script::Script::new_p2sh(hash),
Payload::WitnessProgram { version, program: ref prog } => {
script::Script::new_witness_program(version, prog)
}
}
}
@ -622,10 +612,7 @@ impl Address {
/// Creates a pay to taproot address from a pre-tweaked output key.
///
/// This method is not recommended for use, [`Address::p2tr()`] should be used where possible.
pub fn p2tr_tweaked(
output_key: TweakedPublicKey,
network: Network
) -> Address {
pub fn p2tr_tweaked(output_key: TweakedPublicKey, network: Network) -> Address {
Address {
network,
payload: Payload::p2tr_tweaked(output_key),

View File

@ -961,10 +961,7 @@ impl<T> CheckedSum<SignedAmount> for T where T: Iterator<Item = SignedAmount> {
fn checked_sum(mut self) -> Option<SignedAmount> {
let first = Some(self.next().unwrap_or_default());
self.fold(
first,
|acc, item| acc.and_then(|acc| acc.checked_add(item))
)
self.fold(first, |acc, item| acc.and_then(|acc| acc.checked_add(item)))
}
}
@ -1534,10 +1531,7 @@ mod tests {
samt: SignedAmount::from_sat(-123456789),
},
&[
serde_test::Token::Struct {
name: "T",
len: 2,
},
serde_test::Token::Struct { name: "T", len: 2 },
serde_test::Token::Str("amt"),
serde_test::Token::U64(123456789),
serde_test::Token::Str("samt"),

View File

@ -913,23 +913,14 @@ mod tests {
let mut pk = ExtendedPubKey::from_priv(secp, &sk);
// Check derivation convenience method for ExtendedPrivKey
assert_eq!(
&sk.derive_priv(secp, &path).unwrap().to_string()[..],
expected_sk
);
assert_eq!(&sk.derive_priv(secp, &path).unwrap().to_string()[..], expected_sk);
// Check derivation convenience method for ExtendedPubKey, should error
// appropriately if any ChildNumber is hardened
if path.0.iter().any(|cnum| cnum.is_hardened()) {
assert_eq!(
pk.derive_pub(secp, &path),
Err(Error::CannotDeriveFromHardenedKey)
);
assert_eq!(pk.derive_pub(secp, &path), Err(Error::CannotDeriveFromHardenedKey));
} else {
assert_eq!(
&pk.derive_pub(secp, &path).unwrap().to_string()[..],
expected_pk
);
assert_eq!(&pk.derive_pub(secp, &path).unwrap().to_string()[..], expected_pk);
}
// Derive keys, checking hardened and non-hardened derivation one-by-one

View File

@ -156,11 +156,7 @@ impl PublicKey {
let mut bytes = [0; 65];
reader.read_exact(&mut bytes[0..1])?;
let bytes = if bytes[0] < 4 {
&mut bytes[..33]
} else {
&mut bytes[..65]
};
let bytes = if bytes[0] < 4 { &mut bytes[..33] } else { &mut bytes[..65] };
reader.read_exact(&mut bytes[1..])?;
Self::from_slice(bytes).map_err(|e| {
@ -285,10 +281,7 @@ impl PrivateKey {
/// Deserialize a private key from a slice
pub fn from_slice(data: &[u8], network: Network) -> Result<PrivateKey, Error> {
Ok(PrivateKey::new(
secp256k1::SecretKey::from_slice(data)?,
network,
))
Ok(PrivateKey::new(secp256k1::SecretKey::from_slice(data)?, network))
}
/// Format the private key to WIF format.

View File

@ -189,9 +189,7 @@ impl PartialMerkleTree {
}
// there can never be more hashes provided than one for every txid
if self.hashes.len() as u32 > self.num_transactions {
return Err(BadFormat(
"Proof contains more hashes than transactions".to_owned(),
));
return Err(BadFormat("Proof contains more hashes than transactions".to_owned()));
};
// there must be at least one bit per node in the partial tree, and at least one node per hash
if self.bits.len() < self.hashes.len() {
@ -246,13 +244,7 @@ impl PartialMerkleTree {
}
/// Recursive function that traverses tree nodes, storing the data as bits and hashes
fn traverse_and_build(
&mut self,
height: u32,
pos: u32,
txids: &[Txid],
matches: &[bool],
) {
fn traverse_and_build(&mut self, height: u32, pos: u32, txids: &[Txid], matches: &[bool]) {
// Determine whether this node is the parent of at least one matched txid
let mut parent_of_match = false;
let mut p = pos << height;

View File

@ -342,10 +342,7 @@ mod tests {
inputs: vec![],
outputs: vec![],
};
assert_eq!(
serialize_hex(&psbt),
"70736274ff01000a0200000000000000000000"
);
assert_eq!(serialize_hex(&psbt), "70736274ff01000a0200000000000000000000");
}
#[test]
@ -387,12 +384,8 @@ mod tests {
hd_keypaths.insert(pk.public_key, (fprint, dpath.into()));
let expected: Output = Output {
redeem_script: Some(hex_script!(
"76a914d0c59903c5bac2868760e90fd521a4665aa7652088ac"
)),
witness_script: Some(hex_script!(
"a9143545e6e33b832c47050f24d3eeb93c9c03948bc787"
)),
redeem_script: Some(hex_script!("76a914d0c59903c5bac2868760e90fd521a4665aa7652088ac")),
witness_script: Some(hex_script!("a9143545e6e33b832c47050f24d3eeb93c9c03948bc787")),
bip32_derivation: hd_keypaths,
..Default::default()
};
@ -438,13 +431,8 @@ mod tests {
version: 0,
proprietary: Default::default(),
unknown: Default::default(),
inputs: vec![
Input::default(),
],
outputs: vec![
Output::default(),
Output::default()
]
inputs: vec![Input::default()],
outputs: vec![Output::default(), Output::default()],
};
let actual: PartiallySignedTransaction = deserialize(&serialize(&expected)).unwrap();
@ -801,8 +789,7 @@ mod tests {
let psbt_non_witness_utxo = (&psbt.inputs[0].non_witness_utxo).as_ref().unwrap();
assert_eq!(tx_input.previous_output.txid, psbt_non_witness_utxo.txid());
assert!(
psbt_non_witness_utxo.output[tx_input.previous_output.vout as usize]
assert!(psbt_non_witness_utxo.output[tx_input.previous_output.vout as usize]
.script_pubkey
.is_p2pkh()
);
@ -868,9 +855,7 @@ mod tests {
let tx = &psbt.unsigned_tx;
assert_eq!(
tx.txid(),
Txid::from_hex(
"75c5c9665a570569ad77dd1279e6fd4628a093c4dcbf8d41532614044c14c115"
).unwrap()
Txid::from_hex("75c5c9665a570569ad77dd1279e6fd4628a093c4dcbf8d41532614044c14c115").unwrap(),
);
let mut unknown: BTreeMap<raw::Key, Vec<u8>> = BTreeMap::new();

View File

@ -99,10 +99,7 @@ impl Decodable for Key {
key.push(Decodable::consensus_decode(&mut d)?);
}
Ok(Key {
type_value,
key,
})
Ok(Key { type_value, key })
}
}
@ -153,11 +150,7 @@ impl<Subtype> Decodable for ProprietaryKey<Subtype> where Subtype: Copy + From<u
let subtype = Subtype::from(d.read_u8()?);
let key = read_to_end(d)?;
Ok(ProprietaryKey {
prefix,
subtype,
key
})
Ok(ProprietaryKey { prefix, subtype, key })
}
}

View File

@ -598,9 +598,7 @@ impl TaprootMerkleBranch {
if sl.len() % TAPROOT_CONTROL_NODE_SIZE != 0 {
Err(TaprootError::InvalidMerkleBranchSize(sl.len()))
} else if sl.len() > TAPROOT_CONTROL_NODE_SIZE * TAPROOT_CONTROL_MAX_NODE_COUNT {
Err(TaprootError::InvalidMerkleTreeDepth(
sl.len() / TAPROOT_CONTROL_NODE_SIZE,
))
Err(TaprootError::InvalidMerkleTreeDepth(sl.len() / TAPROOT_CONTROL_NODE_SIZE))
} else {
let inner = sl
// TODO: Use chunks_exact after MSRV changes to 1.31
@ -717,8 +715,7 @@ impl ControlBlock {
/// applied when encoding this element as a witness.
pub fn serialize(&self) -> Vec<u8> {
let mut buf = Vec::with_capacity(self.size());
self.encode(&mut buf)
.expect("writers don't error");
self.encode(&mut buf).expect("writers don't error");
buf
}
@ -939,11 +936,9 @@ impl fmt::Display for TaprootBuilderError {
"Attempted to create a tree with two nodes at depth 0. There must\
only be a exactly one node at depth 0",
),
TaprootBuilderError::InvalidMerkleTreeDepth(d) => write!(
f,
"Merkle Tree depth({}) must be less than {}",
d, TAPROOT_CONTROL_MAX_NODE_COUNT
),
TaprootBuilderError::InvalidMerkleTreeDepth(d) => {
write!(f, "Merkle Tree depth({}) must be less than {}", d, TAPROOT_CONTROL_MAX_NODE_COUNT)
}
TaprootBuilderError::InvalidInternalKey(e) => {
write!(f, "Invalid Internal XOnly key : {}", e)
}