From f44111f0c48792620e4d04027c58d1cba4933cb2 Mon Sep 17 00:00:00 2001
From: Sebastian Kunert <skunert49@gmail.com>
Date: Fri, 8 Sep 2023 11:37:27 +0200
Subject: [PATCH] Improve tests and rename Recorder
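
Rename the proof-size recorder in the parachain-system `validate_block` code
(`RecorderProvider` -> `SizeOnlyRecorderProvider`, `SizeRecorder` ->
`SizeOnlyRecorder`), move the recorder equivalence tests into a `#[cfg(test)]`
module next to the implementation, drop the temporary debug logging and the
`sp_api::init_runtime_logger()` call together with the now unused `sp-api`
dependency, and adjust formatting in
`substrate/client/service/src/client/client.rs`.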

---
 Cargo.lock                                    |   1 -
 cumulus/pallets/parachain-system/Cargo.toml   |   2 -
 .../src/validate_block/implementation.rs      |   7 +-
 .../src/validate_block/tests.rs               |  90 +--------
 .../src/validate_block/trie_recorder.rs       | 181 ++++++++++++++++--
 substrate/client/service/src/client/client.rs | 118 +++++-------
 6 files changed, 225 insertions(+), 174 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 04c36109a0c..d4dc84709f0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3556,7 +3556,6 @@ dependencies = [
  "rand 0.8.5",
  "sc-client-api",
  "scale-info",
- "sp-api",
  "sp-core",
  "sp-externalities",
  "sp-inherents",
diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml
index d89d2b2d067..70a4586416e 100644
--- a/cumulus/pallets/parachain-system/Cargo.toml
+++ b/cumulus/pallets/parachain-system/Cargo.toml
@@ -20,7 +20,6 @@ frame-system = { path = "../../../substrate/frame/system", default-features = fa
 sp-core = { path = "../../../substrate/primitives/core", default-features = false}
 sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false}
 sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false}
-sp-api = { path = "../../../substrate/primitives/api", default-features = false}
 sp-io = { path = "../../../substrate/primitives/io", default-features = false}
 sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false}
 sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false}
@@ -55,7 +54,6 @@ sp-version = { path = "../../../substrate/primitives/version" }
 cumulus-test-client = { path = "../../test/client" }
 cumulus-test-relay-sproof-builder = { path = "../../test/relay-sproof-builder" }
 
-
 [features]
 default = [ "std" ]
 std = [
diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs
index 78df38947d4..e6176b05acb 100644
--- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs
+++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs
@@ -34,13 +34,13 @@ use sp_io::KillStorageResult;
 use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT};
 use sp_std::{prelude::*, sync::Arc};
 use sp_trie::{MemoryDB, TrieRecorderProvider};
-use trie_recorder::RecorderProvider;
+use trie_recorder::SizeOnlyRecorderProvider;
 
 type TrieBackend<B> = sp_state_machine::TrieBackend<
 	MemoryDB<HashingFor<B>>,
 	HashingFor<B>,
 	trie_cache::CacheProvider<HashingFor<B>>,
-	RecorderProvider<HashingFor<B>>,
+	SizeOnlyRecorderProvider<HashingFor<B>>,
 >;
 
 type Ext<'a, B> = sp_state_machine::Ext<'a, HashingFor<B>, TrieBackend<B>>;
@@ -93,7 +93,6 @@ where
 	B::Extrinsic: ExtrinsicCall,
 	<B::Extrinsic as Extrinsic>::Call: IsSubType<crate::Call<PSC>>,
 {
-	sp_api::init_runtime_logger();
 	let block_data = codec::decode_from_bytes::<ParachainBlockData<B>>(block_data)
 		.expect("Invalid parachain block data");
 
@@ -122,7 +121,7 @@ where
 
 	sp_std::mem::drop(storage_proof);
 
-	let recorder = RecorderProvider::new();
+	let recorder = SizeOnlyRecorderProvider::new();
 	let cache_provider = trie_cache::CacheProvider::new();
 	// We use the storage root of the `parent_head` to ensure that it is the correct root.
 	// This is already being done above while creating the in-memory db, but let's be paranoid!!
diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs
index d6b9cadb15d..0cf68f25cc3 100644
--- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs
+++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs
@@ -26,12 +26,10 @@ use cumulus_test_client::{
 };
 use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder;
 use sp_keyring::AccountKeyring::*;
-use sp_runtime::traits::{HashingFor, Header as HeaderT};
-use sp_trie::TrieRecorderProvider;
+use sp_runtime::traits::Header as HeaderT;
 use std::{env, process::Command};
-use trie_standardmap::{Alphabet, StandardMap, ValueMode};
 
-use crate::validate_block::{trie_recorder::RecorderProvider, MemoryOptimizedValidationParams};
+use crate::validate_block::MemoryOptimizedValidationParams;
 
 fn call_validate_block_encoded_header(
 	parent_head: Header,
@@ -292,87 +290,3 @@ fn validation_params_and_memory_optimized_validation_params_encode_and_decode()
 	let decoded = ValidationParams::decode_all(&mut &encoded[..]).unwrap();
 	assert_eq!(decoded, validation_params);
 }
-
-const TEST_DATA: &[(&[u8], &[u8])] =
-	&[(b"key1", &[1; 64]), (b"key2", &[2; 64]), (b"key3", &[3; 64]), (b"key4", &[4; 64])];
-
-use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut, TrieRecorder};
-type MemoryDB = sp_trie::MemoryDB<sp_core::Blake2Hasher>;
-type Layout = sp_trie::LayoutV1<sp_core::Blake2Hasher>;
-type Recorder = sp_trie::recorder::Recorder<sp_core::Blake2Hasher>;
-
-fn create_trie() -> (MemoryDB, TrieHash<Layout>, Vec<(Vec<u8>, Vec<u8>)>) {
-	let mut db = MemoryDB::default();
-	let mut root = Default::default();
-
-	let mut seed = Default::default();
-	let x = StandardMap {
-		alphabet: Alphabet::Low,
-		min_key: 5,
-		journal_key: 0,
-		value_mode: ValueMode::Random,
-		count: 1000,
-	}
-	.make_with(&mut seed);
-
-	{
-		let mut trie = TrieDBMutBuilder::<Layout>::new(&mut db, &mut root).build();
-		for (k, v) in x.iter() {
-			trie.insert(k, v).expect("Inserts data");
-		}
-	}
-
-	(db, root, x)
-}
-use rand::Rng;
-use sp_trie::cache::{CacheSize, SharedTrieCache};
-#[test]
-fn recorder_does_its_thing() {
-	sp_tracing::try_init_simple();
-	let (db, root, values) = create_trie();
-
-	let mut rng = rand::thread_rng();
-	for _ in 1..2 {
-		let mut reference_recorder = Recorder::default();
-		let recorder_for_test: RecorderProvider<sp_core::Blake2Hasher> = RecorderProvider::new();
-		let reference_cache: SharedTrieCache<sp_core::Blake2Hasher> =
-			SharedTrieCache::new(CacheSize::new(1024 * 5));
-		let cache_for_test: SharedTrieCache<sp_core::Blake2Hasher> =
-			SharedTrieCache::new(CacheSize::new(1024 * 5));
-		{
-			let mut local_cache = cache_for_test.local_cache();
-			let mut trie_cache_for_reference = local_cache.as_trie_db_cache(root);
-			let mut reference_trie_recorder = reference_recorder.as_trie_recorder(root);
-			let reference_trie = TrieDBBuilder::<Layout>::new(&db, &root)
-				.with_recorder(&mut reference_trie_recorder)
-				.with_cache(&mut trie_cache_for_reference)
-				.build();
-
-			let mut local_cache_for_test = cache_for_test.local_cache();
-			let mut trie_cache_for_test = local_cache_for_test.as_trie_db_cache(root);
-			let mut trie_recorder_under_test = recorder_for_test.as_trie_recorder(root);
-			let test_trie = TrieDBBuilder::<Layout>::new(&db, &root)
-				.with_recorder(&mut trie_recorder_under_test)
-				.with_cache(&mut trie_cache_for_test)
-				.build();
-
-			log::info!("just get");
-			for _ in 0..10 {
-				let index: usize = rng.gen_range(0..values.len());
-				test_trie.get(&values[index].0).unwrap().unwrap();
-				reference_trie.get(&values[index].0).unwrap().unwrap();
-			}
-
-			log::info!("hash access");
-			for _ in 0..10 {
-				let index: usize = rng.gen_range(0..values.len());
-				test_trie.get_hash(&values[index].0);
-				reference_trie.get_hash(&values[index].0);
-			}
-		}
-		assert_eq!(
-			reference_recorder.estimate_encoded_size(),
-			recorder_for_test.estimate_encoded_size()
-		);
-	}
-}
diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs
index 28ceb9ea7a0..95629750434 100644
--- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs
+++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs
@@ -28,28 +28,27 @@ use sp_trie::{NodeCodec, StorageProof};
 use trie_db::{Hasher, RecordedForKey, TrieAccess};
 
 /// A trie recorder that only keeps track of the proof size.
-pub(crate) struct SizeRecorder<'a, H: Hasher> {
+pub(crate) struct SizeOnlyRecorder<'a, H: Hasher> {
 	seen_nodes: RefMut<'a, BTreeSet<H::Out>>,
 	encoded_size: RefMut<'a, usize>,
 	recorded_keys: RefMut<'a, BTreeMap<Arc<[u8]>, RecordedForKey>>,
 }
 
-impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder<H::Out> for SizeRecorder<'a, H> {
+impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder<H::Out> for SizeOnlyRecorder<'a, H> {
 	fn record<'b>(&mut self, access: TrieAccess<'b, H::Out>) {
 		let mut encoded_size_update = 0;
 		match access {
-			TrieAccess::NodeOwned { hash, node_owned } =>
+			TrieAccess::NodeOwned { hash, node_owned } => {
 				if !self.seen_nodes.get(&hash).is_some() {
 					let node = node_owned.to_encoded::<NodeCodec<H>>();
-					log::info!(target: "skunert", "TrieAccess::NodeOwned");
 					encoded_size_update += node.encoded_size();
 					self.seen_nodes.insert(hash);
-				},
+				}
+			},
 			TrieAccess::EncodedNode { hash, encoded_node } => {
 				if !self.seen_nodes.get(&hash).is_some() {
 					let node = encoded_node.into_owned();
 
-					log::info!(target: "skunert", "TrieAccess::EncodedNode");
 					encoded_size_update += node.encoded_size();
 					self.seen_nodes.insert(hash);
 				}
@@ -57,7 +56,6 @@ impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder<H::Out> for SizeRecorder<'a,
 			TrieAccess::Value { hash, value, full_key } => {
 				if !self.seen_nodes.get(&hash).is_some() {
 					let value = value.into_owned();
-					log::info!(target: "skunert", "TrieAccess::Value");
 					encoded_size_update += value.encoded_size();
 					self.seen_nodes.insert(hash);
 				}
@@ -87,13 +85,13 @@ impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder<H::Out> for SizeRecorder<'a,
 	}
 }
 
-pub(crate) struct RecorderProvider<H: Hasher> {
+pub(crate) struct SizeOnlyRecorderProvider<H: Hasher> {
 	seen_nodes: RefCell<BTreeSet<H::Out>>,
 	encoded_size: RefCell<usize>,
 	recorded_keys: RefCell<BTreeMap<Arc<[u8]>, RecordedForKey>>,
 }
 
-impl<H: Hasher> RecorderProvider<H> {
+impl<H: Hasher> SizeOnlyRecorderProvider<H> {
 	pub fn new() -> Self {
 		Self {
 			seen_nodes: Default::default(),
@@ -103,15 +101,15 @@ impl<H: Hasher> RecorderProvider<H> {
 	}
 }
 
-impl<H: trie_db::Hasher> sp_trie::TrieRecorderProvider<H> for RecorderProvider<H> {
-	type Recorder<'a> = SizeRecorder<'a, H> where H: 'a;
+impl<H: trie_db::Hasher> sp_trie::TrieRecorderProvider<H> for SizeOnlyRecorderProvider<H> {
+	type Recorder<'a> = SizeOnlyRecorder<'a, H> where H: 'a;
 
 	fn drain_storage_proof(self) -> StorageProof {
 		unimplemented!("Draining storage proof not supported!")
 	}
 
 	fn as_trie_recorder(&self, _storage_root: H::Out) -> Self::Recorder<'_> {
-		SizeRecorder {
+		SizeOnlyRecorder {
 			encoded_size: self.encoded_size.borrow_mut(),
 			seen_nodes: self.seen_nodes.borrow_mut(),
 			recorded_keys: self.recorded_keys.borrow_mut(),
@@ -124,5 +122,160 @@ impl<H: trie_db::Hasher> sp_trie::TrieRecorderProvider<H> for RecorderProvider<H
 }
 
 // This is safe here since we are single-threaded in WASM
-unsafe impl<H: Hasher> Send for RecorderProvider<H> {}
-unsafe impl<H: Hasher> Sync for RecorderProvider<H> {}
+unsafe impl<H: Hasher> Send for SizeOnlyRecorderProvider<H> {}
+unsafe impl<H: Hasher> Sync for SizeOnlyRecorderProvider<H> {}
+
+#[cfg(test)]
+mod tests {
+	use rand::Rng;
+	use sp_trie::{
+		cache::{CacheSize, SharedTrieCache},
+		MemoryDB, TrieRecorderProvider,
+	};
+	use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut, TrieRecorder};
+	use trie_standardmap::{Alphabet, StandardMap, ValueMode};
+
+	use crate::validate_block::trie_recorder::SizeOnlyRecorderProvider;
+
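+	// Reference recorder from `sp_trie` that keeps the full proof; the size-only
+	// recorder under test should report the same encoded size and recorded keys.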
+	type Recorder = sp_trie::recorder::Recorder<sp_core::Blake2Hasher>;
+
+	fn create_trie() -> (
+		sp_trie::MemoryDB<sp_core::Blake2Hasher>,
+		TrieHash<sp_trie::LayoutV1<sp_core::Blake2Hasher>>,
+		Vec<(Vec<u8>, Vec<u8>)>,
+	) {
+		let mut db = MemoryDB::default();
+		let mut root = Default::default();
+
+		let mut seed = Default::default();
+		let test_data: Vec<(Vec<u8>, Vec<u8>)> = StandardMap {
+			alphabet: Alphabet::Low,
+			min_key: 16,
+			journal_key: 0,
+			value_mode: ValueMode::Random,
+			count: 1000,
+		}
+		.make_with(&mut seed)
+		.into_iter()
+		.map(|(k, v)| {
+			// Double the length so we end up with some values of 2 bytes and some of 64
+			let v = [v.clone(), v].concat();
+			(k, v)
+		})
+		.collect();
+
+		// Fill database with values
+		{
+			let mut trie = TrieDBMutBuilder::<sp_trie::LayoutV1<sp_core::Blake2Hasher>>::new(
+				&mut db, &mut root,
+			)
+			.build();
+			for (k, v) in &test_data {
+				trie.insert(k, v).expect("Inserts data");
+			}
+		}
+
+		(db, root, test_data)
+	}
+
+	#[test]
+	fn recorder_equivalence_cache() {
+		sp_tracing::try_init_simple();
+		let (db, root, test_data) = create_trie();
+
+		let mut rng = rand::thread_rng();
+		for _ in 1..10 {
+			let reference_recorder = Recorder::default();
+			let recorder_for_test: SizeOnlyRecorderProvider<sp_core::Blake2Hasher> =
+				SizeOnlyRecorderProvider::new();
+			let reference_cache: SharedTrieCache<sp_core::Blake2Hasher> =
+				SharedTrieCache::new(CacheSize::new(1024 * 5));
+			let cache_for_test: SharedTrieCache<sp_core::Blake2Hasher> =
+				SharedTrieCache::new(CacheSize::new(1024 * 5));
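+			// Record identical random accesses with both recorders; each trie gets its
+			// own (initially empty) shared trie cache.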
+			{
+				let local_cache = reference_cache.local_cache();
+				let mut trie_cache_for_reference = local_cache.as_trie_db_cache(root);
+				let mut reference_trie_recorder = reference_recorder.as_trie_recorder(root);
+				let reference_trie =
+					TrieDBBuilder::<sp_trie::LayoutV1<sp_core::Blake2Hasher>>::new(&db, &root)
+						.with_recorder(&mut reference_trie_recorder)
+						.with_cache(&mut trie_cache_for_reference)
+						.build();
+
+				let local_cache_for_test = cache_for_test.local_cache();
+				let mut trie_cache_for_test = local_cache_for_test.as_trie_db_cache(root);
+				let mut trie_recorder_under_test = recorder_for_test.as_trie_recorder(root);
+				let test_trie =
+					TrieDBBuilder::<sp_trie::LayoutV1<sp_core::Blake2Hasher>>::new(&db, &root)
+						.with_recorder(&mut trie_recorder_under_test)
+						.with_cache(&mut trie_cache_for_test)
+						.build();
+
+				// Access random values from the test data
+				for _ in 0..100 {
+					let index: usize = rng.gen_range(0..test_data.len());
+					test_trie.get(&test_data[index].0).unwrap().unwrap();
+					reference_trie.get(&test_data[index].0).unwrap().unwrap();
+				}
+
+				// Check that we have the same nodes recorded for both recorders
+				for (key, _) in test_data.iter() {
+					let reference = reference_trie_recorder.trie_nodes_recorded_for_key(key);
+					let test_value = trie_recorder_under_test.trie_nodes_recorded_for_key(key);
+					assert_eq!(format!("{:?}", reference), format!("{:?}", test_value));
+				}
+			}
+
+			// Check that we have the same size recorded for both recorders
+			assert_eq!(
+				reference_recorder.estimate_encoded_size(),
+				recorder_for_test.estimate_encoded_size()
+			);
+		}
+	}
+
+	#[test]
+	fn recorder_equivalence_no_cache() {
+		sp_tracing::try_init_simple();
+		let (db, root, test_data) = create_trie();
+
+		let mut rng = rand::thread_rng();
+		for _ in 1..10 {
+			let reference_recorder = Recorder::default();
+			let recorder_for_test: SizeOnlyRecorderProvider<sp_core::Blake2Hasher> =
+				SizeOnlyRecorderProvider::new();
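+			// Same comparison as above, but with no trie cache attached to either trie.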
+			{
+				let mut reference_trie_recorder = reference_recorder.as_trie_recorder(root);
+				let reference_trie =
+					TrieDBBuilder::<sp_trie::LayoutV1<sp_core::Blake2Hasher>>::new(&db, &root)
+						.with_recorder(&mut reference_trie_recorder)
+						.build();
+
+				let mut trie_recorder_under_test = recorder_for_test.as_trie_recorder(root);
+				let test_trie =
+					TrieDBBuilder::<sp_trie::LayoutV1<sp_core::Blake2Hasher>>::new(&db, &root)
+						.with_recorder(&mut trie_recorder_under_test)
+						.build();
+
+				for _ in 0..200 {
+					let index: usize = rng.gen_range(0..test_data.len());
+					test_trie.get(&test_data[index].0).unwrap().unwrap();
+					reference_trie.get(&test_data[index].0).unwrap().unwrap();
+				}
+
+				// Check that we have the same nodes recorded for both recorders
+				for (key, _) in test_data.iter() {
+					let reference = reference_trie_recorder.trie_nodes_recorded_for_key(key);
+					let test_value = trie_recorder_under_test.trie_nodes_recorded_for_key(key);
+					assert_eq!(format!("{:?}", reference), format!("{:?}", test_value));
+				}
+			}
+
+			// Check that we have the same size recorded for both recorders
+			assert_eq!(
+				reference_recorder.estimate_encoded_size(),
+				recorder_for_test.estimate_encoded_size()
+			);
+		}
+	}
+}
diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs
index fed7811744a..467daa88d32 100644
--- a/substrate/client/service/src/client/client.rs
+++ b/substrate/client/service/src/client/client.rs
@@ -429,7 +429,7 @@ where
 						backend.unpin_block(message);
 					} else {
 						log::debug!("Terminating unpin-worker, backend reference was dropped.");
-						return;
+						return
 					}
 				}
 				log::debug!("Terminating unpin-worker, stream terminated.")
@@ -515,7 +515,7 @@ where
 		} = import_block;
 
 		if !intermediates.is_empty() {
-			return Err(Error::IncompletePipeline);
+			return Err(Error::IncompletePipeline)
 		}
 
 		let fork_choice = fork_choice.ok_or(Error::IncompletePipeline)?;
@@ -610,20 +610,19 @@ where
 
 		// the block is lower than our last finalized block so it must revert
 		// finality, refusing import.
-		if status == blockchain::BlockStatus::Unknown
-			&& *import_headers.post().number() <= info.finalized_number
-			&& !gap_block
+		if status == blockchain::BlockStatus::Unknown &&
+			*import_headers.post().number() <= info.finalized_number &&
+			!gap_block
 		{
-			return Err(sp_blockchain::Error::NotInFinalizedChain);
+			return Err(sp_blockchain::Error::NotInFinalizedChain)
 		}
 
 		// this is a fairly arbitrary choice of where to draw the line on making notifications,
 		// but the general goal is to only make notifications when we are already fully synced
 		// and get a new chain head.
 		let make_notifications = match origin {
-			BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => {
-				true
-			},
+			BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast =>
+				true,
 			BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false,
 		};
 
@@ -657,14 +656,12 @@ where
 									let storage_key = PrefixedStorageKey::new_ref(&parent_storage);
 									let storage_key =
 										match ChildType::from_prefixed_key(storage_key) {
-											Some((ChildType::ParentKeyId, storage_key)) => {
-												storage_key
-											},
-											None => {
+											Some((ChildType::ParentKeyId, storage_key)) =>
+												storage_key,
+											None =>
 												return Err(Error::Backend(
 													"Invalid child storage key.".to_string(),
-												))
-											},
+												)),
 										};
 									let entry = storage
 										.children_default
@@ -689,7 +686,7 @@ where
 							// State root mismatch when importing state. This should not happen in
 							// safe fast sync mode, but may happen in unsafe mode.
 							warn!("Error importing state: State root mismatch.");
-							return Err(Error::InvalidStateRoot);
+							return Err(Error::InvalidStateRoot)
 						}
 						None
 					},
@@ -712,12 +709,11 @@ where
 			)?;
 		}
 
-		let is_new_best = !gap_block
-			&& (finalized
-				|| match fork_choice {
-					ForkChoiceStrategy::LongestChain => {
-						import_headers.post().number() > &info.best_number
-					},
+		let is_new_best = !gap_block &&
+			(finalized ||
+				match fork_choice {
+					ForkChoiceStrategy::LongestChain =>
+						import_headers.post().number() > &info.best_number,
 					ForkChoiceStrategy::Custom(v) => v,
 				});
 
@@ -841,21 +837,18 @@ where
 		let state_action = std::mem::replace(&mut import_block.state_action, StateAction::Skip);
 		let (enact_state, storage_changes) = match (self.block_status(*parent_hash)?, state_action)
 		{
-			(BlockStatus::KnownBad, _) => {
-				return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad))
-			},
+			(BlockStatus::KnownBad, _) =>
+				return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)),
 			(
 				BlockStatus::InChainPruned,
 				StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(_)),
 			) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)),
 			(_, StateAction::ApplyChanges(changes)) => (true, Some(changes)),
-			(BlockStatus::Unknown, _) => {
-				return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent))
-			},
+			(BlockStatus::Unknown, _) =>
+				return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)),
 			(_, StateAction::Skip) => (false, None),
-			(BlockStatus::InChainPruned, StateAction::Execute) => {
-				return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState))
-			},
+			(BlockStatus::InChainPruned, StateAction::Execute) =>
+				return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)),
 			(BlockStatus::InChainPruned, StateAction::ExecuteIfPossible) => (false, None),
 			(_, StateAction::Execute) => (true, None),
 			(_, StateAction::ExecuteIfPossible) => (true, None),
@@ -888,7 +881,7 @@ where
 
 				if import_block.header.state_root() != &gen_storage_changes.transaction_storage_root
 				{
-					return Err(Error::InvalidStateRoot);
+					return Err(Error::InvalidStateRoot)
 				}
 				Some(sc_consensus::StorageChanges::Changes(gen_storage_changes))
 			},
@@ -914,7 +907,7 @@ where
 				"Possible safety violation: attempted to re-finalize last finalized block {:?} ",
 				hash,
 			);
-			return Ok(());
+			return Ok(())
 		}
 
 		// Find tree route from last finalized to given block.
@@ -928,7 +921,7 @@ where
 				retracted, info.finalized_hash
 			);
 
-			return Err(sp_blockchain::Error::NotInFinalizedChain);
+			return Err(sp_blockchain::Error::NotInFinalizedChain)
 		}
 
 		// We may need to coercively update the best block if there is more than one
@@ -1008,7 +1001,7 @@ where
 				// since we won't be running the loop below which
 				// would also remove any closed sinks.
 				sinks.retain(|sink| !sink.is_closed());
-				return Ok(());
+				return Ok(())
 			},
 		};
 
@@ -1044,7 +1037,7 @@ where
 
 				self.every_import_notification_sinks.lock().retain(|sink| !sink.is_closed());
 
-				return Ok(());
+				return Ok(())
 			},
 		};
 
@@ -1143,18 +1136,17 @@ where
 			.as_ref()
 			.map_or(false, |importing| &hash == importing)
 		{
-			return Ok(BlockStatus::Queued);
+			return Ok(BlockStatus::Queued)
 		}
 
 		let hash_and_number = self.backend.blockchain().number(hash)?.map(|n| (hash, n));
 		match hash_and_number {
-			Some((hash, number)) => {
+			Some((hash, number)) =>
 				if self.backend.have_state_at(hash, number) {
 					Ok(BlockStatus::InChainWithState)
 				} else {
 					Ok(BlockStatus::InChainPruned)
-				}
-			},
+				},
 			None => Ok(BlockStatus::Unknown),
 		}
 	}
@@ -1190,7 +1182,7 @@ where
 
 		let genesis_hash = self.backend.blockchain().info().genesis_hash;
 		if genesis_hash == target_hash {
-			return Ok(Vec::new());
+			return Ok(Vec::new())
 		}
 
 		let mut current_hash = target_hash;
@@ -1206,7 +1198,7 @@ where
 			current_hash = ancestor_hash;
 
 			if genesis_hash == current_hash {
-				break;
+				break
 			}
 
 			current = ancestor;
@@ -1291,15 +1283,14 @@ where
 		size_limit: usize,
 	) -> sp_blockchain::Result<Vec<(KeyValueStorageLevel, bool)>> {
 		if start_key.len() > MAX_NESTED_TRIE_DEPTH {
-			return Err(Error::Backend("Invalid start key.".to_string()));
+			return Err(Error::Backend("Invalid start key.".to_string()))
 		}
 		let state = self.state_at(hash)?;
 		let child_info = |storage_key: &Vec<u8>| -> sp_blockchain::Result<ChildInfo> {
 			let storage_key = PrefixedStorageKey::new_ref(storage_key);
 			match ChildType::from_prefixed_key(storage_key) {
-				Some((ChildType::ParentKeyId, storage_key)) => {
-					Ok(ChildInfo::new_default(storage_key))
-				},
+				Some((ChildType::ParentKeyId, storage_key)) =>
+					Ok(ChildInfo::new_default(storage_key)),
 				None => Err(Error::Backend("Invalid child storage key.".to_string())),
 			}
 		};
@@ -1311,7 +1302,7 @@ where
 			{
 				Some((child_info(start_key)?, child_root))
 			} else {
-				return Err(Error::Backend("Invalid root start key.".to_string()));
+				return Err(Error::Backend("Invalid root start key.".to_string()))
 			}
 		} else {
 			None
@@ -1355,18 +1346,18 @@ where
 				let size = value.len() + next_key.len();
 				if total_size + size > size_limit && !entries.is_empty() {
 					complete = false;
-					break;
+					break
 				}
 				total_size += size;
 
-				if current_child.is_none()
-					&& sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice())
-					&& !child_roots.contains(value.as_slice())
+				if current_child.is_none() &&
+					sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) &&
+					!child_roots.contains(value.as_slice())
 				{
 					child_roots.insert(value.clone());
 					switch_child_key = Some((next_key.clone(), value.clone()));
 					entries.push((next_key.clone(), value));
-					break;
+					break
 				}
 				entries.push((next_key.clone(), value));
 				current_key = next_key;
@@ -1386,12 +1377,12 @@ where
 					complete,
 				));
 				if !complete {
-					break;
+					break
 				}
 			} else {
 				result[0].0.key_values.extend(entries.into_iter());
 				result[0].1 = complete;
-				break;
+				break
 			}
 		}
 		Ok(result)
@@ -1816,7 +1807,7 @@ where
 		match self.block_rules.lookup(number, &hash) {
 			BlockLookupResult::KnownBad => {
 				trace!("Rejecting known bad block: #{} {:?}", number, hash);
-				return Ok(ImportResult::KnownBad);
+				return Ok(ImportResult::KnownBad)
 			},
 			BlockLookupResult::Expected(expected_hash) => {
 				trace!(
@@ -1825,7 +1816,7 @@ where
 					expected_hash,
 					number
 				);
-				return Ok(ImportResult::KnownBad);
+				return Ok(ImportResult::KnownBad)
 			},
 			BlockLookupResult::NotSpecial => {},
 		}
@@ -1836,12 +1827,10 @@ where
 			.block_status(hash)
 			.map_err(|e| ConsensusError::ClientImport(e.to_string()))?
 		{
-			BlockStatus::InChainWithState | BlockStatus::Queued => {
-				return Ok(ImportResult::AlreadyInChain)
-			},
-			BlockStatus::InChainPruned if !import_existing => {
-				return Ok(ImportResult::AlreadyInChain)
-			},
+			BlockStatus::InChainWithState | BlockStatus::Queued =>
+				return Ok(ImportResult::AlreadyInChain),
+			BlockStatus::InChainPruned if !import_existing =>
+				return Ok(ImportResult::AlreadyInChain),
 			BlockStatus::InChainPruned => {},
 			BlockStatus::Unknown => {},
 			BlockStatus::KnownBad => return Ok(ImportResult::KnownBad),
@@ -2007,9 +1996,8 @@ where
 
 	fn block(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<SignedBlock<Block>>> {
 		Ok(match (self.header(hash)?, self.body(hash)?, self.justifications(hash)?) {
-			(Some(header), Some(extrinsics), justifications) => {
-				Some(SignedBlock { block: Block::new(header, extrinsics), justifications })
-			},
+			(Some(header), Some(extrinsics), justifications) =>
+				Some(SignedBlock { block: Block::new(header, extrinsics), justifications }),
 			_ => None,
 		})
 	}
-- 
GitLab